set
stringclasses 1
value | id
stringlengths 5
9
| chunk_text
stringlengths 1
115k
| chunk_num_tokens
int64 1
106k
| document_num_tokens
int64 58
521k
| document_language
stringclasses 2
values |
---|---|---|---|---|---|
train
|
0.155.7
|
\subsection{Proof of Proposition~\ref{prop_optimal} (Optimality of the convergence rates)}
Before proving the optimality of the convergence rate stated in Proposition~\ref{prop_optimal}, we need the following technical lemma:
\begin{lemma} \label{lemmatech}
Let $y$ be a continuously differentiable function with values in $\mathbb R$.
Let $T>0$ and $\epsilon > 0$. If $y$ is bounded, then there exists $t_1 >T$ such that:
\begin{equation*}
|\dot{y}(t_1) | \leqslant \frac{\epsilon}{t_1}.
\end{equation*}
\end{lemma}
\begin{proof}
We split the proof into two cases.
\begin{enumerate}
\item
There exists $t_1 >T$ such that $\dot{y}(t_1)=0$.
\item
$\dot{y}(t)$ is of constant sign for $t> T$. For instance we assume $\dot{y}(t)>0$.
By contradiction, let us assume that $\dot{y}(t)>\frac{\epsilon}{t}$ for all $t > T$.
Then, integrating this lower bound, $y(t)$ cannot be a bounded function as assumed.
\end{enumerate}
\end{proof}
Let us now prove Proposition~\ref{prop_optimal}: the idea of the proof is the following: we first show that $\mathcal{H}$ is bounded from below. Since $\mathcal{H}$ is a sum of 3 terms including the term $F-F^*$, we then show that given $t_1 \geq t_0$, there always exists a time $t \geq t_1$ such that the value of $\mathcal{H}$ is concentrated on the term $F-F^*$.
We start the proof by using the fact that, for the function $F(x)=\vert x\vert^{\gamma}$, $\gamma>1$, the inequality of Lemma~\ref{LemmeFonda} is actually an equality. Using the values $p=\frac{2\gamma \alpha}{\gamma+2}-2$ and $\lambda=\frac{2\alpha}{\gamma+2}$ of Theorems~\ref{Theo1} and \ref{Theo1b}, we have a closed form for the derivative of the function $\mathcal{H}$:
\begin{equation}\label{eqH1}
\mathcal{H}'(t) = K_1 t^{p}c(t) =\frac{K_1}{2} t^{p-1}\vert x(t)\vert^{2},
\end{equation}
where $K_1$ is the constant given in \eqref{eqdefK1}.
We will now prove that there exists $\ell>0$ such that for $t$ large enough:
\begin{equation*}
\mathcal{H}(t) \geqslant \ell.
\end{equation*}
To prove that point we consider two cases depending on the sign of
$\alpha-(1+\frac{2}{\gamma})$.
\begin{enumerate}
\item Case when $\alpha\leqslant 1+\frac{2}{\gamma}$, $\xi\geqslant 0$ and $K_1\leqslant 0$. We can first observe that $\mathcal{H}$ is a non-negative and non-increasing function.
Moreover there exists $\tilde t\geqslant t_0$ such that for $t\geqslant \tilde t$, $|x(t)|\leqslant 1$ and:
\begin{equation*}
t^pc(t)\leqslant \frac{t^pa(t)}{2t^2}\leqslant \frac{\mathcal{H}(t)}{t^3},
\end{equation*}
which implies using \eqref{eqH1} that:
\begin{equation*}
|\mathcal{H}'(t)|\leqslant |K_1|\frac{\mathcal{H}(t)}{t^3}.
\end{equation*}
If we denote $G(t)=\ln (\mathcal{H}(t))$ we get for all $t\geqslant \tilde t$,
\begin{equation*}
|G(t)-G(\tilde t)|\leqslant \int_{\tilde t}^t\frac{|K_1|}{s^3}\,ds.
\end{equation*}
We deduce that $|G(t)|$ is bounded below and then that there exists $\ell>0$ such that for $t$ large enough:
\begin{equation*}
\mathcal{H}(t) \geqslant \ell.
\end{equation*}
\item Case when $\alpha> 1+\frac{2}{\gamma}$, $\xi< 0$ and $K_1> 0$. This implies in particular that $\mathcal{H}$ is non-decreasing. Moreover, from Theorem~\ref{Theo1b}, $\mathcal{H}$ is bounded above. Coming back to the inequality \eqref{H:bound}, we observe that $\mathcal{H}(t_0)>0$ provided that $1+\frac{\xi}{2t_0^2}>0$, with $K=1$ and $\xi = \lambda(\lambda-\alpha+1)$, i.e.:
$$t_0>\sqrt{\frac{\alpha\gamma}{(\gamma +2)^2}\left(\alpha-\left(1+\frac{2}{\gamma}\right)\right)}.$$
In particular, we have that for any $t\geqslant t_0$
\begin{equation*}
\mathcal{H}(t) \geqslant \ell,
\end{equation*}
with $\ell=\mathcal{H}(t_0)$.
\end{enumerate}
Hence for any $\alpha>0$ and for $t$ large enough
\begin{equation*}
a(t)+b(t)+ \xi c(t) \geqslant \frac{\ell}{t^{p+1}}.
\end{equation*}
Moreover, since $c(t)=o(a(t))$ when $t \to + \infty$, we have that for $t$ large enough,
\begin{equation*}
a(t)+b(t) \geqslant \frac{\ell}{2 t^{p+1}}.
\end{equation*}
Let $T>0$ and $\epsilon >0$. We set:
\begin{equation*}
y(t):=t^{\lambda} x(t),
\end{equation*}
where $\lambda =\frac{2 \alpha}{\gamma+2}$. From Theorem~\ref{Theo1} and Theorem~\ref{Theo1b}, we know that $y(t)$ is bounded.
Hence, from Lemma~\ref{lemmatech}, there exists $t_1 > T$ such that
\begin{equation} \label{eqt1}
|\dot{y}(t_1) |\leqslant \frac{\epsilon}{t_1}.
\end{equation}
But:
\begin{equation*}
\dot{y}(t) = t^{\lambda-1} \left(\lambda x(t)
+t \dot{x}(t)
\right).
\end{equation*}
Hence using \eqref{eqt1}:
\begin{equation*}
t_1^{\lambda} \left|\lambda x(t_1)
+t_1 \dot{x}(t_1)
\right|
\leqslant \epsilon.
\end{equation*}
We recall that: $b(t)=\frac{1}{2t}\norm{\lambda(x(t)-x^*)+t\dot{x}(t)}^2$.
We thus have:
\begin{equation*}
b(t_1) \leqslant \frac{\epsilon^2}{2 t_1^{2\lambda+1}}.
\end{equation*}
Since $\gamma \leqslant 2$, $\lambda =\frac{2 \alpha}{\gamma+2}$ and $p=\frac{2\gamma \alpha}{\gamma+2}-2$, we have
$
2\lambda+1
\geq
p+1
$, and thus
\begin{equation*}
b(t_1) \leqslant \frac{\epsilon^2}{2 t_1^{p+1}}.
\end{equation*}
For $\epsilon =\sqrt{\frac{\ell}{2}}$ for example, there exists thus some $t_1 >T$ such that $b(t_1) \leqslant \frac{\ell}{4 t_1^{p+1}}$.
Then $a(t_1) \geqslant \frac{\ell}{4 t_1^{p+1}}$, i.e.
$F(x(t_1))-F^* \geqslant \frac{\ell}{4 t_1^{p+2}}$.
Since $p+2= \frac{2 \gamma \alpha}{\gamma +2}$, this concludes the proof.
\subsection{Proof of Theorem \ref{Theo2}}
We detail here the proof of Theorem \ref{Theo2}.
Let us consider $\gamma_1>2$, $\gamma_2 >2$, and $\alpha\geqslant \frac{\gamma_1+2}{\gamma_1-2}$. We consider here functions $\mathcal{H}$ for all $x^*$ in the set $X^*$ of minimizers of $F$ and prove that these functions are uniformly bounded. More precisely for any $x^*\in X^*$ we define $\mathcal{H}(t)$ with $p=\frac{4}{\gamma_1-2}$ and $\lambda=\frac{2}{\gamma_1-2}$.
With this choice of $\lambda$ and $p$, using Hypothesis $\textbf{H}_1(\gamma_1)$ we have from Lemma~\ref{LemmeFonda}:
\begin{equation*}
\mathcal{H}'(t)\leqslant 2t^{\frac{4}{\gamma_1-2}}\left(\frac{\gamma_1+2}{\gamma_1-2}-\alpha\right)b(t),
\end{equation*}
which is non-positive when $\alpha\geqslant\frac{\gamma_1+2}{\gamma_1-2}$, which implies that the function $\mathcal{H}$ is bounded above. Hence for any choice of $x^*$ in the set of minimizers $X^*$, the function $\mathcal{H}$ is bounded above and since the set of minimizers is bounded ($F$ is coercive), there exists $A>0$ and $t_0$ such that for all choices of $x^*$ in $X^*$,
\begin{equation*} \label{inegHHAtz}
\mathcal{H}(t_0)\leqslant A,
\end{equation*}
which implies that for all $x^* \in X^{*}$ and
for all $t\geqslant t_0$
\begin{equation*} \label{inegHHA}
\mathcal{H}(t)\leqslant A.
\end{equation*}
Hence for all $t\geqslant t_0$ and for all $x^*\in X^*$
\begin{equation*}\label{BoundWold}
t^{\frac{4}{\gamma_1-2}}t^2(F(x(t))-F^*)\leqslant \frac{\vert \xi\vert}{2}
t^{\frac{4}{\gamma_1-2}}\norm{x(t)-x^*}^2
+A,
\end{equation*}
which implies that
\begin{equation}\label{BoundW}
t^{\frac{4}{\gamma_1-2}}t^2(F(x(t))-F^*)\leqslant \frac{\vert \xi\vert}{2}
t^{\frac{4}{\gamma_1-2}}d(x(t),X^{*})^2
+A.
\end{equation}
We now set:
\begin{equation}\label{defv}
v(t):=t^{\frac{4}{\gamma_2-2}}d(x(t),X^*)^2.
\end{equation}
Using \eqref{BoundW} we have:
\begin{equation}\label{Boundu1bos}
t^{\frac{2 \gamma_1}{\gamma_1-2}}(F(x(t))-F^*)\leqslant \frac{\vert \xi\vert}{2}
t^{\frac{4}{\gamma_1-2}-\frac{4}{\gamma_2-2}}
v(t)+A.
\end{equation}
Using the hypothesis $\textbf{H}_2(\gamma_2)$ applied under the form given by Lemma \ref{lem:H2} (since $X^*$ is compact), there exists $K>0$ such that
\begin{equation*}
K\left(t^{-\frac{4}{\gamma_2-2}}v(t)\right)^{\frac{\gamma_2}{2}}\leqslant F(x(t))-F^*,
\end{equation*}
which is equivalent to
\begin{equation*}
Kv(t)^{\frac{\gamma_2}{2}}
t^{\frac{-2\gamma_2}{\gamma_2-2}}
\leqslant F(x(t))-F^*.
\end{equation*}
Hence:
\begin{equation*}
K
t^{\frac{2\gamma_1}{\gamma_1-2}} t^{\frac{-2\gamma_2}{\gamma_2-2}}
v(t)^{\frac{\gamma_2}{2}}\leqslant
t^{\frac{2\gamma_1}{\gamma_1-2}} (F(x(t))-F^*).
\end{equation*}
Using \eqref{Boundu1bos}, we obtain:
\begin{equation*}
K
t^{\frac{2\gamma_1}{\gamma_1-2}-\frac{2\gamma_2}{\gamma_2-2}}
v(t)^{\frac{\gamma_2}{2}}\leqslant \frac{|\xi|}{2}
t^{\frac{4}{\gamma_1-2}-\frac{4}{\gamma_2-2}}
v(t)+A,
\end{equation*}
i.e.:
\begin{equation}\label{vbounded}
K
v(t)^{\frac{\gamma_2}{2}}\leqslant \frac{|\xi|}{2}
v(t)+A
t^{\frac{4}{\gamma_2-2}-\frac{4}{\gamma_1-2}}.
\end{equation}
Since $2<\gamma_1 \leqslant \gamma_2$, we deduce that $v$ is bounded.
Hence, using \eqref{Boundu1bos} there exists some positive constant $B$ such that:
\begin{equation*}
F(x(t))-F^*\leqslant
B t^{\frac{-2 \gamma_2}{\gamma_2-2}}
+A t^{\frac{-2 \gamma_1}{\gamma_1-2}}.
\end{equation*}
Since $2<\gamma_1 \leqslant \gamma_2$, we have $\frac{-2 \gamma_2}{\gamma_2-2} \geqslant \frac{-2 \gamma_1}{\gamma_1-2}$.
Hence we deduce that $F(x(t))-F^* = O \left( t^{\frac{-2 \gamma_2}{\gamma_2-2}}\right)$.\\
| 3,843 | 24,904 |
en
|
train
|
0.155.8
|
\subsection{Proof of Corollary~\ref{Corol2}}
We are now in position to prove Corollary~\ref{Corol2}. The first point of Corollary~\ref{Corol2} is just a particular instance of Theorem~\ref{Theo2}.
In the sequel, we prove the second point of Corollary~\ref{Corol2}.
Let $t\geqslant t_0$ and $\tilde x\in X^*$ such that
\begin{equation*}
\|x(t)-\tilde x\|=d(x(t),X^*).
\end{equation*}
We previously proved that there exists $A>0$ such that for any $t\geqslant t_0$ and any $x^*\in X^*$,
\begin{equation*}
\mathcal{H}(t)\leqslant A.
\end{equation*}
For the choice $x^*=\tilde x$ this inequality ensures that
\begin{equation*}
\frac{t^{\frac{4}{\gamma-2}}}{2}\norm{\lambda (x(t)-\tilde x)+t\dot x(t)}^2+t^{\frac{4}{\gamma-2}}\frac{\xi}{2}d(x(t),\tilde x)^2\leqslant A,
\end{equation*}
which is equivalent to
\begin{equation*}
\frac{t^{\frac{4}{\gamma-2}}}{2}\norm{\lambda (x(t)-\tilde x)+t\dot x(t)}^2\leqslant \frac{|\xi|}{2}v(t)+A,
\end{equation*}
where $v(t)$ is defined in \eqref{defv} with $\gamma=\gamma_2$.
Using the fact that the function $v$ is bounded (a consequence of \eqref{vbounded}) we deduce that there exists a positive constant $A_1>0$ such that:
\begin{equation*}
\norm{\lambda (x(t)-\tilde x)+t\dot x(t)}\leqslant \frac{A_1}{t^{\frac{2}{\gamma-2}}}.
\end{equation*}
Thus:
\begin{equation*}
t\norm{\dot x(t)}\leqslant \frac{A_1}{t^{\frac{2}{\gamma-2}}}+|\lambda|d(x(t),\tilde x)=\frac{A_1+|\lambda|\sqrt{v(t)}}{t^{\frac{2}{\gamma-2}}}.
\end{equation*}
Using once again the fact that the function $v$ is bounded we deduce that there exists a real number $A_2$ such that
\begin{equation*}
\norm{\dot x(t)}\leqslant \frac{A_2}{t^{\frac{\gamma}{\gamma-2}}},
\end{equation*}
which implies that $\norm{\dot x(t)}$ is an integrable function. As a consequence, we deduce that the trajectory $x(t)$ has a finite length.
\subsection{Proof of Proposition \ref{prop:gap}}
The idea of the proof is very similar to that of Proposition~\ref{prop_optimal} (optimality of the convergence rate in the sharp case, i.e. when $\gamma \in (1,2]$).
For the exact same choice of parameters $p=\frac{2\gamma\alpha}{\gamma+2}-2$ and $\lambda=\frac{2\alpha}{\gamma+2}$ and assuming that $1+\frac{2}{\gamma}<\alpha < \frac{\gamma+2}{\gamma-2}$, we first show that the energy $\mathcal H$ is non-decreasing and then:
\begin{equation}
\forall t\geqslant t_0,~\mathcal H(t) \geqslant \ell,\label{infH}
\end{equation}
where $\ell=\mathcal H(t_0)>0$. Indeed, since $\gamma>2$ and $\alpha <\frac{\gamma+2}{\gamma-2}$, a straightforward computation shows that $\lambda^2 -|\xi|>0$, so that:
\begin{eqnarray*}
\mathcal H(t_0)
&=& t_0^{p+2}|x(t_0)|^\gamma + \frac{t_0^p}{2}\left(|\lambda x(t_0)+t_0\dot x(t_0)|^2 -|\xi||x(t_0)|^2\right)\\
&=& t_0^{p+2}|x(t_0)|^\gamma + \frac{t_0^p}{2}\left( \lambda^2 -|\xi|\right)|x(t_0)|^2 >0,
\end{eqnarray*}
without any additional assumption on the initial time $t_0>0$.
Let $T>t_0$. We set: $y(t)=t^\lambda x(t)$. If $y(t)$ is bounded as it is in Proposition~\ref{prop_optimal}, by the exact same arguments, we prove that there exists $t_1>T$ such that: $b(t_1) \leq \frac{\ell}{4t_1^{p+1}}$. Moreover since $\xi<0$ we deduce from \eqref{infH} that:
$$t_1^{p+1}(a(t_1)+b(t_1)) \geqslant \ell.$$Hence:
$$a(t_1)=t_1(F(x(t_1))-F^*) \geqslant \frac{\ell}{4t_1^{p+1}},$$
i.e.: $F(x(t_1))-F^* \geqslant \frac{\ell}{4t_1^{p+2}} = \frac{\ell}{4t_1^{\frac{2\alpha\gamma}{\gamma+2}}}$.
If $y(t)$ is not bounded, then the proof is even simpler: indeed, in that case, for any $K>0$, there exists $t_1\geqslant T$ such that: $|y(t_1)|\geq K$, hence:
$$F(x(t_1))-F^*=|x(t_1)|^\gamma \geqslant \frac{K^\gamma}{t_1^{\lambda\gamma}}=\frac{K^\gamma}{t_1^{\frac{2\alpha\gamma}{\gamma+2}}},$$
which concludes the proof.
| 1,566 | 24,904 |
en
|
train
|
0.155.9
|
\section{Proof of Lemma~\ref{LemmeFonda}} \label{appendix}
We prove here Lemma~\ref{LemmeFonda}. Notice that the computations are standard (see e.g. \cite{AujolDossal}).
\begin{lemma} \label{lemma_tec}
\begin{eqnarray*}
\mathcal{E}'(t)&=&2a(t)+\lambda t\ps{-\nabla F(x(t))}{x(t)-x^*}
+ (\xi- \lambda (\lambda + 1 -\alpha)
)\ps{\dot{x}(t)}{x(t)-x^*}\\
&&+
2(\lambda+1-\alpha)
b(t)
-2\lambda^2(\lambda+1-\alpha) c(t).
\end{eqnarray*}\label{lem:tec1}
\end{lemma}
\begin{proof} Let us differentiate the energy $\mathcal E$:
\begin{eqnarray*}
\mathcal{E}'(t)&=&2a(t)+t^2\ps{\nabla F(x(t))}{\dot{x}(t)}+\ps{\lambda \dot{x}(t)+t\ddot{x}(t)+\dot{x}(t)}{\lambda(x(t)-x^*)+t\dot{x}(t)}\\
&&+\xi\ps{\dot{x}(t)}{x(t)-x^*}
\\
&=&2a(t)+t^2\ps{\nabla F(x(t))+\ddot{x}(t)}{\dot{x}(t)}+(\lambda+1)t\norm{\dot{x}(t)}^2+\lambda t\ps{\ddot{x}(t)}{x(t)-x^*}\\
&&+(\lambda(\lambda+1)+\xi)\ps{\dot{x}(t)}{x(t)-x^*}
\\
&=&2a(t)+t^2\ps{-\frac{\alpha}{t}\dot{x}(t)}{\dot{x}(t)}+(\lambda+1)t\norm{\dot{x}(t)}^2+\lambda t\ps{\ddot{x}(t)}{x(t)-x^*}\\
&&+(\lambda(\lambda+1)+\xi)\ps{\dot{x}(t)}{x(t)-x^*}\\
&=&2a(t)+t(\lambda+1-\alpha)\norm{\dot{x}(t)}^2
+\lambda t\ps{\ddot{x}(t)}{x(t)-x^*}+
(\lambda(\lambda+1)+\xi)\ps{\dot{x}(t)}{x(t)-x^*}.
\end{eqnarray*}
Using the ODE \eqref{ODE}, we get:
\begin{eqnarray*}
\mathcal{E}'(t)&=&2a(t)+t(\lambda+1-\alpha)\norm{\dot{x}(t)}^2 +\lambda t\ps{-\nabla F(x(t))-\frac{\alpha}{t}\dot{x}(t)}{x(t)-x^*}\\
&&+ (\lambda(\lambda+1)+\xi)\ps{\dot{x}(t)}{x(t)-x^*}\\
&=&2a(t)+t(\lambda+1-\alpha)\norm{\dot{x}(t)}^2+\lambda t\ps{-\nabla F(x(t))}{x(t)-x^*}\\
&&+ (\lambda(\lambda+1)-\alpha\lambda+\xi)\ps{\dot{x}(t)}{x(t)-x^*}.
\end{eqnarray*}
Observing now that:
\begin{equation*}\label{eqReformEner}
\frac{1}{t}\norm{\lambda(x(t)-x^*)+t\dot{x}(t)}^2=t\norm{\dot{x}(t)}^2+2\lambda\ps{\dot{x}(t)}{x(t)-x^*}+\frac{\lambda^2}{t}\norm{x(t)-x^*}^2,
\end{equation*}
we can write:
\begin{eqnarray*}
\mathcal{E}'(t)&=&2a(t)+\lambda t\ps{-\nabla F(x(t))}{x(t)-x^*} + (\xi- \lambda (\lambda + 1 -\alpha)
)\ps{\dot{x}(t)}{x(t)-x^*}
\\
&&+
(\lambda+1-\alpha)\frac{1}{t}\norm{\lambda(x(t)-x^*)+t\dot{x}(t)}^2-\frac{\lambda^2(\lambda+1-\alpha)}{t}\norm{x(t)-x^*}^2.
\end{eqnarray*}
\end{proof}
\begin{corollary}\label{lemma_tec_conv_beta2}
If $F$ satisfies the hypothesis $\textbf{H}_1(\gamma)$
and
if $\xi = \lambda (\lambda + 1 -\alpha)$, then:
\begin{align}\label{EqConvBeta}
\mathcal{E}'(t)\leqslant&
(2- \gamma \lambda) a(t)
+2(\lambda+1-\alpha)
b(t)
-2\lambda^2(\lambda+1-\alpha) c(t).
\end{align}
\end{corollary}
\begin{proof}
Choosing $\xi = \lambda (\lambda + 1 -\alpha)$ in Lemma~\ref{lem:tec1}, we get:
$$\mathcal{E}'(t)=2a(t)+\lambda t\ps{-\nabla F(x(t))}{x(t)-x^*}
+2(\lambda+1-\alpha)
b(t)
-2\lambda^2(\lambda+1-\alpha) c(t).
$$
Applying now the assumption $\textbf{H}_1(\gamma)$, we finally obtain the expected result.
\end{proof}
One can notice that if $F(x)=|x|^{\gamma}$ the inequality of Lemma
\ref{lem:tec1} is actually an equality when $\xi = \lambda (\lambda + 1 -\alpha)$. This ensures that for this specific function $F$, the inequality in Lemma~\ref{LemmeFonda} is an equality.
\begin{lemma} \label{lemma_tec4}
If $F(x)=|x|^{\gamma}$ and if $\xi = \lambda (\lambda + 1 -\alpha)$, then
\begin{eqnarray*}
\mathcal{H}'(t)&=&
t^{p} \left[ (2+p) a(t)+\lambda t\ps{-\nabla F(x(t))}{x(t)-x^*}
\right.
+(2 \lambda+2 -2\alpha +p)b(t)
\\ &&
\left.
~~~+\lambda(\lambda+1-\alpha) (-2\lambda +p)c(t)
\right].
\end{eqnarray*}
\end{lemma}
\begin{proof}
We have $\mathcal{H}(t)=t^p \mathcal{E}(t)$. Hence $\mathcal{H}'(t)=t^p \mathcal{E}'(t)+pt^{p-1}\mathcal{E}(t)=t^{p-1} (t \mathcal{E}'(t)+p\mathcal{E}(t))$. We conclude by using Lemma~\ref{lem:tec1}.
\end{proof}
In conclusion, to prove Lemma \ref{LemmeFonda}, it is sufficient to plug the assumption $\textbf{H}_1(\gamma)$ into the equality of Lemma \ref{lemma_tec4}.
\bibliographystyle{siamplain}
\bibliography{reference}
\end{document}
| 1,872 | 24,904 |
en
|
train
|
0.156.0
|
\begin{document}
\title[SDEs and PDEs on non-smooth time-dependent domains]{Stochastic and
partial differential equations on non-smooth time-dependent domains}
\address{Niklas L.P. Lundstr\"{o}m\\
Department of Mathematics and Mathematical Statistics, Ume\aa\ University\\
SE-901 87 Ume\aa , Sweden}
\email{[email protected]}
\address{Thomas \"{O}nskog\\
Department of Mathematics, Royal Institute of Technology (KTH)\\
SE-100 44 Stockholm, Sweden}
\email{[email protected]}
\author{Niklas L.P. Lundstr\"{o}m, Thomas \"{O}nskog}
\begin{abstract}
In this article, we consider non-smooth time-dependent domains whose boundary is $\mathcal{W}^{1,p}$ in time and single-valued, smoothly varying directions of reflection at the boundary. In this setting, we first prove existence and uniqueness of strong solutions to stochastic differential equations with oblique reflection. Secondly, we prove, using the theory of viscosity solutions, a comparison principle for fully nonlinear second-order parabolic partial differential equations with oblique derivative boundary conditions. As a consequence, we obtain uniqueness, and, by barrier construction and Perron's method, we also conclude existence of viscosity
solutions. Our results generalize two articles by Dupuis and Ishii to time-dependent domains.
\noindent 2000\textit{\ Mathematics Subject Classification. }35D05, 49L25,
60J50, 60J60.
\noindent \textit{Keywords and phrases. } Reflected diffusion, Skorohod
problem, oblique reflection, time-dependent domain, stochastic differential
equations, non-smooth domain, viscosity solution, parabolic partial
differential equation, comparison principle, existence, uniqueness.
\end{abstract}
\maketitle
\setcounter{equation}{0} \setcounter{theorem}{0}
\section{Introduction\label{intro}}
In this article we establish existence and uniqueness of strong solutions to
stochastic differential equations (SDE) with single-valued, smoothly varying
oblique reflection at the boundary of a bounded, non-smooth time-dependent
domain whose boundary is $\mathcal{W}^{1,p}$ in time. In the same geometric
setting, we also prove a comparison principle, uniqueness and existence of
viscosity solutions to partial differential equations (PDE) with oblique
derivative boundary conditions.
In the SDE case, our approach is based on the Skorohod problem, which, in
the form studied in this article, was first described by Tanaka \cite
{Tanaka1979}. Tanaka established existence and uniqueness of solutions to
the Skorohod problem in convex domains with normal reflection. These results
were subsequently substantially generalized by, in particular, Lions and
Sznitman \cite{LionsSznitman1984} and Saisho \cite{Saisho1987}. To the
authors' knowledge, the most general results on strong solutions to
reflected SDEs in time-independent domains based on the Skorohod problem are
those established by Dupuis and Ishii \cite{DupuisIshii1993}. The aim here
is to generalize the SDE results mentioned above, in particular those of
Case 1 in \cite{DupuisIshii1993}, to the setting of time-dependent domains.
There is, by now, a number of articles on reflected SDEs in time-dependent
domains. Early results on this topic include the exhaustive study of the
heat equation and reflected Brownian motion in smooth time-dependent domains
by Burdzy, Chen, and Sylvester \cite{BurdzyChenSylvester2004AP} and the
study of reflected SDEs in smooth time-dependent domains with reflection in
the normal direction by Costantini, Gobet, and El Karoui \cite
{CostantiniGobetKaroui2006}. We also mention that Burdzy, Kang, and Ramanan
\cite{BurdzyKangRamanan2009} investigated the Skorohod problem in a
one-dimensional, time-dependent domain and, in particular, found conditions
for when there exists a solution to the Skorohod problem in the event that
the two boundaries meet. Existence of weak solutions to SDEs with oblique
reflection in non-smooth time-dependent domains was established by Nystr\"{o}
m and \"{O}nskog \cite{NystromOnskog2010a} under fairly general conditions
using the approach of \cite{Costantini1992}. In the article at hand, we use
the approach of \cite{DupuisIshii1993} and derive regularity conditions,
under which we can obtain existence and also uniqueness of strong solutions
to SDEs with oblique reflection in time-dependent domains.
Turning to the PDE case, we recall that the approach of \cite
{DupuisIshii1993} relies on the construction of test functions used earlier
in Dupuis and Ishii \cite{DupuisIshii1990} to prove the comparison
principle, existence and uniqueness for fully nonlinear second-order
elliptic PDEs in non-smooth time-independent domains. Here we generalize
these test functions to our time-dependent setting, and obtain the
corresponding results for both SDEs and PDEs in time-dependent domains. In
particular, our PDE results generalize the main part of \cite
{DupuisIshii1990} to hold in the setting of fully nonlinear second-order
parabolic PDEs in non-smooth time-dependent domains. Our proofs are based on
the theory of viscosity solutions. The first step is to observe that the
maximum principle for semicontinuous functions by Crandall and Ishii \cite
{CrandallIshii1990} holds in time-dependent domains. Using the maximum
principle and the above-mentioned test functions, we prove the comparison
principle by following the nowadays standard method, see Crandall, Ishii,
and Lions \cite{CrandallIshiiLions1992} and \cite{DupuisIshii1990}. Next, we
prove existence of a unique solution to the PDE problem by means of Perron's
method, the comparison principle and by constructing several explicit sub-
and supersolutions (barriers) to the PDE.
To the authors' knowledge, there are no previous results on the oblique
derivative problem for parabolic PDEs in non-smooth time-dependent domains.
For time-independent domains, however, there are several articles in the
literature. Besides \cite{DupuisIshii1990}, Dupuis and Ishii studied oblique
derivative problems for fully nonlinear elliptic PDEs on domains with
corners in \cite{DupuisIshii1991}. Moreover, Barles \cite{Barles1993} proved
a comparison principle and existence of unique solutions to degenerate
elliptic and parabolic boundary value problems with nonlinear Neumann type
boundary conditions in bounded domains with $\mathcal{W}^{3,\infty }$
-boundary. Ishii and Sato \cite{IshiiSato2004} proved similar theorems for
boundary value problems for some singular degenerate parabolic partial
differential equations with nonlinear oblique derivative boundary conditions
in bounded $\mathcal{C}^{1}$-domains. Further, in bounded domains with $
\mathcal{W}^{3,\infty }$-boundary, Bourgoing \cite{Bourgoing2008} considered
singular degenerate parabolic equations and equations having $L^{1}$
dependence in time.
Concerning PDEs in the setting of time-dependent domains, we mention that Bj
\"{o}rn\textit{\ et al}.~\cite{BjornBjornGianazzaParviainen2015} proved,
among other results, a comparison principle for solutions of degenerate and
singular parabolic equations with Dirichlet boundary conditions using a
different technique and that Avelin \cite{Avelin2016} proved boundary
estimates of solutions to the degenerate $p$-parabolic equation.
As a motivation for considering SDEs and PDEs in time-dependent domains, we
mention that such geometries arise naturally in a wide range of applications
in which the governing equation of interest is a differential equation, for
example in modelling of crack propagation \cite{NicaiseSandig2007},
modelling of fluids \cite{FiloZauskova2008}, \cite{HeHsiao2000} and
modelling of chemical, petrochemical and pharmaceutical processes \cite
{IzadiAbdollahiDubljevic2014}.
The rest of the paper is organized as follows. In Section \ref{DNA} we give
preliminary definitions, notations, assumptions and also state our main
results. In Section \ref{test} we construct the test functions crucial for
the proofs of both the SDE and the PDE results. Using these test functions,
we prove existence of solutions to the Skorohod problem in Section \ref{SP}.
The results on the Skorohod problem are subsequently used, in Section \ref
{RSDE}, to prove the main results for SDEs. Finally, in Section \ref{PDE},
we use the theory of viscosity solutions together with the test functions
derived in Section \ref{test} to establish the PDE results.
\setcounter{equation}{0} \setcounter{theorem}{0}
| 2,392 | 73,518 |
en
|
train
|
0.156.1
|
\section{Preliminaries and statement of main results\label{DNA}}
Throughout this article we will use the following definitions and
assumptions. Given $n\geq 1$, $T>0$ and a bounded, open, connected set $
\Omega ^{\prime }\subset
\mathbb{R}
^{n+1}$ we will refer to
\begin{equation}
\Omega =\Omega ^{\prime }\cap ([0,T]\times
\mathbb{R}
^{n}), \label{timedep}
\end{equation}
as a time-dependent domain. Given $\Omega $ and $t\in \left[ 0,T\right] $,
we define the time sections of $\Omega $ as $\Omega _{t}=\left\{ x:\left(
t,x\right) \in \Omega \right\} $, and we assume that
\begin{equation}
\Omega _{t}\neq \emptyset \text{ and that }\Omega _{t}\text{ is bounded and
connected for every }t\in \left[ 0,T\right] . \label{timesect}
\end{equation}
Let $\partial \Omega _{t}$, for $t\in \left[ 0,T\right] $, denote the
boundary of $\Omega _{t}$. Let $\left\langle \cdot ,\cdot \right\rangle $
and $\left\vert \cdot \right\vert =\left\langle \cdot ,\cdot \right\rangle
^{1/2}$ define the Euclidean inner product and norm, respectively, on $
\mathbb{R}
^{n}$ and define, whenever $a\in
\mathbb{R}
^{n}$ and $\,b>0$, the sets $B\left( a,b\right) =\left\{ x\in
\mathbb{R}
^{n}:\left\vert x-a\right\vert \leq b\right\} $ and $S\left( a,b\right)
=\left\{ x\in
\mathbb{R}
^{n}:\left\vert x-a\right\vert =b\right\} $. For any Euclidean spaces $E$
and $F$, we define the following spaces of functions mapping $E$ into $F$. $
\mathcal{C}\left( E,F\right) $ denotes the set of continuous functions, $
\mathcal{C}^{k}\left( E,F\right) $ denotes the set of $k$ times continuously
differentiable functions and $\mathcal{W}^{1,p}\left( E,F\right) $ denotes
the Sobolev space of functions whose first order weak derivatives belong to $
L^{p}\left( E\right) $. If we can distinguish the time variable from the
spatial variables, we let $\mathcal{C}^{1,2}\left( E,F\right) $ denote the
set of functions, whose elements are continuously differentiable once with
respect to the time variable and twice with respect to any space variable,
and by $\mathcal{C}_{b}^{1,2}\left( E,F\right) $ we denote the space of
bounded functions in $\mathcal{C}^{1,2}\left( E,F\right) $ having bounded
derivatives. Moreover, $\mathcal{BV}\left( E,F\right) $ denotes the set of
functions with bounded variation. In particular, for $\eta \in \mathcal{BV}
\left( \left[ 0,T\right] ,
\mathbb{R}
^{n}\right) $, we let $\left\vert \eta \right\vert \left( t\right) $ denote
the total variation of $\eta $ over the interval $\left[ 0,t\right] $.
\subsection{Assumptions on the domain and directions of reflection\label
{geoassume}}
Throughout this article we consider non-smooth time-dependent domains of the
following type. Let $\Omega \subset
\mathbb{R}
^{n+1}$ be a time-dependent domain satisfying \eqref{timesect}. The
direction of reflection at $x\in \partial \Omega _{t}$, $t\in \left[ 0,T
\right] $, is given by $\gamma \left( t,x\right) $ satisfying
\begin{equation}
\gamma \in \mathcal{C}_{b}^{1,2}\left( \mathbb{R}^{n+1},B\left( 0,1\right)
\right) , \label{smooth_gamma}
\end{equation}
such that $\gamma \left( t,x\right) \in S\left( 0,1\right) $ for all $\left(
t,x\right) \in V$, where $V$ is an open set satisfying $\Omega
_{t}^{c}\subset V$ for all $t\in \lbrack 0,T]$. Moreover, there is a
constant $\rho \in \left( 0,1\right) $ such that the exterior cone condition
\begin{equation}
\bigcup_{0\leq \zeta \leq \rho }B\left( x-\zeta \gamma \left( t,x\right)
,\zeta \rho \right) \subset \Omega _{t}^{c}, \label{boundarylip}
\end{equation}
holds, for all $x\in \partial \Omega _{t}$, $t\in \left[ 0,T\right] $. Note
that it follows from \eqref{boundarylip} that $\gamma $ points into the
domain and this is indeed the standard convention for SDEs. For PDEs,
however, the standard convention is to let $\gamma $ point out of the
domain. To facilitate for readers accustomed with either of these
conventions we, in the following, let $\gamma $ point inward whenever SDEs
are treated, whereas when we treat PDEs we assume the existence of a
function
\begin{equation}
\widetilde{\gamma }\in \mathcal{C}_{b}^{1,2}\left( \mathbb{R}^{n+1},B\left(
0,1\right) \right) , \label{smooth_gamma2}
\end{equation}
defined as $\widetilde{\gamma }\left( t,x\right) =-\gamma \left( t,x\right) $
, with $\gamma $ as in \eqref{smooth_gamma}. In particular, we have
\begin{equation}
\bigcup_{0\leq \zeta \leq \rho }B\left( x+\zeta \widetilde{\gamma }\left(
t,x\right) ,\zeta \rho \right) \subset \Omega _{t}^{c}, \label{boundarylip2}
\end{equation}
for all $x\in \partial \Omega _{t}$, $t\in \left[ 0,T\right] $. Finally,
regarding the temporal variation of the domain, we define $d\left(
t,x\right) =\inf_{y\in \Omega _{t}}\left\vert x-y\right\vert $, for all $
t\in \left[ 0,T\right] $, $x\in
\mathbb{R}
^{n}$, and assume that for some fixed $p\in \left( 1,\infty \right) $ and
all $x\in
\mathbb{R}
^{n}$,
\begin{equation}
d\left( \cdot ,x\right) \in \mathcal{W}^{1,p}\left( \left[ 0,T\right] ,\left[
0,\infty \right) \right) , \label{templip}
\end{equation}
with Sobolev norm uniformly bounded in space. We also assume that $
D_{t}d(t,x)$ is jointly measurable in $(t,x)$.
\begin{remark}
\label{spaceremark}A simple contradiction argument based on the exterior
cone condition \eqref{boundarylip} for the time sections and the regularity
of $\gamma $ and $\Omega _{t}$, shows that the time sections satisfy the
interior cone condition
\begin{equation*}
\bigcup_{0\leq \zeta \leq \rho }B\left( x+\zeta \gamma \left( t,x\right)
,\zeta \rho \right) \subset \overline{\Omega }_{t},
\end{equation*}
for all $x\in \partial \Omega _{t}$, $t\in \left[ 0,T\right] $. The exterior
and interior cone conditions together imply that the boundary of $\Omega
_{t} $ is Lipschitz continuous (in space) with a Lipschitz constant $K_{t}$
satisfying $\sup_{t\in \left[ 0,T\right] }K_{t}<\infty $. Moreover, these
conditions imply that for a suitable constant $\theta \in \left( 0,1\right) $
, $\theta ^{2}>1-\rho ^{2}$, there exists $\delta >0$ such that
\begin{equation*}
\left\langle y-x,\gamma \left( t,x\right) \right\rangle \geq -\theta
\left\vert y-x\right\vert ,
\end{equation*}
for all $x\in \partial \Omega _{t}$, $y\in \overline{\Omega }_{t}$, $t\in
\left[ 0,T\right] $ satisfying $\left\vert x-y\right\vert \leq \delta $.
\end{remark}
\begin{remark}
\label{timeholder}By Morrey's inequality, condition \eqref{templip} implies
the existence of a H\"{o}lder exponent $\widehat{\alpha }=1-1/p\in \left(
0,1\right) $ and a H\"{o}lder constant $K\in \left( 0,\infty \right) $ such
that, for all $s,t\in \left[ 0,T\right] $, $x\in
\mathbb{R}
^{n}$,
\begin{equation}
\left\vert d\left( s,x\right) -d\left( t,x\right) \right\vert \leq
K\left\vert s-t\right\vert ^{\widehat{\alpha }}. \label{tempholder}
\end{equation}
\end{remark}
\begin{remark}
The assumptions imposed on the time sections of the time-dependent domain in
\eqref{smooth_gamma}, \eqref{boundarylip} coincide with those imposed on the
time-independent domains in \cite{DupuisIshii1990} and in Case 1 of \cite
{DupuisIshii1993}. For time-independent domains, existence and uniqueness
results for SDE and PDE have also been obtained under the conditions given
in \cite{DupuisIshii1991a} and in Case 2 of \cite{DupuisIshii1993}. It is
likely that also these results can be extended to time-dependent domains
using a procedure similar to that of the article at hand, but we leave this
as a topic of future research.
\end{remark}
\begin{remark}
Consider the function
\begin{equation*}
l\left( r\right) =\sup_{s,t\in \left[ 0,T\right] ,\text{ }\left\vert
s-t\right\vert \leq r}\,\,\sup_{x\in \overline{\Omega }_{s}}\,\,\inf_{y\in
\overline{\Omega }_{t}}\left\vert x-y\right\vert ,
\end{equation*}
introduced in \cite{CostantiniGobetKaroui2006} and frequently used in \cite
{NystromOnskog2010a}. Condition \eqref{tempholder} is equivalent to,
\begin{equation*}
l\left( r\right) \leq Kr^{\widehat{\alpha }},
\end{equation*}
which is considerably stronger than the condition $\lim_{r\rightarrow
0^{+}}l\left( r\right) =0$ assumed in \cite{NystromOnskog2010a}. On the
other hand, it was assumed in \cite{NystromOnskog2010a} that $\Omega _{t}$
satisfies a uniform exterior sphere condition, and this does not hold in
general for domains satisfying \eqref{boundarylip}.
\end{remark}
%
\subsection{Statement of main result for SDEs}
We consider the Skorohod problem in the following form.
\begin{definition}
\label{skorohodprob}Given $\psi \in \mathcal{C}\left( \left[ 0,T\right] ,
\mathbb{R}
^{n}\right) $, with $\psi \left( 0\right) \in \overline{\Omega }_{0}$, we
say that the pair $\left( \phi ,\lambda \right) \in \mathcal{C}\left( \left[
0,T\right] ,
\mathbb{R}
^{n}\right) \times \mathcal{C}\left( \left[ 0,T\right] ,
\mathbb{R}
^{n}\right) $ is a solution to the Skorohod problem for $\left( \Omega
,\gamma ,\psi \right) $ if $\left( \psi ,\phi ,\lambda \right) $ satisfies,
for all $t\in \left[ 0,T\right] $,
\begin{eqnarray}
\phi \left( t\right) &=&\psi \left( t\right) +\lambda \left( t\right) ,\quad
\phi \left( 0\right) =\psi \left( 0\right) , \label{SP1} \\
\phi \left( t\right) &\in &\overline{\Omega }_{t}, \label{SP2} \\
\left\vert \lambda \right\vert \left( T\right) &<&\infty , \label{SP3} \\
\left\vert \lambda \right\vert \left( t\right) &=&\int_{\left( 0,t\right]
}I_{\left\{ \phi \left( s\right) \in \partial \Omega _{s}\right\}
}d\left\vert \lambda \right\vert \left( s\right) , \label{SP4} \\
\lambda \left( t\right) &=&\int_{\left( 0,t\right] }\widehat{\gamma }\left(
s\right) d\left\vert \lambda \right\vert \left( s\right) , \label{SP5}
\end{eqnarray}
for some measurable function $\widehat{\gamma }:\left[ 0,T\right]
\rightarrow
\mathbb{R}
^{n}$ satisfying $\widehat{\gamma }\left( s\right) =\gamma \left( s,\phi
\left( s\right) \right) $ $d\left\vert \lambda \right\vert $-a.s.
\end{definition}
We use the Skorohod problem to construct solutions to SDEs confined to the
given time-dependent domain $\overline{\Omega }$ and with direction of
reflection given by $\gamma $. We shall consider the following notion of
SDEs. Let $\left( \Omega ,\mathcal{F},\mathbb{P}\right) $ be a complete
probability space and let $\left\{ \mathcal{F}_{t}\right\} _{t\geq 0}$ be a
filtration satisfying the usual conditions. Let $m$ be a positive integer,
let $W=\left( W_{i}\right) $ be an $m$-dimensional Wiener process and let $b:
\left[ 0,T\right] \times
\mathbb{R}
^{n}\rightarrow
\mathbb{R}
^{n}$ and $\sigma :\left[ 0,T\right] \times
\mathbb{R}
^{n}\rightarrow
\mathbb{R}
^{n\times m}$ be continuous functions.
\begin{definition}
\label{strong}A strong solution to the SDE in $\overline{\Omega }$ driven by
the Wiener process $W$ and with coefficients $b$ and $\sigma $, direction of
reflection along $\gamma $ and initial condition $x\in \overline{\Omega }
_{0} $ is an $\left\{ \mathcal{F}_{t}\right\} $-adapted continuous
stochastic process $X\left( t\right) $ which satisfies, $\mathbb{P}$-almost
surely, whenever $t\in \left[ 0,T\right] $,
\begin{equation}
X\left( t\right) =x+\int_{0}^{t}b\left( s,X\left( s\right) \right)
ds+\int_{0}^{t}\left\langle \sigma \left( s,X\left( s\right) \right)
,dW\left( s\right) \right\rangle +\Lambda \left( t\right) , \label{RSDE1}
\end{equation}
where
\begin{equation}
X\left( t\right) \in \overline{\Omega }_{t},\quad \left\vert \Lambda
\right\vert \left( t\right) =\int_{\left( 0,t\right] }I_{\left\{ X\left(
s\right) \in \partial \Omega _{s}\right\} }d\left\vert \Lambda \right\vert
\left( s\right) <\infty , \label{RSDE2}
\end{equation}
and where
\begin{equation}
\Lambda \left( t\right) =\int_{\left( 0,t\right] }\widehat{\gamma }\left(
s\right) d|\Lambda |\left( s\right) , \label{RSDE3}
\end{equation}
for some measurable stochastic process $\widehat{\gamma }:\left[ 0,T\right]
\rightarrow
\mathbb{R}
^{n}$ satisfying $\widehat{\gamma }\left( s\right) =\gamma \left( s,X\left(
s\right) \right) $ $d|\Lambda |$-a.s.
\end{definition}
Comparing Definition \ref{skorohodprob} with Definition \ref{strong}, it is
clear that $\left( X\left( \cdot \right) ,\Lambda \left( \cdot \right)
\right) $ should solve the Skorohod problem for $\psi \left( \cdot \right)
=x+\int_{0}^{\cdot }b\left( s,X\left( s\right) \right) ds+\int_{0}^{\cdot
}\left\langle \sigma \left( s,X\left( s\right) \right) ,dW\left( s\right)
\right\rangle $ on an a.s.~pathwise basis. We assume that the coefficient
functions $b\left( t,x\right) $ and $\sigma \left( t,x\right) $ satisfy the
Lipschitz continuity condition
\begin{equation}
\left\vert b_i\left( t,x\right) -b_i\left( t,y\right) \right\vert \leq
K\left\vert x-y\right\vert \quad \text{and\quad }\left\vert \sigma_{i,j}
\left( t,x\right) -\sigma_{i,j} \left( t,y\right) \right\vert \leq
K\left\vert x-y\right\vert , \label{lipcoeff}
\end{equation}
for all $(i,j) \in \{1,\dots, n\} \times \{1,\dots, m\}$, $x,y \in \mathbb{R}
^n$ and for some positive constant $K\in \left( 0,\infty \right)$.
Our main result for SDEs is the following theorem.
\begin{theorem}
\label{main}Let $\Omega \subset
\mathbb{R}
^{n+1}$ be a time-dependent domain satisfying \eqref{timesect} and assume
that \eqref{smooth_gamma}, \eqref{boundarylip}, \eqref{templip} and
\eqref{lipcoeff} hold. Then there exists a unique strong solution to the SDE
in $\overline{\Omega }$ driven by the Wiener process $W$ and with
coefficients $b$ and $\sigma$, direction of reflection along $\gamma $ and
initial condition $x\in \overline{\Omega}_{0}$.
\end{theorem}
We prove Theorem \ref{main} by completing the following steps. First, in
Lemma \ref{smoothexist}, we use a penalty method to prove existence of
solutions to the Skorohod problem for smooth functions. In Lemma \ref
{compactest}, we then derive a compactness estimate for solutions to the
Skorohod problem. Based on the compactness estimate, we are, in Lemma \ref
{contexist}, able to generalize the existence result for the Skorohod
problem to all continuous functions. Finally, in Section \ref{RSDE}, we use
two classes of test functions and the existence result for the Skorohod
problem to obtain existence and uniqueness of strong solutions to SDEs with
oblique reflection at the boundary of a bounded, time-dependent domain. Note
that we are able to obtain uniqueness of the reflected SDE although the
solution to the corresponding Skorohod problem need not be unique.
%
\subsection{Statement of main results for PDEs}
To state and prove our results for PDEs we introduce some more notation. Let
$\Omega ^{\prime }$ be as in \eqref{timedep} and put
\begin{equation*}
\Omega ^{\circ }=\Omega ^{\prime }\cap \left( \left(0,T\right)\times \mathbb{R}
^{n}\right), \quad \widetilde{\Omega }=\overline{\Omega }^{\prime }\cap
\left( [0,T)\times \mathbb{R}^{n}\right) , \quad \partial \Omega = \left(
\overline{\Omega}^{\prime} \setminus \Omega ^{\prime }\right) \cap \left(
\left(0,T\right)\times \mathbb{R}^{n}\right).
\end{equation*}
We consider fully nonlinear parabolic PDEs of the form
\begin{equation}
u_{t}+F\left(t,x,u,Du,D^{2}u\right)=0\quad \text{in}\;\Omega ^{\circ }.
\label{huvudekvationen}
\end{equation}
Here $F$ is a given real function on $\overline{\Omega }\times \mathbb{R}
\times \mathbb{R}^{n}\times \mathbb{S}^{n}$, where $\mathbb{S}^{n}$ denotes
the space of $n\times n$ real symmetric matrices equipped with the positive
semi-definite ordering; that is, for $X,Y\in \mathbb{S}^{n}$, we write $
X\leq Y$ if $\langle \left(X-Y\right)\xi ,\xi \rangle \leq 0$ for all $\xi \in \mathbb{R
}^{n}$. We also adopt the matrix norm notation
\begin{equation*}
\left\Vert A\right\Vert =\sup \{|\lambda |:\lambda \text{ is an eigenvalue
of }A\}=\sup \{|\langle A\xi ,\xi \rangle |:|\xi |\leq 1\}.
\end{equation*}
Moreover, $u$ represents a real function in $\Omega ^{\circ }$ and $Du$ and $
D^{2}u$ denote the gradient and Hessian matrix, respectively, of $u$ with
respect to the spatial variables. On the boundary we impose the oblique
derivative condition on the unknown $u$
\begin{equation}
\frac{\partial u}{\partial \widetilde{\gamma }}+f\left(t,x,u\left(t,x\right)\right)=0\quad \text{on
}\;\partial \Omega , \label{randvillkor}
\end{equation}
where $f$ is a real valued function on $\overline{\partial \Omega }\times
\mathbb{R}$ and $\widetilde{\gamma }\left( t,\cdot \right) $ is the vector
field on $\mathbb{R}^{n}$, oblique to $\partial \Omega _{t}$, introduced in
\eqref{smooth_gamma2} and \eqref{boundarylip2}.
Regarding the function $F$, we make the following assumptions.
\begin{equation}
F\in C\left(\overline{\Omega }\times \mathbb{R}\times \mathbb{R}^{n}\times
\mathbb{S}^{n}\right). \label{ass_F_cont}
\end{equation}
For some $\lambda \in \mathbb{R}$ and each $\left(t,x,p,A\right)\in \overline{\Omega }
\times \mathbb{R}^{n}\times \mathbb{S}^{n}$ the function
\begin{equation}
\text{$r\rightarrow F\left(t,x,r,p,A\right)-\lambda r$ is nondecreasing on $\mathbb{R}$.
} \label{ass_F_nondecreasing}
\end{equation}
There is a function $m_{1}\in C\left([0,\infty)\right)$ satisfying $m_{1}\left(0\right)=0$ for
which
\begin{align}
& F\left(t,y,r,p,-Y\right)-F\left(t,x,r,p,X\right)\leq m_{1}\left(|x-y|\left(|p|+1\right)+\alpha |x-y|^{2}\right)
\label{ass_F_XY} \\
& \text{if}\qquad -\alpha \left(
\begin{array}{cc}
I & 0 \\
0 & I
\end{array}
\right) \leq \left(
\begin{array}{cc}
X & 0 \\
0 & Y
\end{array}
\right) \leq \alpha \left(
\begin{array}{cc}
I & -I \\
-I & I
\end{array}
\right) , \notag
\end{align}
for all $\alpha \geq 1$, $\left(t,x\right),\left(t,y\right)\in \overline{\Omega }$, $r\in \mathbb{R
}$, $p\in \mathbb{R}^{n}$ and $X,Y\in \mathbb{S}^{n}$, where $I$ denotes the
unit matrix of size $n\times n$. There is a neighborhood $U$ of $\partial
\Omega $ in $\overline{\Omega }$ and a function $m_{2}\in C\left([0,\infty )\right)$
satisfying $m_{2}\left(0\right)=0$ for which
\begin{equation}
|F\left(t,x,r,p,X\right)-F\left(t,x,r,q,Y\right)|\leq m_{2}\left(|p-q|+||X-Y||\right), \label{ass_F_boundary}
\end{equation}
for $\left(t,x\right)\in U$, $r\in \mathbb{R}$, $p,q\in \mathbb{R}^{n}$ and $X,Y\in
\mathbb{S}^{n}$. Regarding the function $f$ we assume that
\begin{equation}
f\left(t,x,r\right)\in C\left(\overline{\partial \Omega }\times \mathbb{R}\right),
\label{f_kontinuerlig}
\end{equation}
and that for each $\left(t,x\right)\in \overline{\partial \Omega }$ the function
\begin{equation}
\text{ $r\rightarrow f\left(t,x,r\right)$ is nondecreasing on $\mathbb{R}$}.
\label{ass_f_nondecreasing}
\end{equation}
We remark that assumptions \eqref{ass_F_cont} and \eqref{ass_F_XY} imply the
degenerate ellipticity
\begin{equation}
F\left(t,x,r,p,A+B\right)\leq F\left(t,x,r,p,A\right)\quad \text{if}\;B\geq 0,
\label{F_fundamental}
\end{equation}
for $\left(t,x\right)\in \overline{\Omega }$, $r\in \mathbb{R}$, $p\in \mathbb{R}^{n}$
and $A,B\in \mathbb{S}^{n}$, see Remark 3.4 in \cite{CrandallIshiiLions1992}
for a proof. To handle the strong degeneracy allowed, we will adapt the
notion of viscosity solutions \cite{CrandallIshiiLions1992}, which we recall
for problem \eqref{huvudekvationen}-\eqref{randvillkor} in Section \ref{PDE}.
Let $USC(E)$ ($LSC(E)$) denote the set of upper (lower) semi-continuous functions on $E\subset\mathbb{R}^{n+1}$.
Our main results for PDEs are given in the following theorems.
\begin{theorem}
\label{comparison}Let $\Omega ^{\circ }$ be a time-dependent domain
satisfying \eqref{timesect} and assume that \eqref{smooth_gamma2}-
\eqref{templip} and \eqref{ass_F_cont}-\eqref{ass_f_nondecreasing} hold. Let
$u\in USC(\widetilde{\Omega})$ be a viscosity subsolution, and $v\in LSC(
\widetilde{\Omega})$ be a viscosity supersolution of problem
\eqref{huvudekvationen}-\eqref{randvillkor} in $\Omega ^{\circ }$. If $
u\left(0,x\right)\leq v\left(0,x\right)$ for all $x\in \overline{\Omega }_{0}$, then
$u\leq v\;\text{in}\;\widetilde{\Omega }$.
\end{theorem}
\begin{theorem}
\label{existence}Let $\Omega ^{\circ }$ be a time-dependent domain
satisfying \eqref{timesect} and assume that \eqref{smooth_gamma2}-
\eqref{templip} and \eqref{ass_F_cont}-\eqref{ass_f_nondecreasing} hold.
Then there exists a unique viscosity solution, continuous on $\widetilde{
\Omega }$, to the initial value problem
\begin{align}
u_{t}+F\left(t,x,u,Du,D^{2}u\right)& =0\qquad \quad \,\text{in}\quad \Omega ^{\circ },
\notag \\
\frac{\partial u}{\partial \widetilde{\gamma }}+f\left(t,x,u\left(t,x\right)\right)& =0\qquad
\quad \,\text{on}\quad \partial \Omega , \notag \\
u\left(0,x\right)& =g\left(x\right)\qquad \text{for}\quad x\in \overline{\Omega }_{0},
\label{initial_value_problem}
\end{align}
where $g\in C\left(\overline{\Omega }_{0}\right)$.
\end{theorem}
Theorems \ref{comparison} and \ref{existence} are proved in Section \ref{PDE}
. The comparison principle in Theorem \ref{comparison} is obtained using two
of the test functions constructed in Section \ref{test} together with
nowadays standard techniques from the theory of viscosity solutions for
fully nonlinear PDEs as described in \cite{CrandallIshiiLions1992}. Our
proof uses ideas from the corresponding elliptic result given in \cite
{DupuisIshii1990}. The uniqueness part of Theorem \ref{existence} is
immediate from the formulation of Theorem \ref{comparison}, which also,
together with the maximum principle in Lemma \ref{maxrand}, allows comparison
in the setting of mixed boundary conditions, as follows.
\begin{corollary}
\label{maxrand_partial} Let $\Omega ^{\circ }$ be a time-dependent domain
satisfying \eqref{timesect} and assume that \eqref{smooth_gamma2}-
\eqref{templip} and \eqref{ass_F_cont}-\eqref{ass_f_nondecreasing} hold. Let
$u\in USC(\widetilde{\Omega })$ be a viscosity subsolution, and $v\in LSC(
\widetilde{\Omega })$ be a viscosity supersolution of \eqref{huvudekvationen}
in $\Omega ^{\circ }$. Suppose also that $u$ and $v$ satisfy the oblique
derivative boundary condition \eqref{randvillkor} on a subset $G\subset
\partial \Omega$.
Then $\sup_{\widetilde{\Omega}}\left(u-v\right)\leq \sup_{\left(\partial \Omega \setminus
G\right)\cup \,\overline{\Omega }_{0}}\left(u-v\right)^{+}$.
\end{corollary}
The existence part of Theorem \ref{existence} is proved using Perron's
method and Corollary \ref{maxrand_partial}, together with constructions of
several explicit viscosity sub- and supersolutions to the problem
\eqref{huvudekvationen}-\eqref{randvillkor}.
\setcounter{equation}{0} \setcounter{theorem}{0}
%
\section{Construction of test functions\label{test}}
In this section we show how the classes of test functions constructed in
\cite{DupuisIshii1990} for time-independent domains can be generalized to
similar classes of test functions valid for time-dependent domains. Lemma
\ref{testlemma3} and Lemma \ref{testlemma4} provide test functions that are
modifications of the square function, but which interact with the direction
of $\gamma $ in a suitable way. The derivations of these functions follow
the lines of the derivations of the corresponding test functions in \cite
{DupuisIshii1990} with the addition that it has to be verified that the time
derivative of the test functions has a certain order. Lemma \ref{testlemma5}
provides a non-negative test function in $\mathcal{C}^{1,2}\left( \overline{
\Omega },
\mathbb{R}
\right) $, whose gradient is aligned with $\gamma $ at the boundary. To
verify the existence of this function, the proof for the corresponding
function in \cite{DupuisIshii1990} has to be extended considerably due to
the time-dependence of the domain. In particular, new methods have to be
used to obtain differentiability with respect to the time variable.
The constructions of the test functions below are given with sufficient
detail and for those parts of the constructions that are identical in
time-dependent and time-independent domains, we refer the reader to \cite
{DupuisIshii1990}. We start by stating a straightforward extension of Lemma
4.4 in \cite{DupuisIshii1990} from $\xi \in S\left( 0,1\right) $ to $\xi \in
B\left( 0,1\right) $. The proof follows directly from the construction in
Lemma 4.4 in \cite{DupuisIshii1990} and is omitted. For any $\theta \in
\left( 0,1\right) $, there exists a function $g\in \mathcal{C}\left(
\mathbb{R}
^{n}\times
\mathbb{R}
^{n},
\mathbb{R}
\right) $ and positive constants $\chi ,C$ such that
\begin{equation}
g\in \mathcal{C}^{1}\left(
\mathbb{R}
^{n}\times
\mathbb{R}
^{n},
\mathbb{R}
\right) \cap \mathcal{C}^{2}\left(
\mathbb{R}
^{n}\times \left(
\mathbb{R}
^{n}\setminus \left\{ 0\right\} \right) ,
\mathbb{R}
\right) , \label{testlemma21}
\end{equation}
\begin{equation}
g\left( \xi ,p\right) \geq \chi \left\vert p\right\vert ^{2},\quad \text{for
}\xi \in B\left( 0,1\right) \text{, }p\in
\mathbb{R}
^{n}, \label{testlemma22}
\end{equation}
\begin{equation}
g\left( \xi ,0\right) =0,\quad \text{for }\xi \in
\mathbb{R}
^{n}, \label{testlemma23}
\end{equation}
\begin{equation}
\left\langle D_{p}g\left( \xi ,p\right) ,\xi \right\rangle \geq 0,\quad
\text{for }\xi \in S\left( 0,1\right) \text{, }p\in
\mathbb{R}
^{n}\text{ and }\left\langle p,\xi \right\rangle \geq -\theta \left\vert
p\right\vert \text{,} \label{testlemma24}
\end{equation}
\begin{equation}
\left\langle D_{p}g\left( \xi ,p\right) ,\xi \right\rangle \leq 0,\quad
\text{for }\xi \in S\left( 0,1\right) \text{, }p\in
\mathbb{R}
^{n}\text{ and }\left\langle p,\xi \right\rangle \leq \theta \left\vert
p\right\vert , \label{testlemma25}
\end{equation}
\begin{equation}
\left\vert D_{\xi }g\left( \xi ,p\right) \right\vert \leq C\left\vert
p\right\vert ^{2},\quad \left\vert D_{p}g\left( \xi ,p\right) \right\vert
\leq C\left\vert p\right\vert ,\quad \text{for }\xi \in B\left( 0,1\right)
\text{, }p\in
\mathbb{R}
^{n}, \label{testlemma26}
\end{equation}
and
\begin{equation}
\left\Vert D_{\xi }^{2}g\left( \xi ,p\right) \right\Vert \leq C\left\vert
p\right\vert ^{2},\quad \left\Vert D_{\xi }D_{p}g\left( \xi ,p\right)
\right\Vert \leq C\left\vert p\right\vert ,\quad \left\Vert D_{p}^{2}g\left(
\xi ,p\right) \right\Vert \leq C, \label{testlemma28}
\end{equation}
for $\xi \in B\left( 0,1\right) $, $p\in
\mathbb{R}
^{n}\setminus \left\{ 0\right\} $. The test function provided by the
following lemma will be used to assert relative compactness of solutions to
the Skorohod problem in Lemma \ref{compactest} below.
\begin{lemma}
\label{testlemma3}For any $\theta \in \left( 0,1\right) $, there exists a
function $h\in \mathcal{C}^{1,2}\left( \left[ 0,T\right] \times
\mathbb{R}
^{n}\times
\mathbb{R}
^{n},
\mathbb{R}
\right) $ and positive constants $\chi ,C$ such that, for all $\left(
t,x,p\right) \in \lbrack 0,T]\times
\mathbb{R}
^{n}\times
\mathbb{R}
^{n}$,
\begin{equation}
h\left( t,x,p\right) \geq \chi \left\vert p\right\vert ^{2},
\label{testlemma31}
\end{equation}
\begin{equation}
h\left( t,x,0\right) =1, \label{testlemma32}
\end{equation}
\begin{equation}
\left\langle D_{p}h\left( t,x,p\right) ,\gamma \left( t,x\right)
\right\rangle \geq 0,\quad \text{for }x\in \partial \Omega _{t}\text{ and }
\left\langle p,\gamma \left( t,x\right) \right\rangle \geq -\theta
\left\vert p\right\vert , \label{testlemma33}
\end{equation}
\begin{equation}
\left\langle D_{p}h\left( t,x,p\right) ,\gamma \left( t,x\right)
\right\rangle \leq 0,\quad \text{for }x\in \partial \Omega _{t}\text{ and }
\left\langle p,\gamma \left( t,x\right) \right\rangle \leq \theta \left\vert
p\right\vert , \label{testlemma34}
\end{equation}
\begin{equation}
\left\vert D_{t}h\left( t,x,p\right) \right\vert \leq C\left\vert
p\right\vert ^{2},\quad \left\vert D_{x}h\left( t,x,p\right) \right\vert
\leq C\left\vert p\right\vert ^{2},\left\vert D_{p}h\left( t,x,p\right)
\right\vert \leq C\left\vert p\right\vert , \label{testlemma35}
\end{equation}
and
\begin{equation}
\left\Vert D_{x}^{2}h\left( t,x,p\right) \right\Vert \leq C\left\vert
p\right\vert ^{2},\quad \left\Vert D_{x}D_{p}h\left( t,x,p\right)
\right\Vert \leq C\left\vert p\right\vert ,\quad \left\Vert D_{p}^{2}h\left(
t,x,p\right) \right\Vert \leq C. \label{testlemma37}
\end{equation}
\end{lemma}
\noindent \textbf{Proof.} Let $\nu \in \mathcal{C}^{2}\left(
\mathbb{R}
,
\mathbb{R}
\right) $ be such that $\nu \left( t\right) =t$ for $t\geq 2$, $\nu \left(
t\right) =1$ for $t\leq 1/2$, $\nu ^{\prime }\left( t\right) \geq 0$ and $
\nu \left( t\right) \geq t$ for all $t\in
\mathbb{R}
$. Let $\theta \in \left( 0,1\right) $ be given, choose $g\in \mathcal{C}
\left(
\mathbb{R}
^{n}\times
\mathbb{R}
^{n},
\mathbb{R}
\right) $ satisfying \eqref{testlemma21}-\eqref{testlemma28} and define
\begin{equation*}
h\left( t,x,p\right) =\nu \left( g\left( \gamma \left( t,x\right) ,p\right)
\right) .
\end{equation*}
The regularity of $h$ follows easily from the regularity of $g$ and $\nu $
and \eqref{testlemma23}. It is straightforward to deduce properties
\eqref{testlemma31}-\eqref{testlemma37} from \eqref{testlemma21}-
\eqref{testlemma28} and we limit the proof to two examples, which are not
fully covered in \cite{DupuisIshii1990}. We have
\begin{equation*}
\left\vert D_{t}h\left( t,x,p\right) \right\vert =\left\vert \nu ^{\prime
}\left( g\left( \gamma \left( t,x\right) ,p\right) \right) \right\vert
\left\vert D_{\xi }g\left( \gamma \left( t,x\right) ,p\right) \right\vert
\left\vert \frac{\partial \gamma }{\partial t}\right\vert \leq C\left\vert
p\right\vert ^{2},
\end{equation*}
by \eqref{testlemma26} and the regularity of $\nu $ and $\gamma $. Moreover,
\begin{eqnarray*}
\left\Vert D_{x}^{2}h\left( t,x,p\right) \right\Vert &\leq &C(n)\bigg(
\left\vert \nu ^{\prime \prime }\left( g\left( \gamma \left( t,x\right)
,p\right) \right) \right\vert \left\vert D_{\xi }g\left( \gamma \left(
t,x\right) ,p\right) \right\vert ^{2}\left\Vert \frac{\partial \gamma }{
\partial x}\right\Vert ^{2} \\
&&+\left\vert \nu ^{\prime }\left( g\left( \gamma \left( t,x\right)
,p\right) \right) \right\vert \left\Vert D_{\xi }^{2}g\left( \gamma \left(
t,x\right) ,p\right) \right\Vert \left\Vert \frac{\partial \gamma }{\partial
x}\right\Vert ^{2} \\
&&+\left\vert \nu ^{\prime }\left( g\left( \gamma \left( t,x\right)
,p\right) \right) \right\vert \left\vert D_{\xi }g\left( \gamma \left(
t,x\right) ,p\right) \right\vert \max_{1\leq k\leq n}\left\Vert \frac{
\partial ^{2}\gamma _{k}}{\partial x^{2}}\right\Vert \bigg).
\end{eqnarray*}
Since $\nu ^{\prime \prime }$ is zero unless $2\geq g\left( \gamma \left(
t,x\right) ,p\right) \geq \chi \left\vert p\right\vert ^{2}$, the first
term, which is of order $C\left\vert p\right\vert ^{4}$, only contributes
for small $\left\vert p\right\vert ^{2}$ and can thus be bounded from above
by $C\left\vert p\right\vert ^{2}$. By \eqref{testlemma26}-
\eqref{testlemma28}, the two latter terms are also bounded from above by $
C\left\vert p\right\vert ^{2}$.
$\Box $
% NOTE(review): removed a duplicated passage here — the proof of Lemma \ref{testlemma3}
% and the lead-in to Lemma \ref{testlemma4} appeared twice (copy/extraction overlap);
% the complete statement of Lemma \ref{testlemma4} follows immediately below.
The test function in Lemma \ref{testlemma3} is also used to verify the
existence of the following test function, which will be useful in the proofs
of Theorem \ref{comparison} and Lemma \ref{rsdetheorem}.
\begin{lemma}
\label{testlemma4}For any $\theta \in \left( 0,1\right) $, there exists a
family $\left\{ w_{\varepsilon }\right\} _{\varepsilon >0}$ of functions $
w_{\varepsilon }\in \mathcal{C}^{1,2}\left( \left[ 0,T\right] \times
\mathbb{R}
^{n}\times
\mathbb{R}
^{n},
\mathbb{R}
\right) $ and positive constants $\chi ,C$ (independent of $\varepsilon $)
such that, for all $\left( t,x,y\right) \in \lbrack 0,T]\times
\mathbb{R}
^{n}\times
\mathbb{R}
^{n}$,
\begin{equation}
w_{\varepsilon }\left( t,x,y\right) \geq \chi \frac{\left\vert
x-y\right\vert ^{2}}{\varepsilon }, \label{testlemma41}
\end{equation}
\begin{equation}
w_{\varepsilon }\left( t,x,y\right) \leq C\left( \varepsilon +\frac{
\left\vert x-y\right\vert ^{2}}{\varepsilon }\right) , \label{testlemma42}
\end{equation}
\begin{equation}
\left\langle D_{x}w_{\varepsilon }\left( t,x,y\right) ,\gamma \left(
t,x\right) \right\rangle \leq C\frac{\left\vert x-y\right\vert ^{2}}{
\varepsilon },\quad \text{for }x\in \partial \Omega _{t}\text{, }
\left\langle y-x,\gamma \left( t,x\right) \right\rangle \geq -\theta
\left\vert x-y\right\vert , \label{testlemma43}
\end{equation}
\begin{equation}
\left\langle D_{y}w_{\varepsilon }\left( t,x,y\right) ,\gamma \left(
t,x\right) \right\rangle \leq 0,\quad \text{for }x\in \partial \Omega _{t}
\text{, }\left\langle x-y,\gamma \left( t,x\right) \right\rangle \geq
-\theta \left\vert x-y\right\vert , \label{testlemma49}
\end{equation}
\begin{equation}
\left\langle D_{y}w_{\varepsilon }\left( t,x,y\right) ,\gamma \left(
t,y\right) \right\rangle \leq C\frac{\left\vert x-y\right\vert ^{2}}{
\varepsilon },\quad \text{for }y\in \partial \Omega _{t}\text{, }
\left\langle x-y,\gamma \left( t,y\right) \right\rangle \geq -\theta
\left\vert x-y\right\vert , \label{testlemma44}
\end{equation}
\begin{equation}
\left\vert D_{t}w_{\varepsilon }\left( t,x,y\right) \right\vert \leq C\frac{
\left\vert x-y\right\vert ^{2}}{\varepsilon }, \label{testlemma45}
\end{equation}
\begin{equation}
\left\vert D_{y}w_{\varepsilon }\left( t,x,y\right) \right\vert \leq C\frac{
\left\vert x-y\right\vert }{\varepsilon },\quad \left\vert
D_{x}w_{\varepsilon }\left( t,x,y\right) +D_{y}w_{\varepsilon }\left(
t,x,y\right) \right\vert \leq C\frac{\left\vert x-y\right\vert ^{2}}{
\varepsilon }, \label{testlemma46}
\end{equation}
and
\begin{equation}
D^{2}w_{\varepsilon }\left( t,x,y\right) \leq \frac{C}{\varepsilon }\left(
\begin{array}{cc}
I & -I \\
-I & I
\end{array}
\right) +\frac{C\left\vert x-y\right\vert ^{2}}{\varepsilon }\left(
\begin{array}{cc}
I & 0 \\
0 & I
\end{array}
\right) . \label{testlemma47}
\end{equation}
\end{lemma}
\noindent \textbf{Proof.} Let $\theta \in \left( 0,1\right) $ be given and
choose $h\in \mathcal{C}^{1,2}\left( \left[ 0,T\right] \times
\mathbb{R}
^{n}\times
\mathbb{R}
^{n},
\mathbb{R}
\right) $ as in Lemma \ref{testlemma3}. For all $\varepsilon >0$, we define
the function $w_{\varepsilon }$ as
\begin{equation*}
w_{\varepsilon }\left( t,x,y\right) =\varepsilon h\left( t,x,\frac{x-y}{
\varepsilon }\right) .
\end{equation*}
Property \eqref{testlemma41} follows easily from \eqref{testlemma31} and
property \eqref{testlemma42} was verified in Remark 3.3 in \cite
{DupuisIshii1993}. Moreover, properties \eqref{testlemma43},
\eqref{testlemma49}, \eqref{testlemma46} and \eqref{testlemma47} were
verified in the proof of Theorem 4.1 in \cite{DupuisIshii1990} and
\eqref{testlemma45} is a simple consequence of \eqref{testlemma35}. To prove
\eqref{testlemma44}, we note that
\begin{eqnarray*}
\left\langle D_{y}w_{\varepsilon }\left( t,x,y\right) ,\gamma \left(
t,y\right) \right\rangle &=&-\left\langle D_{p}h\left( t,x,\frac{x-y}{
\varepsilon }\right) ,\gamma \left( t,y\right) \right\rangle \\
&=&-\left\langle D_{p}h\left( t,y,\frac{x-y}{\varepsilon }\right) ,\gamma
\left( t,y\right) \right\rangle \\
&&+\left\langle D_{p}h\left( t,y,\frac{x-y}{\varepsilon }\right)
-D_{p}h\left( t,x,\frac{x-y}{\varepsilon }\right) ,\gamma \left( t,y\right)
\right\rangle .
\end{eqnarray*}
Moreover, if $\left\langle x-y,\gamma \left( t,y\right) \right\rangle \geq
-\theta \left\vert x-y\right\vert $, then by \eqref{testlemma33}, $
\left\langle D_{p}h\left( t,y,p\right) ,\gamma \left( t,y\right)
\right\rangle \geq 0$ with $p=\left( x-y\right) /\varepsilon $. Hence, for
some $\xi $ in the segment joining $x$ and $y$, we obtain, with the aid of
the mean value theorem and \eqref{testlemma37},
\begin{eqnarray*}
\left\langle D_{y}w_{\varepsilon }\left( t,x,y\right) ,\gamma \left(
t,y\right) \right\rangle &\leq &\left\Vert D_{x}D_{p}h\left( t,\xi ,\frac{x-y
}{\varepsilon }\right) \right\Vert \left\vert x-y\right\vert \\
&\leq &C\left\vert \frac{x-y}{\varepsilon }\right\vert \left\vert
x-y\right\vert =C\frac{\left\vert x-y\right\vert ^{2}}{\varepsilon }.
\end{eqnarray*}
$\Box $
We conclude this section by proving Lemma \ref{testlemma5} using an
appropriate Cauchy problem. The test function $\alpha $ in Lemma \ref
{testlemma5} will be crucial for the proofs of Theorems \ref{comparison},
\ref{existence} and Lemma \ref{rsdetheorem}.
\begin{lemma}
\label{testlemma5} There exists a nonnegative function $\alpha \in \mathcal{C
}^{1,2}\left( \overline{\Omega },
\mathbb{R}
\right) $, which satisfies
\begin{equation}
\left\langle D_{x}\alpha \left( t,x\right) ,\gamma \left( t,x\right)
\right\rangle \geq 1, \label{alfaprop}
\end{equation}
for $x\in \partial \Omega _{t}$, $t\in \left[ 0,T\right] $. Moreover, the
support of $\alpha$ can be assumed to lie in the neighbourhood $U$ defined
in \eqref{ass_F_boundary}.
\end{lemma}
\noindent \textbf{Proof.} Fix $s\in \left[ 0,T\right] $ and $z\in \partial
\Omega _{s}$ and define $H_{s,z}$ as the hyperplane
\begin{equation*}
H_{s,z}=\left\{ x\in
\mathbb{R}
^{n}:\left\langle x-z,\gamma \left( s,z\right) \right\rangle =0\right\} .
\end{equation*}
Given a function $u_{0}\in \mathcal{C}^{2}\left( H_{s,z},
\mathbb{R}
\right) $, such that $u_{0}\left( z\right) =1$, $u_{0}\geq 0$ and supp $
u_{0}\subset B\left( z,\delta ^{2}/4\right) \cap H_{s,z}$, we can use the
method of characteristics to solve the Cauchy problem
\begin{eqnarray*}
\left\langle D_{x}u_{\left( t\right) }\left( x\right) ,\gamma \left(
t,x\right) \right\rangle &=&0, \\
\left. u_{\left( t\right) }\right\vert _{H_{s,z}} &=&u_{0}.
\end{eqnarray*}
Choosing the positive constants $\delta $ and $\eta $ sufficiently small,
the Cauchy problem above has, for all $t\in \left[ s-\eta ,s+\eta \right] $,
a solution $u_{\left( t\right) }\in \mathcal{C}^{2}\left( B\left( z,\delta
\right) ,
\mathbb{R}
\right) $ satisfying $u_{\left( t\right) }\geq 0$. Based on the continuity
of $\gamma $ and the restriction on the support of $u_{0}$, we may also
assume that
\begin{equation*}
\text{supp }u_{\left( t\right) }\subset \bigcup_{\zeta \in
\mathbb{R}
}B(z-\zeta \gamma (s,z),\delta ^{2}/3)\cap B\left( z,\delta \right) .
\end{equation*}
Next, we define the combined function
\begin{equation*}
u\left( t,x\right) =u_{\left( t\right) }\left( x\right) ,
\end{equation*}
and we claim for now that $u\in \mathcal{C}^{1,2}\left( \left[ s-\eta
,s+\eta \right] \times B\left( z,\delta \right) ,
\mathbb{R}
| 3,990 | 73,518 |
en
|
train
|
0.156.6
|
$\Box $
We conclude this section by proving Lemma \ref{testlemma5} using an
appropriate Cauchy problem. The test function $\alpha $ in Lemma \ref
{testlemma5} will be crucial for the proofs of Theorems \ref{comparison},
\ref{existence} and Lemma \ref{rsdetheorem}.
\begin{lemma}
\label{testlemma5} There exists a nonnegative function $\alpha \in \mathcal{C
}^{1,2}\left( \overline{\Omega },
\mathbb{R}
\right) $, which satisfies
\begin{equation}
\left\langle D_{x}\alpha \left( t,x\right) ,\gamma \left( t,x\right)
\right\rangle \geq 1, \label{alfaprop}
\end{equation}
for $x\in \partial \Omega _{t}$, $t\in \left[ 0,T\right] $. Moreover, the
support of $\alpha$ can be assumed to lie in the neighbourhood $U$ defined
in \eqref{ass_F_boundary}.
\end{lemma}
\noindent \textbf{Proof.} Fix $s\in \left[ 0,T\right] $ and $z\in \partial
\Omega _{s}$ and define $H_{s,z}$ as the hyperplane
\begin{equation*}
H_{s,z}=\left\{ x\in
\mathbb{R}
^{n}:\left\langle x-z,\gamma \left( s,z\right) \right\rangle =0\right\} .
\end{equation*}
Given a function $u_{0}\in \mathcal{C}^{2}\left( H_{s,z},
\mathbb{R}
\right) $, such that $u_{0}\left( z\right) =1$, $u_{0}\geq 0$ and supp $
u_{0}\subset B\left( z,\delta ^{2}/4\right) \cap H_{s,z}$, we can use the
method of characteristics to solve the Cauchy problem
\begin{eqnarray*}
\left\langle D_{x}u_{\left( t\right) }\left( x\right) ,\gamma \left(
t,x\right) \right\rangle &=&0, \\
\left. u_{\left( t\right) }\right\vert _{H_{s,z}} &=&u_{0}.
\end{eqnarray*}
Choosing the positive constants $\delta $ and $\eta $ sufficiently small,
the Cauchy problem above has, for all $t\in \left[ s-\eta ,s+\eta \right] $,
a solution $u_{\left( t\right) }\in \mathcal{C}^{2}\left( B\left( z,\delta
\right) ,
\mathbb{R}
\right) $ satisfying $u_{\left( t\right) }\geq 0$. Based on the continuity
of $\gamma $ and the restriction on the support of $u_{0}$, we may also
assume that
\begin{equation*}
\text{supp }u_{\left( t\right) }\subset \bigcup_{\zeta \in
\mathbb{R}
}B(z-\zeta \gamma (s,z),\delta ^{2}/3)\cap B\left( z,\delta \right) .
\end{equation*}
Next, we define the combined function
\begin{equation*}
u\left( t,x\right) =u_{\left( t\right) }\left( x\right) ,
\end{equation*}
and we claim for now that $u\in \mathcal{C}^{1,2}\left( \left[ s-\eta
,s+\eta \right] \times B\left( z,\delta \right) ,
\mathbb{R}
\right) $ and postpone the proof of this claim to the end of the proof of
the lemma. By the exterior and interior cone conditions, we can, for
sufficiently small $\delta $, find $\varepsilon >0$ such that
\begin{eqnarray*}
&&\bigcup_{\zeta >0}B(z-\zeta \gamma (s,z),\delta ^{2}/3)\cap \left(B\left(
z,\delta \right) \setminus B\left( z,\delta -2\varepsilon \right)\right) \\
&\subset &\bigcup_{\zeta >0}B(z-\zeta \gamma (s,z),\zeta \delta )\cap
B\left( z,\delta \right) \subset \Omega _{s}^{c},
\end{eqnarray*}
and such that the similar union over $\zeta <0$ belongs to $\Omega _{s}$.
Hence
\begin{equation*}
\partial \Omega _{s}\cap \left(\text{supp }u_{\left( t\right) }\setminus
B\left( z,\delta -2\varepsilon \right)\right) =\emptyset ,
\end{equation*}
and, by \eqref{tempholder}, it follows that if $\eta $ also satisfies the
constraint $\eta <\left( \varepsilon /K\right) ^{1/\widehat{\alpha }}$, then
\begin{equation}
\partial \Omega _{t}\cap \left(\text{supp }u_{\left( t\right) }\setminus
B\left( z,\delta -\varepsilon \right)\right) =\emptyset \text{,}
\label{suppbound}
\end{equation}
for all $t\in \left[ s-\eta ,s+\eta \right] $.
Now, choose a function $\xi \in \mathcal{C}_{0}^{1,2}\left( \left[ s-\eta
,s+\eta \right] \times B\left( z,\delta \right) ,
\mathbb{R}
\right) $ so that $\xi \left( t,x\right) =1$ for $t\in \left[ s-\eta +
\varepsilon ,s+\eta - \varepsilon \right] $, $x\in B\left( z,\delta
-\varepsilon \right) $ and $\xi \geq 0$, and set
\begin{equation*}
v_{s,z}\left( t,x\right) =u\left( t,x\right) \xi \left( t,x\right) .
\end{equation*}
Then $v_{s,z}\in \mathcal{C}_{0}^{1,2}\left( \left[ s-\eta ,s+\eta \right]
\times B\left( z,\delta \right) ,
\mathbb{R}
\right) $ satisfies $v_{s,z}\geq 0$. By \eqref{suppbound} and the
construction of $u$ and $\xi $, we obtain
\begin{equation*}
\left\langle D_{x}v_{s,z}\left( t,x\right) ,\gamma \left( t,x\right)
\right\rangle =0\text{ for }x\in B\left( z,\delta \right) \cap \partial
\Omega _{t}\text{, }t\in \left[ s-\eta ,s+\eta \right] .
\end{equation*}
Define $w_{s,z}\in \mathcal{C}^{2}\left( B\left( z,\delta \right) ,
\mathbb{R}
\right) $ by
\begin{equation*}
w_{s,z}\left( x\right) =\left\langle x-z,\gamma \left( s,z\right)
\right\rangle +M,
\end{equation*}
where $M$ is large enough so that $w_{s,z}\geq 0$. Using the continuity of $
\gamma $, we can find $\delta $ and $\eta $ such that $\left\langle \gamma
\left( s,z\right) ,\gamma \left( t,x\right) \right\rangle \geq 0$ for all $
\left( t,x\right) \in \left[ s-\eta ,s+\eta \right] \times B\left( z,\delta
\right) $. Setting
\begin{equation*}
g_{s,z}\left( t,x\right) =v_{s,z}\left( t,x\right) w_{s,z}\left( x\right) ,
\end{equation*}
we find that $g_{s,z}\in \mathcal{C}_{0}^{1,2}\left( \left[ s-\eta ,s+\eta
\right] \times B\left( z,\delta \right) ,
\mathbb{R}
\right) $ satisfies $g_{s,z}\geq 0$. Moreover, using $\left\vert \gamma
\left( t,x\right) \right\vert =1$, we have
\begin{eqnarray*}
\left\langle D_{x}g_{s,z}\left( s,z\right) ,\gamma \left( s,z\right)
\right\rangle &=&v_{s,z}\left( s,z\right) \left\langle D_{x}w_{s,z}\left(
z\right) ,\gamma \left( s,z\right) \right\rangle \\
&&+w_{s,z}\left( z\right) \left\langle D_{x}v_{s,z}\left( s,z\right) ,\gamma
\left( s,z\right) \right\rangle \\
&=&u\left( s,z\right) \xi \left( s,z\right) \left\vert \gamma \left(
s,z\right) \right\vert ^{2}=1,
\end{eqnarray*}
and a similar calculation shows that
\begin{eqnarray*}
\left\langle D_{x}g_{s,z}\left( t,x\right) ,\gamma \left( t,x\right)
\right\rangle &=&v_{s,z}\left( t,x\right) \left\langle D_{x}w_{s,z}\left(
x\right) ,\gamma \left( t,x\right) \right\rangle \\
&&+w_{s,z}\left( x\right) \left\langle D_{x}v_{s,z}\left( t,x\right) ,\gamma
\left( t,x\right) \right\rangle \\
&=&v_{s,z}\left( t,x\right) \left\langle \gamma \left( s,z\right) ,\gamma
\left( t,x\right) \right\rangle \geq 0,
\end{eqnarray*}
for $x\in B\left( z,\delta \right) \cap \partial \Omega _{t}$, $t\in \left[
s-\eta ,s+\eta \right] $. Now,
using a standard compactness argument we conclude the existence of a
nonnegative function $\alpha \in \mathcal{C}^{1,2}(\overline{\Omega },
\mathbb{R})$, which satisfies $\left\langle D_{x}\alpha \left( t,x\right)
,\gamma \left( t,x\right) \right\rangle \geq 1$ for $x\in \partial \Omega
_{t}$, $t\in \left[ 0,T\right] $. Moreover, by the above construction, we
can assume that the support of $\alpha $ lies within the neighbourhood $U$
defined in \eqref{ass_F_boundary}.
It remains to prove the proposed regularity $u\in \mathcal{C}^{1,2}\left(
\left[ s-\eta ,s+\eta \right] \times B\left( z,\delta \right) ,
\mathbb{R}
\right) $. The regularity in the spatial variables follows directly by
construction, so it remains to show that $u$ is continuously differentiable
in the time variable. Let $x\in B\left( z,\delta \right) $ and let $t$ and $
t+h$ belong to $\left[ s-\eta ,s+\eta \right] $. Denote by $y\left( t,\cdot
\right) $ and $y\left( t+h,\cdot \right) $ the characteristic curves through
$x$ for the vector fields $\gamma \left( t,\cdot \right) $ and $\gamma
\left( t+h,\cdot \right) $, respectively, so that
\begin{eqnarray*}
\frac{\partial y}{\partial r}\left( t,r\right) &=&\pm \gamma \left(
t,y\left( t,r\right) \right) , \\
y\left( t,0\right) &=&x,
\end{eqnarray*}
and analogously for $y\left( t+h,\cdot \right) $. Choose the sign in the
parametrization of $y\left( t,\cdot \right) $ so that there exists some $
r\left( t\right) >0$ such that $y\left( t,r\left( t\right) \right) =z\left(
t\right) \in H_{s,z}$. Choosing the same sign in the parametrization of $
y\left( t+h,\cdot \right) $ guarantees the existence of some $r\left(
t+h\right) >0$ such that $y\left( t+h,r\left( t+h\right) \right) =z\left(
t+h\right) \in H_{s,z}$. Without loss of generality, we assume the sign
above to be positive. Since $u\left( t,x\right) =u_{0}\left( z\left(
t\right) \right) $, where $u_{0}$ is continuously differentiable, it remains
to show that the function $z$ is continuously differentiable.
| 3,229 | 73,518 |
en
|
train
|
0.156.7
|
\mathbb{R}
\right) $ by
\begin{equation*}
w_{s,z}\left( x\right) =\left\langle x-z,\gamma \left( s,z\right)
\right\rangle +M,
\end{equation*}
where $M$ is large enough so that $w_{s,z}\geq 0$. Using the continuity of $
\gamma $, we can find $\delta $ and $\eta $ such that $\left\langle \gamma
\left( s,z\right) ,\gamma \left( t,x\right) \right\rangle \geq 0$ for all $
\left( t,x\right) \in \left[ s-\eta ,s+\eta \right] \times B\left( z,\delta
\right) $. Setting
\begin{equation*}
g_{s,z}\left( t,x\right) =v_{s,z}\left( t,x\right) w_{s,z}\left( x\right) ,
\end{equation*}
we find that $g_{s,z}\in \mathcal{C}_{0}^{1,2}\left( \left[ s-\eta ,s+\eta
\right] \times B\left( z,\delta \right) ,
\mathbb{R}
\right) $ satisfies $g_{s,z}\geq 0$. Moreover, using $\left\vert \gamma
\left( t,x\right) \right\vert =1$, we have
\begin{eqnarray*}
\left\langle D_{x}g_{s,z}\left( s,z\right) ,\gamma \left( s,z\right)
\right\rangle &=&v_{s,z}\left( s,z\right) \left\langle D_{x}w_{s,z}\left(
z\right) ,\gamma \left( s,z\right) \right\rangle \\
&&+w_{s,z}\left( z\right) \left\langle D_{x}v_{s,z}\left( s,z\right) ,\gamma
\left( s,z\right) \right\rangle \\
&=&u\left( s,z\right) \xi \left( s,z\right) \left\vert \gamma \left(
s,z\right) \right\vert ^{2}=1,
\end{eqnarray*}
and a similar calculation shows that
\begin{eqnarray*}
\left\langle D_{x}g_{s,z}\left( t,x\right) ,\gamma \left( t,x\right)
\right\rangle &=&v_{s,z}\left( t,x\right) \left\langle D_{x}w_{s,z}\left(
x\right) ,\gamma \left( t,x\right) \right\rangle \\
&&+w_{s,z}\left( x\right) \left\langle D_{x}v_{s,z}\left( t,x\right) ,\gamma
\left( t,x\right) \right\rangle \\
&=&v_{s,z}\left( t,x\right) \left\langle \gamma \left( s,z\right) ,\gamma
\left( t,x\right) \right\rangle \geq 0,
\end{eqnarray*}
for $x\in B\left( z,\delta \right) \cap \partial \Omega _{t}$, $t\in \left[
s-\eta ,s+\eta \right] $. Now,
using a standard compactness argument we conclude the existence of a
nonnegative function $\alpha \in \mathcal{C}^{1,2}(\overline{\Omega },
\mathbb{R})$, which satisfies $\left\langle D_{x}\alpha \left( t,x\right)
,\gamma \left( t,x\right) \right\rangle \geq 1$ for $x\in \partial \Omega
_{t}$, $t\in \left[ 0,T\right] $. Moreover, by the above construction, we
can assume that the support of $\alpha $ lies within the neighbourhood $U$
defined in \eqref{ass_F_boundary}.
It remains to prove the proposed regularity $u\in \mathcal{C}^{1,2}\left(
\left[ s-\eta ,s+\eta \right] \times B\left( z,\delta \right) ,
\mathbb{R}
\right) $. The regularity in the spatial variables follows directly by
construction, so it remains to show that $u$ is continuously differentiable
in the time variable. Let $x\in B\left( z,\delta \right) $ and let $t$ and $
t+h$ belong to $\left[ s-\eta ,s+\eta \right] $. Denote by $y\left( t,\cdot
\right) $ and $y\left( t+h,\cdot \right) $ the characteristic curves through
$x$ for the vector fields $\gamma \left( t,\cdot \right) $ and $\gamma
\left( t+h,\cdot \right) $, respectively, so that
\begin{eqnarray*}
\frac{\partial y}{\partial r}\left( t,r\right) &=&\pm \gamma \left(
t,y\left( t,r\right) \right) , \\
y\left( t,0\right) &=&x,
\end{eqnarray*}
and analogously for $y\left( t+h,\cdot \right) $. Choose the sign in the
parametrization of $y\left( t,\cdot \right) $ so that there exists some $
r\left( t\right) >0$ such that $y\left( t,r\left( t\right) \right) =z\left(
t\right) \in H_{s,z}$. Choosing the same sign in the parametrization of $
y\left( t+h,\cdot \right) $ guarantees the existence of some $r\left(
t+h\right) >0$ such that $y\left( t+h,r\left( t+h\right) \right) =z\left(
t+h\right) \in H_{s,z}$. Without loss of generality, we assume the sign
above to be positive. Since $u\left( t,x\right) =u_{0}\left( z\left(
t\right) \right) $, where $u_{0}$ is continuously differentiable, it remains
to show that the function $z$ is continuously differentiable.
We will first show that $y\left( \cdot ,r\right) $ is continuously
differentiable by following an argument that can be found in e.g. \cite
{Monti2010}. Differentiating the Cauchy problem formally with respect to the
time variable and introducing the function $\psi \left( t,r\right) =\dfrac{
\partial y}{\partial t}\left( t,r\right) $, we obtain
\begin{eqnarray*}
\frac{\partial }{\partial r}\psi \left( t,r\right) &=&\frac{\partial \gamma
}{\partial t}\left( t,y\left( t,r\right) \right) +\frac{\partial \gamma }{
\partial y}\left( t,y\left( t,r\right) \right) \psi \left( t,r\right) , \\
\psi \left( t,0\right) &=&0.
\end{eqnarray*}
This Cauchy problem has a unique solution, which we will next show satisfies
\begin{equation}
\psi \left( t,r\right) =\lim_{h\rightarrow 0}\frac{y\left( t+h,r\right)
-y\left( t,r\right) }{h}, \label{eq:limit_true}
\end{equation}
so that $\psi $ is in fact the time derivative of $y$ (not just formally).
Define
\begin{equation}
R\left( t,r,h\right) =\frac{y\left( t+h,r\right) -y\left( t,r\right) }{h}
-\psi \left( t,r\right) . \label{eq:R_def}
\end{equation}
Now
\begin{eqnarray*}
R\left( t,r,h\right) &=&\int_{0}^{r}\left( \frac{\gamma \left( t+h,y\left(
t+h,u\right) \right) -\gamma \left( t,y\left( t,u\right) \right) }{h}\right)
du \\
&&-\int_{0}^{r}\left( \frac{\partial \gamma }{\partial t}\left( t,y\left(
t,u\right) \right) +\frac{\partial \gamma }{\partial y}\left( t,y\left(
t,u\right) \right) \psi \left( t,u\right) \right) du.
\end{eqnarray*}
By the mean value theorem
\begin{eqnarray*}
&&\gamma _{i}\left( t+h,y\left( t+h,u\right) \right) -\gamma _{i}\left(
t,y\left( t,u\right) \right) \\
&=&\frac{\partial \gamma _{i}}{\partial t}\left( \overline{t}_{i},y\left(
t+h,u\right) \right) h+\frac{\partial \gamma _{i}}{\partial y}\left( t,
\overline{y}_{i}\right) \left( y\left( t+h,u\right) -y\left( t,u\right)
\right) ,
\end{eqnarray*}
for some $\overline{t}_{i}$ between $t$ and $t+h$, some $\overline{y}_{i}$
between $y\left( t,u\right) $ and $y\left( t+h,u\right) $ and all $i\in
\left\{ 1,...,n\right\} $. Hence, the $i^{\text{th}}$ component of $R\left(
t,r,h\right) $ is
\begin{eqnarray*}
R_{i}\left( t,r,h\right) &=&\int_{0}^{r}\left( \frac{\partial \gamma _{i}}{
\partial t}\left( \overline{t}_{i},y\left( t+h,u\right) \right) -\frac{
\partial \gamma _{i}}{\partial t}\left( t,y\left( t,u\right) \right) \right)
du \\
&&+\int_{0}^{r}\left( \frac{\partial \gamma _{i}}{\partial y}\left( t,
\overline{y}_{i}\right) \frac{y\left( t+h,u\right) -y\left( t,u\right) }{h}-
\frac{\partial \gamma _{i}}{\partial y}\left( t,y\left( t,u\right) \right)
\psi \left( t,u\right) \right) du,
\end{eqnarray*}
where the second term on the right hand side can be rewritten as
\begin{equation*}
\int_{0}^{r}\left( \frac{\partial \gamma _{i}}{\partial y}\left( t,\overline{
y}_{i}\right) R\left( t,u,h\right) +\left( \frac{\partial \gamma _{i}}{
\partial y}\left( t,\overline{y}_{i}\right) -\frac{\partial \gamma _{i}}{
\partial y}\left( t,y\left( t,u\right) \right) \right) \psi \left(
t,u\right) \right) du.
\end{equation*}
Therefore we have
\begin{eqnarray*}
\left\vert R\left( t,r,h\right) \right\vert &\leq
&\int_{0}^{r}\sum_{i=1}^{n}\left\vert \frac{\partial \gamma _{i}}{\partial t}
\left( \overline{t}_{i},y\left( t+h,u\right) \right) -\frac{\partial \gamma
_{i}}{\partial t}\left( t,y\left( t,u\right) \right) \right\vert du \\
&&+\int_{0}^{r}\left\vert R\left( t,u,h\right) \right\vert
\sum_{i=1}^{n}\left\vert \frac{\partial \gamma _{i}}{\partial y}\left( t,
\overline{y}_{i}\right) \right\vert du \\
&&+\int_{0}^{r}\left\vert \psi \left( t,u\right) \right\vert
\sum_{i=1}^{n}\left\vert \frac{\partial \gamma _{i}}{\partial y}\left( t,
\overline{y}_{i}\right) -\frac{\partial \gamma _{i}}{\partial y}\left(
t,y\left( t,u\right) \right) \right\vert du,
\end{eqnarray*}
and by Gronwall's inequality we obtain
\begin{eqnarray}
\left\vert R\left( t,r,h\right) \right\vert &\leq
&C\int_{0}^{r}\sum_{i=1}^{n}\left\vert \frac{\partial \gamma _{i}}{\partial t
}\left( \overline{t}_{i},y\left( t+h,u\right) \right) -\frac{\partial \gamma
_{i}}{\partial t}\left( t,y\left( t,u\right) \right) \right\vert du \notag
\label{eq:gronvall_deriv} \\
&&+C\int_{0}^{r}\left\vert \psi \left( t,u\right) \right\vert
\sum_{i=1}^{n}\left\vert \frac{\partial \gamma _{i}}{\partial y}\left( t,
\overline{y}_{i}\right) -\frac{\partial \gamma _{i}}{\partial y}\left(
t,y\left( t,u\right) \right) \right\vert du,
\end{eqnarray}
for some positive constant $C$. Since $\psi \left( t,u\right) $ exists and
is bounded, and since the time and space
derivatives of $\gamma $ are continuous, \eqref{eq:gronvall_deriv} implies
boundedness of $\left\vert R\left( t,r,h\right) \right\vert $. Therefore, by
\eqref{eq:R_def} we have $|y\left( t+h,r\right) -y\left( t,r\right) |\leq C\left\vert h\right\vert$
, for some constant $C$, and we can conclude that $\overline{y}
_{i}\rightarrow y(t,u)$ and $\overline{t}_{i}\rightarrow t$ for all $i\in
\{1,\dots ,n\}$ as $h\rightarrow 0$. It follows that the differences in the
integrands in \eqref{eq:gronvall_deriv} vanish as $h\rightarrow 0$ and hence
$\lim_{h\rightarrow 0}R\left( t,r,h\right) =0$. This proves
\eqref{eq:limit_true} and therefore that $y\left( \cdot ,r\right) $ is
continuously differentiable.
Now, by the mean value theorem,
\begin{eqnarray*}
z_{i}(t+h)-z_{i}(t) &=&y_{i}(t+h,r\left( t+h\right) )-y_{i}(t,r\left(
t\right) ) \\
&=&y_{i}(t+h,r\left( t+h\right) )-y_{i}(t+h,r\left( t\right) ) \\
&&+y_{i}(t+h,r\left( t\right) )-y_{i}(t,r\left( t\right) ) \\
&=&\frac{\partial y_{i}}{\partial r}\left( t+h,\overline{r}_{i}\right)
\left( r\left( t+h\right) -r\left( t\right) \right) +\frac{\partial y_{i}}{
\partial t}\left( \overline{t}_{i},r\left( t\right) \right) h,
\end{eqnarray*}
for some $\overline{r}_{i}$ between $r\left( t\right) $ and $
r\left(t+h\right)$, some $\overline{t}_{i}$ between $t$ and $t+h$ and all $
i\in \left\{ 1,...,n\right\}$.
| 3,943 | 73,518 |
en
|
train
|
0.156.8
|
Now, by the mean value theorem,
\begin{eqnarray*}
z_{i}(t+h)-z_{i}(t) &=&y_{i}(t+h,r\left( t+h\right) )-y_{i}(t,r\left(
t\right) ) \\
&=&y_{i}(t+h,r\left( t+h\right) )-y_{i}(t+h,r\left( t\right) ) \\
&&+y_{i}(t+h,r\left( t\right) )-y_{i}(t,r\left( t\right) ) \\
&=&\frac{\partial y_{i}}{\partial r}\left( t+h,\overline{r}_{i}\right)
\left( r\left( t+h\right) -r\left( t\right) \right) +\frac{\partial y_{i}}{
\partial t}\left( \overline{t}_{i},r\left( t\right) \right) h,
\end{eqnarray*}
for some $\overline{r}_{i}$ between $r\left( t\right) $ and $
r\left(t+h\right)$, some $\overline{t}_{i}$ between $t$ and $t+h$ and all $
i\in \left\{ 1,...,n\right\}$.
Since the function $r(t)$ is defined so that
\begin{align*}
\langle y(t, r(t)) - z, \gamma(s, z)\rangle = 0, \quad t \in (s - \eta, s +
\eta),
\end{align*}
it follows by the implicit function theorem and by the regularity of $y(t,r)$
that $r(t)$ is a continuously differentiable function. Hence, we conclude
that $\overline{r}_i \rightarrow r(t)$ and $\overline{t}_i \rightarrow t$,
all $i \in \{1,\dots,n\}$, as $h \rightarrow 0$ and therefore,
\begin{equation*}
\lim_{h\rightarrow 0}\frac{z(t+h)-z(t)}{h}=\frac{\partial y}{\partial r}
\left( t,r\left( t\right) \right) r^{\prime }\left( t\right) +\frac{\partial
y}{\partial t}\left( t,r\left( t\right) \right) ,
\end{equation*}
where the right hand side is a continuous function. This proves that $z$ is
continuously differentiable and, hence, that $u\in \mathcal{C}^{1,2}\left(
\left[ s-\eta ,s+\eta \right] \times B\left( z,\delta \right) ,
\mathbb{R}
\right) $.
$\Box $
\setcounter{equation}{0} \setcounter{theorem}{0}
| 672 | 73,518 |
en
|
train
|
0.156.9
|
\section{The Skorohod problem\label{SP}}
In this section we prove existence of solutions to the Skorohod problem
under the assumptions in Section \ref{geoassume}. This result could be
achieved using the methods in \cite{NystromOnskog2010a}, but as we here
assume more regularity on the direction of reflection and the temporal
variation of the domain compared to the setting in \cite{NystromOnskog2010a}
(and this is essential for the other sections of this article), we follow a
more direct approach using a penalty method. We first note that, mimicking
the proof of Lemma 4.1 in \cite{DupuisIshii1993}, we can prove the following
result.
\begin{lemma}
\label{dlemma} There is a constant $\mu >0$ such that, for every $t\in \left[
0,T\right] $, there exists a neighbourhood $U_{t}$ of $\partial \Omega _{t}$
such that
\begin{equation}
\left\langle D_{x}d\left( t,x\right) ,\gamma \left( t,x\right) \right\rangle
\leq -\mu ,\quad \text{for a.e. }x\in U_{t}\setminus \overline{\Omega }_{t}
\text{.} \label{unmollineq}
\end{equation}
\end{lemma}
As \eqref{unmollineq} holds only for almost every point in a neighbourhood
of a non-smooth domain, we cannot apply \eqref{unmollineq} directly and will
use the following mollifier approach instead. Based on the construction of
the neighbourhoods $\left\{ U_{t}\right\} _{t\in \left[ 0,T\right] }$ in
Lemma \ref{dlemma} (see the proof of the corresponding lemma in \cite
{DupuisIshii1993} for details), there exists a constant $\overline{\beta }>0$
such that $B\left( x,3\overline{\beta }\right) \subset U_{t}$ for all $x\in
\partial \Omega _{t}$, $t\in \left[ 0,T\right] $. For the value of $p$ given
in \eqref{templip}, let
\begin{equation*}
v\left( t,x\right) =\left( d\left( t,x\right) \right) ^{p}\quad \text{
and\quad }\widetilde{v}\left( t,x\right) =\left( d\left( t,x\right) \right)
^{p-1}.
\end{equation*}
Moreover, let $\varphi _{\beta }\in $ $\mathcal{C}^{\infty }\left(
\mathbb{R}
^{n},
\mathbb{R}
\right) $ be a positive mollifier with support in $B\left( 0,\beta \right) $
, for some $\beta >0$, and define the spatial convolutions
\begin{equation*}
v_{\beta }=v\ast \varphi _{\beta }\quad \text{and\quad }\widetilde{v}_{\beta
}=\widetilde{v}\ast \varphi _{\beta }.
\end{equation*}
\begin{lemma}
\label{vlemma} There is a constant $\kappa >0$ such that, for sufficiently
small $\beta > 0$ and every $t\in \left[ 0,T\right]$, there exists a
neighbourhood $\widetilde{U}_{t}$ of $\partial \Omega _{t}$, $\widetilde{U}
_{t}\supset \left\{ x:d\left( x,\partial \Omega _{t}\right) <2\overline{
\beta }\right\}$, such that
\begin{equation}
\left\langle D_{x}v_{\beta }\left( t,x\right) ,\gamma \left( t,x\right)
\right\rangle \leq -\kappa \widetilde{v}_{\beta }\left( t,x\right) ,\quad
\text{for }x\in \widetilde{U}_{t}\setminus \overline{\Omega }_{t}\text{.}
\label{mollineq}
\end{equation}
\end{lemma}
\noindent \textbf{Proof.} For all $x\in U_{t}\setminus \overline{\Omega }
_{t} $ such that $B\left( x,\overline{\beta }\right) \subset U_{t}$ and for
all $\beta \leq \overline{\beta }$, we have
\begin{eqnarray*}
&&\left\langle D_{x}v_{\beta }\left( t,x\right) ,\gamma \left( t,x\right)
\right\rangle =\int_{
\mathbb{R}
^{n}}\left\langle \varphi _{\beta }\left( x-y\right) D_{y}v\left( t,y\right)
,\gamma \left( t,x\right) \right\rangle dy \\
&=&\int_{
\mathbb{R}
^{n}}\left( \left\langle D_{y}v\left( t,y\right) ,\gamma \left( t,y\right)
\right\rangle +\left\langle D_{y}v\left( t,y\right) ,\gamma \left(
t,x\right) -\gamma \left( t,y\right) \right\rangle \right) \varphi _{\beta
}\left( x-y\right) dy.
\end{eqnarray*}
The inner product in the second term is bounded from above by
\begin{equation*}
p\left( d\left( t,y\right) \right) ^{p-1}\left\vert D_{y}d\left( t,y\right)
\right\vert L\beta ,
\end{equation*}
where $L$ is the Lipschitz coefficient of $\gamma $ with respect to the spatial variables
over the compact set $\left[ 0,T\right] \times \bigcup_{t\in \left[ 0,T
\right] }\overline{U}_{t}$. By Lemma \ref{dlemma}, we have, for almost every
$y\in U_{t}\setminus \overline{\Omega }_{t}$, $t\in \left[ 0,T\right] $,
\begin{equation*}
\left\langle D_{y}v\left( t,y\right) ,\gamma \left( t,y\right) \right\rangle
=p\left( d\left( t,y\right) \right) ^{p-1}\left\langle D_{y}d\left(
t,y\right) ,\gamma \left( t,y\right) \right\rangle \leq -p\mu \left( d\left(
t,y\right) \right) ^{p-1},
\end{equation*}
and, for sufficiently small $\beta >0$,
\begin{equation*}
p\left( d\left( t,y\right) \right) ^{p-1}L\beta -p\mu \left( d\left(
t,y\right) \right) ^{p-1}\leq -\kappa \left( d\left( t,y\right) \right)
^{p-1},
\end{equation*}
for some constant $\kappa >0$. This proves \eqref{mollineq}.
$\Box $
We next use a penalty method to verify the existence of a solution to the
Skorohod problem for continuously differentiable functions. The following
lemma generalizes Theorem 2.1 in \cite{LionsSznitman1984} and Lemma 4.5 in
\cite{DupuisIshii1993}.
\begin{lemma}
\label{smoothexist}Let $\psi \in \mathcal{C}^{1}\left( \left[ 0,T\right] ,
\mathbb{R}
^{n}\right) $ with $\psi \left( 0\right) \in \overline{\Omega }_{0}$. Then
there exists a solution $\left( \phi ,\lambda \right) \in \mathcal{W}
^{1,p}\left( \left[ 0,T\right] ,
\mathbb{R}
^{n}\right) \times \mathcal{W}^{1,p}\left( \left[ 0,T\right] ,
\mathbb{R}
^{n}\right) $ to the Skorohod problem for $\left( \Omega ,\gamma ,\psi
\right) $.
\end{lemma}
| 2,036 | 73,518 |
en
|
train
|
0.156.10
|
\mathbb{R}
^{n},
\mathbb{R}
\right) $ be a positive mollifier with support in $B\left( 0,\beta \right) $
, for some $\beta >0$, and define the spatial convolutions
\begin{equation*}
v_{\beta }=v\ast \varphi _{\beta }\quad \text{and\quad }\widetilde{v}_{\beta
}=\widetilde{v}\ast \varphi _{\beta }.
\end{equation*}
\begin{lemma}
\label{vlemma} There is a constant $\kappa >0$ such that, for sufficiently
small $\beta > 0$ and every $t\in \left[ 0,T\right]$, there exists a
neighbourhood $\widetilde{U}_{t}$ of $\partial \Omega _{t}$, $\widetilde{U}
_{t}\supset \left\{ x:d\left( x,\partial \Omega _{t}\right) <2\overline{
\beta }\right\}$, such that
\begin{equation}
\left\langle D_{x}v_{\beta }\left( t,x\right) ,\gamma \left( t,x\right)
\right\rangle \leq -\kappa \widetilde{v}_{\beta }\left( t,x\right) ,\quad
\text{for }x\in \widetilde{U}_{t}\setminus \overline{\Omega }_{t}\text{.}
\label{mollineq}
\end{equation}
\end{lemma}
\noindent \textbf{Proof.} For all $x\in U_{t}\setminus \overline{\Omega }
_{t} $ such that $B\left( x,\overline{\beta }\right) \subset U_{t}$ and for
all $\beta \leq \overline{\beta }$, we have
\begin{eqnarray*}
&&\left\langle D_{x}v_{\beta }\left( t,x\right) ,\gamma \left( t,x\right)
\right\rangle =\int_{
\mathbb{R}
^{n}}\left\langle \varphi _{\beta }\left( x-y\right) D_{y}v\left( t,y\right)
,\gamma \left( t,x\right) \right\rangle dy \\
&=&\int_{
\mathbb{R}
^{n}}\left( \left\langle D_{y}v\left( t,y\right) ,\gamma \left( t,y\right)
\right\rangle +\left\langle D_{y}v\left( t,y\right) ,\gamma \left(
t,x\right) -\gamma \left( t,y\right) \right\rangle \right) \varphi _{\beta
}\left( x-y\right) dy.
\end{eqnarray*}
The inner product in the second term is bounded from above by
\begin{equation*}
p\left( d\left( t,y\right) \right) ^{p-1}\left\vert D_{y}d\left( t,y\right)
\right\vert L\beta ,
\end{equation*}
where $L$ is the Lipschitz coefficient of $\gamma $ with respect to the spatial variables
over the compact set $\left[ 0,T\right] \times \bigcup_{t\in \left[ 0,T
\right] }\overline{U}_{t}$. By Lemma \ref{dlemma}, we have, for almost every
$y\in U_{t}\setminus \overline{\Omega }_{t}$, $t\in \left[ 0,T\right] $,
\begin{equation*}
\left\langle D_{y}v\left( t,y\right) ,\gamma \left( t,y\right) \right\rangle
=p\left( d\left( t,y\right) \right) ^{p-1}\left\langle D_{y}d\left(
t,y\right) ,\gamma \left( t,y\right) \right\rangle \leq -p\mu \left( d\left(
t,y\right) \right) ^{p-1},
\end{equation*}
and, for sufficiently small $\beta >0$,
\begin{equation*}
p\left( d\left( t,y\right) \right) ^{p-1}L\beta -p\mu \left( d\left(
t,y\right) \right) ^{p-1}\leq -\kappa \left( d\left( t,y\right) \right)
^{p-1},
\end{equation*}
for some constant $\kappa >0$. This proves \eqref{mollineq}.
$\Box $
We next use a penalty method to verify the existence of a solution to the
Skorohod problem for continuously differentiable functions. The following
lemma generalizes Theorem 2.1 in \cite{LionsSznitman1984} and Lemma 4.5 in
\cite{DupuisIshii1993}.
\begin{lemma}
\label{smoothexist}Let $\psi \in \mathcal{C}^{1}\left( \left[ 0,T\right] ,
\mathbb{R}
^{n}\right) $ with $\psi \left( 0\right) \in \overline{\Omega }_{0}$. Then
there exists a solution $\left( \phi ,\lambda \right) \in \mathcal{W}
^{1,p}\left( \left[ 0,T\right] ,
\mathbb{R}
^{n}\right) \times \mathcal{W}^{1,p}\left( \left[ 0,T\right] ,
\mathbb{R}
^{n}\right) $ to the Skorohod problem for $\left( \Omega ,\gamma ,\psi
\right) $.
\end{lemma}
\noindent \textbf{Proof.} Choose $\varepsilon >0$ and consider the ordinary
differential equation
\begin{equation}
\phi _{\varepsilon }^{\prime }\left( t\right) =\frac{1}{\varepsilon }d\left(
t,\phi _{\varepsilon }\left( t\right) \right) \gamma \left( t,\phi
_{\varepsilon }\left( t\right) \right) +\psi ^{\prime }\left( t\right)
,\quad \phi _{\varepsilon }\left( 0\right) =\psi \left( 0\right) ,
\label{ode}
\end{equation}
for $\phi _{\varepsilon }\left( t\right) $, which has a unique solution on $
\left[ 0,T\right] $. Let $\kappa >0$ and the family of neighbourhoods $\{
\widetilde{U}_{t}\}_{t\in \left[ 0,T\right] }$ be as in Lemma \ref{vlemma}.
Choose a function $\zeta \in \mathcal{C}^{\infty }\left( \left[ 0,\infty
\right) ,\left[ 0,\infty \right) \right) $ such that
\begin{equation*}
\zeta \left( r\right) =\left\{
\begin{array}{ll}
r, & \text{for }r\leq \overline{\beta }^{p}/2, \\
3\overline{\beta }^{p}/4, & \text{for }r\geq \overline{\beta }^{p},
\end{array}
\right.
\end{equation*}
and $0\leq \zeta ^{\prime }\left( r\right) \leq 1$ for all $r\in \left[
0,\infty \right) $. Note that if $\phi _{\varepsilon }\left( t\right) \notin
\widetilde{U}_{t}\cup \overline{\Omega }_{t}$, then $d\left( t,\phi
_{\varepsilon }\left( t\right) \right) \geq 2\overline{\beta }$ and, as a
consequence, for all $\beta \leq \overline{\beta }$ it holds that $v_{\beta
}\left( t,\phi _{\varepsilon }\left( t\right) \right) \geq \overline{\beta }
^{p}$ and $\zeta ^{\prime }\left( v_{\beta }\left( t,\phi _{\varepsilon
}\left( t\right) \right) \right) =0$. We next define the function $F\left(
t\right) =\zeta \left( v_{\beta }\left( t,\phi _{\varepsilon }\left(
t\right) \right) \right) $, for $t\in \left[ 0,T\right] $, and investigate
its time derivative. Let $D_{t}d$ denote the weak derivative guaranteed by
\eqref{templip} and note that
\begin{eqnarray}
F^{\prime }\left( t\right) &=&\zeta ^{\prime }\left( v_{\beta }\left( t,\phi
_{\varepsilon }\left( t\right) \right) \right) \left( D_{t}v_{\beta }\left(
t,\phi _{\varepsilon }\left( t\right) \right) +\left\langle D_{x}v_{\beta
}\left( t,\phi _{\varepsilon }\left( t\right) \right) ,\phi _{\varepsilon
}^{\prime }\left( t\right) \right\rangle \right) \notag \\
&=&\zeta ^{\prime }\left( v_{\beta }\left( t,\phi _{\varepsilon }\left(
t\right) \right) \right) \Bigg(D_{t}v_{\beta }\left( t,\phi _{\varepsilon
}\left( t\right) \right) \frac{{}}{{}} \notag \\
&&+\left\langle D_{x}v_{\beta }\left( t,\phi _{\varepsilon }\left( t\right)
\right) ,\,\frac{1}{\varepsilon }d\left( t,\phi _{\varepsilon }\left(
t\right) \right) \gamma \left( t,\phi _{\varepsilon }\left( t\right) \right)
+\psi ^{\prime }\left( t\right) \right\rangle \Bigg), \label{vprim}
\end{eqnarray}
as $\phi _{\varepsilon }\left( t\right) $ solves \eqref{ode}. From Lemma \ref
{vlemma}, we have
\begin{eqnarray*}
&&\zeta ^{\prime }\left( v_{\beta }\left( t,\phi _{\varepsilon }\left(
t\right) \right) \right) \left\langle D_{x}v_{\beta }\left( t,\phi
_{\varepsilon }\left( t\right) \right) ,\,\frac{1}{\varepsilon }d\left(
t,\phi _{\varepsilon }\left( t\right) \right) \gamma \left( t,\phi
_{\varepsilon }\left( t\right) \right) \right\rangle \\
&\leq &-\zeta ^{\prime }\left( v_{\beta }\left( t,\phi _{\varepsilon }\left(
t\right) \right) \right) \frac{\kappa }{\varepsilon }d\left( t,\phi
_{\varepsilon }\left( t\right) \right) \widetilde{v}_{\beta }\left( t,\phi
_{\varepsilon }\left( t\right) \right) ,
\end{eqnarray*}
for $\phi _{\varepsilon }\left( t\right) \in \widetilde{U}_{t}\setminus
\overline{\Omega }_{t}$ and for all other $\phi _{\varepsilon }\left(
t\right) $ both sides vanish when $\beta \leq \overline{\beta }$.
Integrating the estimate for $F^{\prime }$, suppressing the $s$-dependence
in $\phi _{\varepsilon }$ and $\psi $ and denoting $\zeta ^{\prime }\left(
v_{\beta }\left( s,\phi _{\varepsilon }\right) \right) $ by $\zeta ^{\prime
}\left( v_{\beta }\right) $ for simplicity, we obtain, for all $t\in \left[
0,T\right] $,
\begin{eqnarray}
&&\zeta \left( v_{\beta }\left( t,\phi _{\varepsilon }\left( t\right)
\right) \right) -\zeta \left( v_{\beta }\left( 0,\phi _{\varepsilon }\left(
0\right) \right) \right) +\frac{\kappa }{\varepsilon }\int_{0}^{t}\zeta
^{\prime }\left( v_{\beta }\right) d\left( s,\phi _{\varepsilon }\right)
\widetilde{v}_{\beta }\left( s,\phi _{\varepsilon }\right) ds
\label{eq:nyref} \\
&\leq &\int_{0}^{t}\zeta ^{\prime }\left( v_{\beta }\right) \left\vert
D_{s}v_{\beta }\left( s,\phi _{\varepsilon }\right) \right\vert
ds+\int_{0}^{t}\zeta ^{\prime }\left( v_{\beta }\right) \left\vert
D_{x}v_{\beta }\left( s,\phi _{\varepsilon }\right) \right\vert \left\vert
\psi ^{\prime }\right\vert ds=I_{1}+I_{2}. \notag
\end{eqnarray}
Note that since $\left\vert D_{x}d\right\vert \leq 1$ a.e. we have $
\left\vert D_{x}v_{\beta }\left( s,\phi _{\varepsilon }\right) \right\vert
\leq p\widetilde{v}_{\beta }\left( s,\phi _{\varepsilon }\right) $, and
hence, H\"{o}lder's inequality implies
\begin{eqnarray*}
I_{2} &=&\int_{0}^{t}\zeta ^{\prime }\left( v_{\beta }\right) \left\vert
D_{x}v_{\beta }\left( s,\phi _{\varepsilon }\right) \right\vert \left\vert
\psi ^{\prime }\right\vert ds\leq p\int_{0}^{t}\zeta ^{\prime }\left(
v_{\beta }\right) \widetilde{v}_{\beta }\left( s,\phi _{\varepsilon }\right)
\left\vert \psi ^{\prime }\right\vert ds \\
&\leq &p\left( \int_{0}^{t}\zeta ^{\prime }\left( v_{\beta }\right)
\left\vert \psi ^{\prime }\right\vert ^{p}ds\right) ^{1/p}\left(
\int_{0}^{t}\zeta ^{\prime }\left( v_{\beta }\right) \left( \widetilde{v}
_{\beta }\left( s,\phi _{\varepsilon }\right) \right) ^{p/(p-1)}ds\right)
^{(p-1)/p}.
\end{eqnarray*}
Moreover, since
$|D_{s}v_{\beta }|\leq p\left( v_{\beta }\left( s,\phi _{\varepsilon
}\right) \right) ^{(p-1)/p}\left( |D_{s}d|^{p}\ast \varphi _{\beta }\right)
^{1/p}$,
we also have
\begin{eqnarray*}
I_{1} &=&\int_{0}^{t}\zeta ^{\prime }\left( v_{\beta }\right) \left\vert
D_{s}v_{\beta }\left( s,\phi _{\varepsilon }\right) \right\vert ds \\
&\leq &p\int_{0}^{t}\zeta ^{\prime }\left( v_{\beta }\right) \left( v_{\beta
}\left( s,\phi _{\varepsilon }\right) \right) ^{(p-1)/p}\left(
|D_{s}d|^{p}\ast \varphi _{\beta }\right) ^{1/p}ds \\
&\leq &p\left( \int_{0}^{t}\zeta ^{\prime }\left( v_{\beta }\right) v_{\beta
}\left( s,\phi _{\varepsilon }\right) ds\right) ^{(p-1)/p}\left(
\int_{0}^{t}\zeta ^{\prime }\left( v_{\beta }\right) \left( |D_{s}d|^{p}\ast
\varphi _{\beta }\right) ds\right) ^{1/p}.
\end{eqnarray*}
Inserting the bounds for $I_{1}$ and $I_{2}$ into \eqref{eq:nyref} yields
\begin{eqnarray}
&&\frac{1}{p}\zeta \left( v_{\beta }\left( t,\phi _{\varepsilon }\left(
t\right) \right) \right) +\frac{\kappa }{\varepsilon p}\int_{0}^{t}\zeta
^{\prime }\left( v_{\beta }\right) d\left( s,\phi _{\varepsilon }\right)
\widetilde{v}_{\beta }\left( s,\phi _{\varepsilon }\right) ds
\label{betabound} \\
&\leq &\left( \int_{0}^{t}\zeta ^{\prime }\left( v_{\beta }\right) v_{\beta
}\left( s,\phi _{\varepsilon }\right) ds\right) ^{(p-1)/p}\left(
\int_{0}^{t}\zeta ^{\prime }\left( v_{\beta }\right) \left( |D_{s}d|^{p}\ast
\varphi _{\beta }\right) ds\right) ^{1/p} \notag \\
&+&\left( \int_{0}^{t}\zeta ^{\prime }\left( v_{\beta }\right) \left(
\widetilde{v}_{\beta }\left( s,\phi _{\varepsilon }\right) \right)
^{p/(p-1)}ds\right) ^{(p-1)/p}\left( \int_{0}^{t}\zeta ^{\prime }\left(
v_{\beta }\right) \left\vert \psi ^{\prime }\right\vert ^{p}ds\right)
^{1/p}+\rho \left( \beta \right) , \notag
\end{eqnarray}
where $\rho \left( \beta \right) =p^{-1}\zeta \left( v_{\beta }\left( 0,\phi
_{\varepsilon }\left( 0\right) \right) \right) \rightarrow 0$ as $\beta
\rightarrow 0$. By spatial Lipschitz continuity of $d(t,x)$ we have $
v_{\beta }\left( s,\phi _{\varepsilon }\right) \rightarrow v\left( s,\phi
_{\varepsilon }\right) $ and $\widetilde{v}_{\beta }\left( s,\phi
_{\varepsilon }\right) \rightarrow \widetilde{v}\left( s,\phi _{\varepsilon
}\right) $ as $\beta \rightarrow 0$. Moreover, since $d$ satisfies
\eqref{templip}, uniformly in space, we also have
\begin{equation*}
\int_{0}^{t}\left\vert D_{s}d\right\vert ^{p}ds\leq C(T)^{p},
\end{equation*}
for some constant $C(T)$ independent of $x$. Therefore, by the
Fubini-Tonelli theorem we can conclude, since $D_{s}d(t,x)$ is jointly
measurable in $(t,x)$, that
\begin{equation*}
\int_{0}^{t}\left( |D_{s}d|^{p}\ast \varphi _{\beta }\right) ds=\int_{
\mathbb{R}^{n}}\left( \int_{0}^{t}\left\vert D_{s}d\right\vert ^{p}ds\right)
\varphi _{\beta }(x-y)dy\leq C(T)^{p},
\end{equation*}
and so
\begin{equation*}
\left( \int_{0}^{t}\zeta ^{\prime }\left( v_{\beta }\right) \left(
|D_{s}d|^{p}\ast \varphi _{\beta }\right) ds\right) ^{1/p}+\left(
\int_{0}^{t}\zeta ^{\prime }\left( v_{\beta }\right) \left\vert \psi
^{\prime }\right\vert ^{p}ds\right) ^{1/p}\leq C\left( T\right) <\infty ,
\end{equation*}
since by construction $\left\vert \zeta ^{\prime }\left( v_{\beta }\right)
\right\vert \leq 1$, and $\psi \in \mathcal{C}^{1}\left( \left[ 0,T\right] ,
\mathbb{R}^{n}\right) $. Thus, letting $\beta $ tend to zero in
\eqref{betabound}, we obtain
\begin{eqnarray*}
&&\frac{1}{p}\zeta \left( v\left( t,\phi _{\varepsilon }\left( t\right)
\right) \right) +\frac{\kappa }{\varepsilon p}\int_{0}^{t}\zeta ^{\prime
}\left( v\left( s,\phi _{\varepsilon }\right) \right) v\left( s,\phi
_{\varepsilon }\right) ds \\
&\leq &C\left( T\right) \left( \int_{0}^{t}\zeta ^{\prime }\left( v\left(
s,\phi _{\varepsilon }\right) \right) v\left( s,\phi _{\varepsilon }\right)
ds\right) ^{\left( p-1\right) /p}.
\end{eqnarray*}
Both terms on the left hand side are positive and each of the terms are
therefore bounded from above by the right hand side. Hence
\begin{equation*}
\frac{\kappa }{\varepsilon p}\left( \int_{0}^{t}\zeta ^{\prime }\left(
v\left( s,\phi _{\varepsilon }\right) \right) v\left( s,\phi _{\varepsilon
}\right) ds\right) ^{1/p}\leq C\left( T\right) ,
\end{equation*}
and, as a consequence,
\begin{equation*}
\zeta \left( v\left( t,\phi _{\varepsilon }\left( t\right) \right) \right) +
\frac{\kappa }{\varepsilon }\int_{0}^{t}\zeta ^{\prime }\left( v\left(
s,\phi _{\varepsilon }\right) \right) v\left( s,\phi _{\varepsilon }\right)
ds\leq K\left( T\right) \varepsilon ^{p-1}.
\end{equation*}
We may assume that $\varepsilon >0$ has been chosen small enough such that $
v\left( t,\phi _{\varepsilon }\left( t\right) \right) \leq \overline{\beta }
^{p}/2$, for all $t\in \left[ 0,T\right] $. Then, by the definition of $
\zeta $,
\begin{equation}
\frac{1}{\varepsilon ^{p-1}}\left( d\left( t,\phi _{\varepsilon }\left(
t\right) \right) \right) ^{p}+\frac{\kappa }{\varepsilon ^{p}}
\int_{0}^{t}\left( d\left( s,\phi _{\varepsilon }\left( s\right) \right)
\right) ^{p}ds\leq K\left( T\right) , \label{d2bound}
\end{equation}
for $t\in \left[ 0,T\right] $.
The remainder of the proof follows along the lines of the proof of Lemma 4.5
in \cite{DupuisIshii1993}, but we give the details for completeness.
Relation \eqref{d2bound} asserts that the sequences $\left\{ l_{\varepsilon
}\right\} _{\varepsilon >0}$ and $\left\{ \lambda _{\varepsilon }\right\}
_{\varepsilon \geq 0}$, where
\begin{equation*}
l_{\varepsilon }\left( t\right) =\frac{1}{\varepsilon }d\left( t,\phi
_{\varepsilon }\left( t\right) \right) ,\quad \lambda _{\varepsilon }\left(
t\right) =\frac{1}{\varepsilon }\int_{0}^{t}d\left( s,\phi _{\varepsilon
}\left( s\right) \right) \gamma \left( s,\phi _{\varepsilon }\left( s\right)
\right) ds,
\end{equation*}
are bounded in $L^{p}\left( \left[ 0,T\right] ,
\mathbb{R}
\right) $ and $\mathcal{W}^{1,p}\left( \left[ 0,T\right] ,
\mathbb{R}
^{n}\right) $ respectively. Thus, we may assume that $l_{\varepsilon }$ and $
\lambda _{\varepsilon }$ converge weakly to $l\in L^{p}\left( \left[ 0,T
\right] ,
\mathbb{R}
\right) $ and $\lambda \in \mathcal{W}^{1,p}\left( \left[ 0,T\right] ,
\mathbb{R}
^{n}\right) \subset \mathcal{C}\left( [0,T],\mathbb{R}^{n}\right) $,
respectively, as $\varepsilon \rightarrow 0$. Moreover, from \eqref{ode} we
conclude that $\phi _{\varepsilon }$ converges weakly to $\phi \in \mathcal{W
}^{1,p}\left( \left[ 0,T\right] ,
\mathbb{R}
^{n}\right) $ and that $\phi \left( t\right) =\psi \left( t\right) +\lambda
\left( t\right) $, $\phi \left( 0\right) =\psi \left( 0\right) $. This
proves \eqref{SP1} and, moreover, \eqref{SP2} holds due to \eqref{d2bound}.
By construction, $\lambda _{\varepsilon }^{\prime }\left( t\right)
=l_{\varepsilon }\left( t\right) \gamma \left( t,\phi _{\varepsilon }\left(
t\right) \right) $ and this implies that $\lambda ^{\prime }\left( t\right)
=l\left( t\right) \gamma \left( t,\phi \left( t\right) \right) $. Moreover,
if we let $\tau =\left\{ t\in \left[ 0,T\right] :\phi \left( t\right) \in
\Omega _{t}\right\} $, then for each fixed $t\in \tau $ we have $
l_{\varepsilon }\left( t\right) =0$ for all sufficiently small $\varepsilon $
and hence $l\left( t\right) =0$ on $\tau $. Therefore
\begin{equation*}
\left\vert \lambda \right\vert \left( t\right) =\int_{0}^{t}\left\vert
\lambda ^{\prime }\left( s\right) \right\vert ds=\int_{0}^{t}l\left(
s\right) \left\vert \gamma \left( s,\phi \left( s\right) \right) \right\vert
ds=\int_{0}^{t}l\left( s\right) ds,\quad \text{for all }t\in \left[ 0,T
\right] ,
\end{equation*}
as $\left\vert \gamma \left( s,\phi \left( s\right) \right) \right\vert =1$
for all $s\in \left[ 0,T\right] \setminus \tau $. This proves \eqref{SP3}.
In addition,
\begin{equation*}
\lambda \left( t\right) =\int_{0}^{t}l\left( s\right) \gamma \left( s,\phi
\left( s\right) \right) ds=\int_{0}^{t}\gamma \left( s,\phi \left( s\right)
\right) d\left\vert \lambda \right\vert \left( s\right) ,\quad \text{for all
}t\in \left[ 0,T\right] ,
\end{equation*}
which proves \eqref{SP5}. It remains to verify \eqref{SP4}, but this follows
readily from
\begin{equation*}
\left\vert \lambda \right\vert \left( t\right) =\int_{0}^{t}l\left( s\right)
\left\vert \gamma \left( s,\phi \left( s\right) \right) \right\vert
ds=\int_{0}^{t}I_{\left\{ \phi \left( s\right) \in \partial \Omega
_{s}\right\} }l\left( s\right) ds=\int_{0}^{t}I_{\left\{ \phi \left(
s\right) \in \partial \Omega _{s}\right\} }d\left\vert \lambda \right\vert
\left( s\right) .
\end{equation*}
We have completed the proof that $\left( \phi ,\lambda \right) \in \mathcal{W
}^{1,p}\left( \left[ 0,T\right] ,
\mathbb{R}
^{n}\right) \times \mathcal{W}^{1,p}\left( \left[ 0,T\right] ,
\mathbb{R}
^{n}\right) $ solves the Skorohod problem for $\left( \Omega ,\gamma ,\psi
\right) $.
$\Box $
The next step is to prove relative compactness of solutions to the Skorohod
problem. The proof follows the proof of Lemma 4.7 in \cite{DupuisIshii1993},
but a number of changes must be made carefully to handle the time dependency
of the domain.
\begin{lemma}
\label{compactest}Let $A$ be a compact subset of $\mathcal{C}\left( \left[
0,T\right] ,
\mathbb{R}
^{n}\right) $. Then
\begin{description}
\item[(i)] There exists a constant $L<\infty $ such that
\begin{equation*}
\left\vert \lambda \right\vert \left( T\right) <L,
\end{equation*}
for all solutions $\left( \psi +\lambda ,\lambda \right) $ to the Skorohod
problem for $\left( \Omega ,\gamma ,\psi \right) $ with $\psi \in A$.
\item[(ii)] The set of $\phi $, such that $\left( \phi ,\lambda \right) $
solves the Skorohod problem for $\left( \Omega ,\gamma ,\psi \right) $ with $
\psi \in A$, is relatively compact.
\end{description}
\end{lemma}
\noindent \textbf{Proof.} By the compactness of $\overline{\Omega }$ and the
continuity of $\gamma $, there exists a constant $c>0$ such that for every $
t\in \left[ 0,T\right] $ and $x\in \overline{\Omega }_{t}\cap V$, where $V$
is the set defined in connection with \eqref{smooth_gamma}, there exists a
vector $v\left( t,x\right) $ and a set $\left[ t,t+c\right] \times B\left(
x,c\right) $ such that $\left\langle \gamma \left( s,y\right) ,v\left(
t,x\right) \right\rangle >c$ for all $\left( s,y\right) \in \left[ t,t+c
\right] \times B\left( x,c\right) $. Without loss of generality, we may
assume that $c<\delta $, for the $\delta $ introduced in Remark \ref
{spaceremark}. Let $\psi \in A$ be given and let $\left( \phi ,\lambda
\right) $ be any solution to the Skorohod problem for $\left( \Omega ,\gamma
,\psi \right) $. Define $T_{1}$ to be the smallest of $T$, $c$ and $\inf \left\{
t\in \left[ 0,T\right] :\phi \left( t\right) \notin B\left( \phi \left(
0\right) ,c\right) \right\} $. Next define $T_{2}$ to be the smallest of $T$
, $T_{1}+c$ and $\inf \left\{ t\in \left[ T_{1},T\right] :\phi \left(
t\right) \notin B\left( \phi \left( T_{1}\right) ,c\right) \right\} $.
Continuing in this fashion, we obtain a sequence $\left\{ T_{m}\right\}
_{m=1,2,...}$ of time instants. By construction, for all $s\in \left[
T_{m-1},T_{m}\right) $ we have $s\in \left[ T_{m-1},T_{m-1}+c\right] $ and $
\phi \left( s\right) \in B\left( \phi \left( T_{m-1}\right) ,c\right) $. For
all $m$ such that $\phi \left( T_{m-1}\right) \in $ $\overline{\Omega }
_{T_{m-1}}\cap V$, we have $\left\langle \gamma \left( s,\phi \left(
s\right) \right) ,v\left( T_{m-1},\phi \left( T_{m-1}\right) \right)
\right\rangle >c$ and hence
\begin{eqnarray*}
&&\left\langle \phi \left( T_{m}\right) -\phi \left( T_{m-1}\right) ,v\left(
T_{m-1},\phi \left( T_{m-1}\right) \right) \right\rangle \\
&&-\left\langle \psi \left( T_{m}\right) -\psi \left( T_{m-1}\right)
,v\left( T_{m-1},\phi \left( T_{m-1}\right) \right) \right\rangle \\
&=&\int_{T_{m-1}}^{T_{m}}\left\langle \gamma \left( s,\phi \left( s\right)
\right) ,v\left( T_{m-1},\phi \left( T_{m-1}\right) \right) \right\rangle
d\left\vert \lambda \right\vert \left( s\right) \geq c\left( \left\vert
\lambda \right\vert \left( T_{m}\right) -\left\vert \lambda \right\vert
\left( T_{m-1}\right) \right) .
\end{eqnarray*}
Since $A$ is compact, the set $\left\{ \psi \left( t\right) :t\in \left[ 0,T
\right] ,\psi \in A\right\} $ is bounded. Moreover, since $\overline{\Omega }
$ is compact and $\phi \left( t\right) \in \overline{\Omega }_{t}$ for all $
t\in \left[ 0,T\right] $, there exists a constant $M<\infty $ such that
\begin{equation*}
\left\vert \lambda \right\vert \left( T_{m}\right) -\left\vert \lambda
\right\vert \left( T_{m-1}\right) <M.
\end{equation*}
Note also that, for all $m$ such that $\phi \left( T_{m-1}\right) \notin $ $
\overline{\Omega }_{T_{m-1}}\cap V$, we have, for $c$ sufficiently small,
that $\left\vert \lambda \right\vert \left( T_{m}\right) -\left\vert \lambda
\right\vert \left( T_{m-1}\right) =0$.
Define the modulus of continuity of a function $f\in \mathcal{C}\left( \left[
0,T\right] ,
\mathbb{R}
^{n}\right) $ as $\left\Vert f\right\Vert _{s,t}=\sup_{s\leq t_{1}\leq
t_{2}\leq t}\left\vert f\left( t_{2}\right) -f\left( t_{1}\right)
\right\vert $ for $0\leq s\leq t\leq T$. We next prove that there exists a
positive constant $R$ such that, for any $\psi \in A$ and $T_{m-1}\leq \tau
\leq T_{m}$, it holds that
\begin{equation}
\left\Vert \lambda \right\Vert _{T_{m-1},\tau }\leq R\left( \left\Vert \psi
\right\Vert _{T_{m-1},\tau }^{1/2}+\left\Vert \psi \right\Vert
_{T_{m-1},\tau }^{3/2}+\left( \tau -T_{m-1}\right) ^{\widehat{\alpha }
/2}\right) , \label{apriori}
\end{equation}
where $\widehat{\alpha }$ is the H\"{o}lder exponent in Remark \ref
{timeholder}. As we are only interested in the behaviour during the time
interval $\left[ T_{m-1},T_{m}\right] $, we simplify the notation by
setting, without loss of generality, $T_{m-1}=0$, $\phi \left(
T_{m-1}\right) =x$, $\psi \left( T_{m-1}\right) =x$, $\lambda \left(
T_{m-1}\right) =0$ and $\left\vert \lambda \right\vert \left( T_{m-1}\right)
=0$. Let $h$ be the function in Lemma \ref{testlemma3} and let $\chi ,C$ be
the corresponding positive constants. Define $B_{\varepsilon }\left(
t\right) =\varepsilon h\left( t,x,-\lambda \left( t\right) /\varepsilon
\right) $ and $E\left( t\right) =e^{-\left( 2\left\vert \lambda \right\vert
\left( t\right) +t\right) C/\chi }$. Since $h\left( t,x,0\right) =1$, we get
\begin{eqnarray*}
B_{\varepsilon }\left( \tau \right) E\left( \tau \right) &=&B_{\varepsilon
}\left( 0\right) E\left( 0\right) +\int_{0}^{\tau }\left( E\left( u\right)
dB_{\varepsilon }\left( u\right) +B_{\varepsilon }\left( u\right) dE\left(
u\right) \right) \\
&=&\varepsilon +\int_{0}^{\tau }E\left( u\right) dB_{\varepsilon }\left(
u\right) -\frac{2C}{\chi }\int_{0}^{\tau }B_{\varepsilon }\left( u\right)
E\left( u\right) d\left\vert \lambda \right\vert \left( u\right) \\
&&-\frac{C}{\chi }\int_{0}^{\tau }B_{\varepsilon }\left( u\right) E\left(
u\right) du,
\end{eqnarray*}
where the first integral can be rewritten as
\begin{eqnarray*}
\int_{0}^{\tau }E\left( u\right) dB_{\varepsilon }\left( u\right)
&=&\int_{0}^{\tau }E\left( u\right) \varepsilon D_{t}h\left( u,x,-\lambda
\left( u\right) /\varepsilon \right) du \\
&&-\int_{0}^{\tau }E\left( u\right) \left\langle D_{p}h\left( u,x,-\lambda
\left( u\right) /\varepsilon \right) ,d\lambda \left( u\right) \right\rangle
.
\end{eqnarray*}
By \eqref{testlemma31} and \eqref{testlemma35}, the integral involving $
D_{t}h$ has the upper bound
\begin{eqnarray*}
&&\int_{0}^{\tau }E\left( u\right) \varepsilon D_{t}h\left( u,x,-\lambda
\left( u\right) /\varepsilon \right) du \\
&\leq &C\varepsilon \int_{0}^{\tau }E\left( u\right) \left\vert \lambda
\left( u\right) /\varepsilon \right\vert ^{2}du\leq \frac{C}{\chi }
\int_{0}^{\tau }E\left( u\right) B_{\varepsilon }\left( u\right) du.
\end{eqnarray*}
Next, we would like to find an upper bound for the integral involving $
D_{p}h $ using \eqref{testlemma33} in some appropriate way, but we have to
be somewhat careful due to the temporal variation of the domain. Assume that
$\phi \left( u\right) \in \partial \Omega _{u}$. If $x\notin \overline{
\Omega }_{u}$, there exists at least one point $y_{u}\in \overline{\Omega }
_{u}\cap B\left( x,c\right) $ such that $\left\vert x-y_{u}\right\vert
=d\left( u,x\right) $. We have chosen $c<\delta $, so $\left\langle
y_{u}-\phi \left( u\right) ,\gamma \left( u,\phi \left( u\right) \right)
\right\rangle \geq -\theta \left\vert y_{u}-\phi \left( u\right) \right\vert
$ holds by Remark \ref{spaceremark} and, due to \eqref{testlemma33}, we can
conclude
\begin{equation*}
I_{1}:=-\int_{0}^{\tau }E\left( u\right) \left\langle D_{p}h\left( u,\phi
\left( u\right) ,\left( y_{u}-\phi \left( u\right) \right) /\varepsilon
\right) ,\gamma \left( u,\phi \left( u\right) \right) \right\rangle
d\left\vert \lambda \right\vert \left( u\right) \leq 0,
\end{equation*}
since $d\left\vert \lambda \right\vert \left( u\right) =0$ if $\phi \left(
u\right) \notin \partial \Omega _{u}$. If $x\in \overline{\Omega }_{u}$, the
above estimate holds with $y_{u}$ replaced by $x$. The integral involving $
D_{p}h$ can be decomposed into
\begin{equation*}
-\int_{0}^{\tau }E\left( u\right) \left\langle D_{p}h\left( u,x,-\lambda
\left( u\right) /\varepsilon \right) ,d\lambda \left( u\right) \right\rangle
=I_{1}+I_{2}+I_{3},
\end{equation*}
for $I_{1}$ as above and
\begin{equation*}
I_{2}=\int_{0}^{\tau }E\left( u\right) \left\langle D_{p}h\left( u,\phi
\left( u\right) ,-\lambda \left( u\right) /\varepsilon \right) -D_{p}h\left(
u,x,-\lambda \left( u\right) /\varepsilon \right) ,d \lambda
\left( u\right) \right\rangle ,
\end{equation*}
\begin{equation*}
I_{3}=\int_{0}^{\tau }E\left( u\right) \left\langle D_{p}h\left( u,\phi
\left( u\right) ,\left( y_{u}-\phi \left( u\right) \right) /\varepsilon
\right) -D_{p}h\left( u,\phi \left( u\right) ,-\lambda \left( u\right)
/\varepsilon \right) ,d \lambda \left( u\right)
\right\rangle .
\end{equation*}
By \eqref{testlemma31} and \eqref{testlemma37}, these integrals can be
bounded from above by
\begin{eqnarray*}
I_{2} &\leq &\frac{C}{\varepsilon }\int_{0}^{\tau }E\left( u\right)
\left\vert \lambda \left( u\right) \right\vert \left\vert x-\phi \left(
u\right) \right\vert d\left\vert \lambda \right\vert \left( u\right) \\
&\leq &\frac{C}{\varepsilon }\int_{0}^{\tau }E\left( u\right) \left(
\left\vert \lambda \left( u\right) \right\vert ^{2}+\left\vert x-\psi \left(
u\right) \right\vert \left\vert \lambda \left( u\right) \right\vert \right)
d\left\vert \lambda \right\vert \left( u\right) \\
&\leq &\frac{2C}{\varepsilon }\int_{0}^{\tau }E\left( u\right) \left(
\left\vert \lambda \left( u\right) \right\vert ^{2}+\left\vert x-\psi \left(
u\right) \right\vert ^{2}\right) d\left\vert \lambda \right\vert \left(
u\right) \\
&\leq &\frac{2C}{\chi }\int_{0}^{\tau }E\left( u\right) B_{\varepsilon
}\left( u\right) d\left\vert \lambda \right\vert \left( u\right) +\frac{2C}{
\varepsilon }\int_{0}^{\tau }E\left( u\right) \left\vert x-\psi \left(
u\right) \right\vert ^{2}d\left\vert \lambda \right\vert \left( u\right) ,
\end{eqnarray*}
and
\begin{eqnarray*}
I_{3} &\leq &\frac{C}{\varepsilon }\int_{0}^{\tau }E\left( u\right)
\left\vert y_{u}-\phi \left( u\right) -\left( -\lambda \left( u\right)
\right) \right\vert d\left\vert \lambda \right\vert \left( u\right) \\
&=&\frac{C}{\varepsilon }\int_{0}^{\tau }E\left( u\right) \left\vert
y_{u}-\psi \left( u\right) \right\vert d\left\vert \lambda \right\vert
\left( u\right) \\
&\leq &\frac{C}{\varepsilon }\int_{0}^{\tau }E\left( u\right) \left(
\left\vert x-\psi \left( u\right) \right\vert +\left\vert y_{u}-x\right\vert
\right) d\left\vert \lambda \right\vert \left( u\right) \\
&\leq &\frac{C}{\varepsilon }\int_{0}^{\tau }E\left( u\right) \left(
\left\vert x-\psi \left( u\right) \right\vert +d\left( u,x\right) \right)
d\left\vert \lambda \right\vert \left( u\right) .
\end{eqnarray*}
Collecting all the terms, we obtain
\begin{equation*}
B_{\varepsilon }\left( \tau \right) E\left( \tau \right) \leq \varepsilon +
\frac{C}{\varepsilon }\int_{0}^{\tau }E\left( u\right) \left( \left\vert
x-\psi \left( u\right) \right\vert +2\left\vert x-\psi \left( u\right)
\right\vert ^{2}+d\left( u,x\right) \right) d\left\vert \lambda \right\vert
\left( u\right) ,
\end{equation*}
which implies
\begin{equation*}
B_{\varepsilon }\left( \tau \right) \leq \left( \frac{2C}{\varepsilon }
\int_{0}^{\tau }E\left( u\right) \left( \left\Vert \psi \right\Vert _{0,\tau
}+\left\Vert \psi \right\Vert _{0,\tau }^{2}+K\tau ^{\widehat{\alpha }
}\right) d\left\vert \lambda \right\vert \left( u\right) +\varepsilon
\right) e^{\left( 2\left\vert \lambda \right\vert \left( \tau \right) +\tau
\right) C/\chi },
\end{equation*}
where $K$ and $\widehat{\alpha }$ are the constants from Remark \ref
{timeholder}. Now
\begin{equation*}
\int_{0}^{\tau }E\left( u\right) d\left\vert \lambda \right\vert \left(
u\right) \leq \int_{0}^{\tau }e^{-2C\left\vert \lambda \right\vert \left(
u\right) /\chi }d\left\vert \lambda \right\vert \left( u\right) \leq \frac{
\chi }{2C},
\end{equation*}
so
\begin{equation*}
B_{\varepsilon }\left( \tau \right) \leq \left( \frac{\chi }{\varepsilon }
\left( \left\Vert \psi \right\Vert _{0,\tau }+\left\Vert \psi \right\Vert
_{0,\tau }^{2}+K\tau ^{\widehat{\alpha }}\right) +\varepsilon \right)
e^{\left( 2\left\vert \lambda \right\vert \left( \tau \right) +\tau \right)
C/\chi }.
\end{equation*}
Another application of \eqref{testlemma31} gives
\begin{eqnarray*}
\left\vert \lambda \left( \tau \right) \right\vert &\leq &\frac{1}{2}\left(
\varepsilon +\frac{1}{\varepsilon }\left\vert \lambda \left( \tau \right)
\right\vert ^{2}\right) \leq \frac{\varepsilon }{2}+\frac{B_{\varepsilon
}\left( \tau \right) }{2\chi } \\
&\leq &\frac{\varepsilon }{2}+\left( \frac{1}{2\varepsilon }\left(
\left\Vert \psi \right\Vert _{0,\tau }+\left\Vert \psi \right\Vert _{0,\tau
}^{2}+K\tau ^{\widehat{\alpha }}\right) +\frac{\varepsilon }{2\chi }\right)
e^{\left( 2M+T\right) C/\chi }.
\end{eqnarray*}
Set $\varepsilon =\max \left\{ \left\Vert \psi \right\Vert _{0,\tau
}^{1/2},\tau ^{\widehat{\alpha }/2}\right\} $ so that $\varepsilon \leq
\left\Vert \psi \right\Vert _{0,\tau }^{1/2}+\tau ^{\widehat{\alpha }/2}$, $
1/\varepsilon \leq \left\Vert \psi \right\Vert _{0,\tau }^{-1/2}$ and $
1/\varepsilon \leq \tau ^{-\widehat{\alpha }/2}$. Then \eqref{apriori}
follows immediately from the above inequality. By \eqref{apriori} and the
compactness of $A$, there exists a $\hat{\tau}>0$ such that
\begin{equation*}
\max \left\{ \left\Vert \psi \right\Vert _{T_{m-1},T_{m-1}+\hat{\tau}
},\left\Vert \lambda \right\Vert _{T_{m-1},T_{m-1}+\hat{\tau}}\right\} \leq
c/3,
\end{equation*}
which implies $\left\Vert \phi \right\Vert _{T_{m-1},T_{m-1}+\hat{\tau}}\leq
2c/3$. The definition of $\left\{ T_{m}\right\} $ then implies that $
T_{m}-T_{m-1}\geq \min \left\{ \hat{\tau},c\right\} $. This proves (i) with $
L=M\left( T/\min \left\{ \hat{\tau},c\right\} +1\right) $. Part (ii) follows
from \eqref{apriori} and the bound $T_{m}-T_{m-1}\geq \min \left\{ \hat{\tau}
,c\right\} $.
$\Box $
Equipped with the results above, we are now ready to state and prove the
existence of solutions to the Skorohod problem. The proof is very similar to
the proof of Theorem 4.8 in \cite{DupuisIshii1993}, so we only sketch the
first half of the proof.
\begin{lemma}
\label{contexist}Let $\psi \in \mathcal{C}\left( \left[ 0,T\right] ,
\mathbb{R}
^{n}\right) $ with $\psi \left( 0\right) \in \overline{\Omega }_{0}$. Then
there exists a solution $\left( \phi ,\lambda \right) $ to the Skorohod
problem for $\left( \Omega ,\gamma ,\psi \right) $.
\end{lemma}
\noindent \textbf{Proof.} Let $\psi _{n}\in \mathcal{C}^{1}\left( \left[ 0,T
\right] ,
\mathbb{R}
^{n}\right) $ form a sequence of functions converging uniformly to $\psi $.
According to Lemma \ref{smoothexist}, there exists a solution $\left( \phi
_{n},\lambda _{n}\right) $ to the Skorohod problem for $\left( \Omega
,\gamma ,\psi _{n}\right) $. By Lemma \ref{compactest}, we may assume that
the sequence $\left\{ \lambda _{n}\right\} _{n=1}^{\infty }$ is equibounded
and equicontinuous, that is
\begin{eqnarray*}
\sup_{n}\left\vert \lambda _{n}\right\vert \left( T\right) &\leq &L<\infty ,
\\
\lim_{\left\vert s-t\right\vert \rightarrow 0}\sup_{n}\left\vert \lambda
_{n}\left( s\right) -\lambda _{n}\left( t\right) \right\vert &=&0.
\end{eqnarray*}
The Arzela-Ascoli theorem asserts the existence of a function $\lambda \in
\mathcal{C}\left( \left[ 0,T\right] ,
\mathbb{R}
^{n}\right) $ such that $\left\{ \lambda _{n}\right\} $ converges uniformly
to $\lambda $. Clearly $\left\vert \lambda \right\vert \left( T\right) \leq
L $. Defining the function $\phi $ by $\phi =\psi +\lambda $, we conclude
that \eqref{SP1}-\eqref{SP3} of Definition \ref{skorohodprob} hold. To show
properties \eqref{SP4} and \eqref{SP5} in the same definition, we define the
measure $\mu _{n}$ on $\overline{\Omega }\times S\left( 0,1\right) $ as
\begin{equation*}
\mu _{n}\left( A\right) =\int_{\left[ 0,T\right] }I_{\left\{ \left( s,\phi
_{n}\left( s\right) ,\gamma \left( s,\phi _{n}\left( s\right) \right)
\right) \in A\right\} }d\left\vert \lambda _{n}\right\vert \left( s\right) ,
\end{equation*}
for every Borel set $A\subset \overline{\Omega }\times S\left( 0,1\right) $.
Introducing the notation $\overline{\Omega }_{\left[ 0,t\right] }:=\overline{
\Omega }\cap \left( \left[ 0,t\right] \times
\mathbb{R}
^{n}\right) $, we have, by definition and \eqref{SP5},
\begin{equation*}
\left\vert \lambda _{n}\right\vert \left( t\right) =\mu _{n}\left( \overline{
\Omega }_{\left[ 0,t\right] }\times S\left( 0,1\right) \right) ,
\end{equation*}
and
\begin{equation*}
\lambda _{n}\left( t\right) =\int_{\overline{\Omega }_{\left[ 0,t\right]
}\times S\left( 0,1\right) }\gamma d\mu _{n}\left( s,x,\gamma \right) ,
\end{equation*}
for all $t\in \left[ 0,T\right] $. Since $\left\vert \lambda _{n}\right\vert
\left( T\right) \leq L<\infty $ for all $n$, the Banach-Alaoglu theorem
asserts that a subsequence of $\mu _{n}$ converges to some measure $\mu $
satisfying $\mu \left( \overline{\Omega }\times S\left( 0,1\right) \right)
<\infty $. By weak convergence and the continuity of $\lambda $,
\begin{equation*}
\lambda \left( t\right) =\int_{\overline{\Omega }_{\left[ 0,t\right] }\times
S\left( 0,1\right) }\gamma d\mu \left( s,x,\gamma \right) .
\end{equation*}
Using the fact that $\left( \phi _{n},\lambda _{n}\right) $ solves the
Skorohod problem for $\left( \Omega ,\gamma ,\psi _{n}\right) $, we can draw
several conclusions regarding the properties of the measure $\mu _{n}$ and
then use weak convergence of $\mu _{n}$ to $\mu $ to deduce that $\lambda $
satisfies \eqref{SP4} and \eqref{SP5}. This procedure is carried out in the
proofs of Theorem 2.8 in \cite{Costantini1992}, Theorem 4.8 in \cite
{DupuisIshii1993} and Theorem 5.1 in \cite{NystromOnskog2010a}, so we omit
further details.
$\Box $
\setcounter{equation}{0} \setcounter{theorem}{0}
| 1,425 | 73,518 |
en
|
train
|
0.156.15
|
\section{SDEs with oblique reflection\label{RSDE}}
Using the existence of solutions $\left( \phi ,\lambda \right) $ to the
Skorohod problem for $\left( \Omega ,\gamma ,\psi \right) $, with $\psi \in
\mathcal{C}\left( \left[ 0,T\right] ,
\mathbb{R}
^{n}\right) $ and $\psi \left( 0\right) \in \overline{\Omega }_{0}$, we can
now prove existence and uniqueness of solutions to SDEs with oblique
reflection at the boundary of a bounded, time-dependent domain. To this end,
assume that the triple $\left( X,Y,k\right) $ satisfies
\begin{equation*}
Y\left( t\right) =x+\int_{0}^{t}b\left( s,X\left( s\right) \right)
ds+\int_{0}^{t}\sigma \left( s,X\left( s\right) \right) dM\left( s\right)
+k\left( t\right) ,
\end{equation*}
\begin{equation*}
X\left( t\right) \in \overline{\Omega }_{t},\quad Y\left( t\right) \in
\overline{\Omega }_{t},
\end{equation*}
\begin{equation*}
\left\vert k\right\vert \left( t\right) =\int_{\left( 0,t\right] }I_{\left\{
Y\left( s\right) \in \partial \Omega _{s}\right\} }d\left\vert k\right\vert
\left( s\right) <\infty ,\quad k\left( t\right) =\int_{\left( 0,t\right]
}\gamma \left( s\right) d|k|\left( s\right) ,
\end{equation*}
where $x\in \overline{\Omega }_{0}$ is fixed, $\gamma \left( s\right)
=\gamma \left( s,Y\left( s\right) \right) $ $d\left\vert k\right\vert $
-a.s.~and $M$ is a continuous $\mathcal{F}_{t}$-martingale satisfying
\begin{equation}
d\left\langle M_{i},M_{j}\right\rangle \left( t\right) \leq Cdt,
\label{mart}
\end{equation}
for some $C\in \left( 0,\infty \right) $. Let $\left( X^{\prime },Y^{\prime
},k^{\prime }\right) $ be a similar triple,
but with $x$ replaced by $x^{\prime}\in \overline{\Omega }_{0}$, and $
\gamma^{\prime} \left( s\right) =\gamma \left( s,Y^{\prime}\left( s\right)
\right)$ $d\left\vert k^{\prime}\right\vert $-a.s.
We shall prove uniqueness of solutions by a Picard iteration scheme and a
crucial ingredient is then the estimate provided by the following theorem.
Note that Lemma \ref{rsdetheorem} holds for a general continuous $\mathcal{F}
_{t}$-martingale satisfying \eqref{mart}, whereas in Theorem \ref{main} we
restrict our interest to $M$ being a standard Wiener process.
\begin{lemma}
\label{rsdetheorem}There exists a positive constant $C<\infty $ such that
\begin{equation*}
E\left[ \sup_{0\leq s\leq t}\left\vert Y\left( s\right) -Y^{\prime }\left(
s\right) \right\vert ^{4}\right] \leq C\left( \left\vert x-x^{\prime }\right\vert ^{4}+\int_{0}^{t}E
\left[ \sup_{0\leq u\leq s}\left\vert X\left( u\right) -X^{\prime }\left(
u\right) \right\vert ^{4}\right] ds\right) .
\end{equation*}
\end{lemma}
\noindent \textbf{Proof.} Fix $\varepsilon >0$, let $\lambda >0$ be a
constant to be specified later, and let $w_{\varepsilon }\in \mathcal{C}
^{1,2}\left( \left[ 0,T\right] \times
\mathbb{R}
^{n}\times
\mathbb{R}
^{n},
\mathbb{R}
\right) $ and $\alpha \in \mathcal{C}^{1,2}\left( \overline{\Omega },
\mathbb{R}
\right) $ be the functions defined in Lemmas \ref{testlemma4}-\ref
{testlemma5}. Define the stopping time
\begin{equation*}
\tau =\inf \left\{ s\in \left[ 0,T\right] :\left\vert Y\left( s\right)
-Y^{\prime }\left( s\right) \right\vert \geq \delta \right\} ,
\end{equation*}
where $\delta >0$ is the constant from Remark \ref{spaceremark}. Let $B$
denote the diameter of the smallest ball containing $\bigcup\nolimits_{t\in
\left[ 0,T\right] }\overline{\Omega }_{t}$. Then, assuming without loss of
generality that $B/\delta \geq 1$, we have
\begin{equation*}
E\left[ \sup_{0\leq s\leq t}\left\vert Y\left( s\right) -Y^{\prime }\left(
s\right) \right\vert ^{4}\right] \leq \left( \frac{B}{\delta }\right) ^{4}E
\left[ \sup_{0\leq s\leq t\wedge \tau }\left\vert Y\left( s\right)
-Y^{\prime }\left( s\right) \right\vert ^{4}\right] ,
\end{equation*}
so it is sufficient to prove the theorem for $t\wedge \tau $. To simplify
the notation, however, we write $t$ in place of $t\wedge \tau $ and assume
that $\left\vert Y\left( s\right) -Y^{\prime }\left( s\right) \right\vert
<\delta $ in the proof below.
Define, for all $\left( t,x,y\right) $ such that $\left( t,x\right) ,\left(
t,y\right) \in \overline{\Omega }$, the function $v$ as
\begin{equation*}
v\left( t,x,y\right) =e^{-\lambda \left( \alpha \left( t,x\right) +\alpha
\left( t,y\right) \right) }w_{\varepsilon }\left( t,x,y\right) :=u\left(
t,x,y\right) w_{\varepsilon }\left( t,x,y\right) .
\end{equation*}
The regularity of $v$ is inherited from that of $w_{\varepsilon }$ and $
\alpha $. By It\={o}'s formula we have, suppressing the $s$-dependence for $
X $, $X^{\prime }$, $Y$ and $Y^{\prime }$,
\begin{eqnarray}
&&v\left( t,Y\left( t\right) ,Y^{\prime }\left( t\right) \right) \label{Ito}
\\
&=&v\left( 0,x,x^{\prime }\right) +\int_{0}^{t}D_{s}v\left( s,Y,Y^{\prime
}\right) ds \notag \\
&&+\int_{0}^{t}\left\langle D_{x}v\left( s,Y,Y^{\prime }\right) ,b\left(
s,X\right) \right\rangle ds+\int_{0}^{t}\left\langle D_{y}v\left(
s,Y,Y^{\prime }\right) ,b\left( s,X^{\prime }\right) \right\rangle ds \notag
\\
&&+\int_{0}^{t}\left\langle D_{x}v\left( s,Y,Y^{\prime }\right) ,\sigma
\left( s,X\right) dM\left( s\right) \right\rangle +\int_{0}^{t}\left\langle
D_{y}v\left( s,Y,Y^{\prime }\right) ,\sigma \left( s,X^{\prime }\right)
dM\left( s\right) \right\rangle \notag \\
&&+\int_{0}^{t}\left\langle D_{x}v\left( s,Y,Y^{\prime }\right) ,\gamma
\left( s\right) \right\rangle d\left\vert k\right\vert \left( s\right)
+\int_{0}^{t}\left\langle D_{y}v\left( s,Y,Y^{\prime }\right) ,\gamma
^{\prime }\left( s\right) \right\rangle d\left\vert k^{\prime }\right\vert
\left( s\right) \notag \\
&&+\int_{0}^{t}\text{tr}\left( \left(
\begin{array}{c}
\sigma \left( s,X\right) \\
\sigma \left( s,X^{\prime }\right)
\end{array}
\right) ^{T}D^{2}v\left( s,Y,Y^{\prime }\right) \left(
\begin{array}{c}
\sigma \left( s,X\right) \\
\sigma \left( s,X^{\prime }\right)
\end{array}
\right) d\left\langle M\right\rangle \left( s\right) \right) . \notag
\end{eqnarray}
We define the martingale $N$ as
\begin{equation*}
N\left( t\right) =\int_{0}^{t}\left\langle D_{x}v\left( s,Y,Y^{\prime
}\right) ,\sigma \left( s,X\right) dM\left( s\right) \right\rangle
+\int_{0}^{t}\left\langle D_{y}v\left( s,Y,Y^{\prime }\right) ,\sigma \left(
s,X^{\prime }\right) dM\left( s\right) \right\rangle ,
\end{equation*}
and simplify the remaining terms in \eqref{Ito}. From \eqref{testlemma42},
\eqref{testlemma45} and the regularity of $u$, we have
\begin{equation*}
\int_{0}^{t}D_{s}v\left( s,Y,Y^{\prime }\right) ds\leq C\left( \lambda
\right) \int_{0}^{t}\left( \varepsilon +\frac{\left\vert Y-Y^{\prime
}\right\vert ^{2}}{\varepsilon }\right) ds.
\end{equation*}
Similarly, following the proof of Theorem 5.1 in \cite{DupuisIshii1993}, we
have
\begin{eqnarray}
&&\int_{0}^{t}\left\langle D_{x}v\left( s,Y,Y^{\prime }\right) ,b\left(
s,X\right) \right\rangle ds+\int_{0}^{t}\left\langle D_{y}v\left(
s,Y,Y^{\prime }\right) ,b\left( s,X^{\prime }\right) \right\rangle ds
\label{driftterms} \\
&\leq &C\left( \lambda \right) \left( \varepsilon +\int_{0}^{t}\frac{
\left\vert Y-Y^{\prime }\right\vert ^{2}}{\varepsilon }ds+\int_{0}^{t}\frac{
\left\vert X-X^{\prime }\right\vert ^{2}}{\varepsilon }ds\right) . \notag
\end{eqnarray}
A simple extension of Lemma 5.7 in \cite{DupuisIshii1993} to the
time-dependent case shows that there exists a constant $K_{1}\left( \lambda
\right) <\infty $ such that for all $t\in \left[ 0,T\right] $, $x,y\in
\overline{\Omega }_{t}$, the second order derivatives of $v$ with respect to
the spatial variables satisfy
\begin{equation*}
D^{2}v\left( t,x,y\right) \leq K_{1}\left( \lambda \right) \left( \frac{1}{
\varepsilon }\left(
\begin{array}{cc}
I & -I \\
-I & I
\end{array}
\right) +\left( \varepsilon +\frac{\left\vert x-y\right\vert ^{2}}{
\varepsilon }\right) \left(
\begin{array}{cc}
I & 0 \\
0 & I
\end{array}
\right) \right) .
\end{equation*}
Moreover, it is an easy consequence of the Lipschitz continuity of $\sigma $
that there exists a constant $K_{2}\left( \lambda \right) <\infty $ such
that for all $t\in \left[ 0,T\right] $, $x,y,\xi ,\omega \in \overline{
\Omega }_{t}$,
\begin{equation*}
\left(
\begin{array}{c}
\sigma \left( t,\xi \right) \\
\sigma \left( t,\omega \right)
\end{array}
\right) ^{T}D^{2}v\left( t,x,y\right) \left(
\begin{array}{c}
\sigma \left( t,\xi \right) \\
\sigma \left( t,\omega \right)
\end{array}
\right) \leq K_{2}\left( \lambda \right) \left( \varepsilon +\frac{1}{
\varepsilon }\left( \left\vert \xi -\omega \right\vert ^{2}+\left\vert
x-y\right\vert ^{2}\right) \right) I.
\end{equation*}
Consequently, the last term in \eqref{Ito} may be simplified to
\begin{eqnarray*}
&&\int_{0}^{t}\text{tr}\left( \left(
\begin{array}{c}
\sigma \left( s,X\right) \\
\sigma \left( s,X^{\prime }\right)
\end{array}
\right) ^{T}D^{2}v\left( s,Y,Y^{\prime }\right) \left(
\begin{array}{c}
\sigma \left( s,X\right) \\
\sigma \left( s,X^{\prime }\right)
\end{array}
\right) d\left\langle M\right\rangle \left( s\right) \right) \\
&\leq &C\left( \lambda \right) \left( \varepsilon +\int_{0}^{t}\frac{
\left\vert X-X^{\prime }\right\vert ^{2}}{\varepsilon }ds+\int_{0}^{t}\frac{
\left\vert Y-Y^{\prime }\right\vert ^{2}}{\varepsilon }ds\right) .
\end{eqnarray*}
Considering now the terms containing $\left\vert k\right\vert $ and $
\left\vert k^{\prime }\right\vert $, we see, following the proof of Theorem
5.1 in \cite{DupuisIshii1993}, that
\begin{eqnarray*}
&&\int_{0}^{t}\left\langle D_{x}v\left( s,Y,Y^{\prime }\right) ,\gamma
\left( s\right) \right\rangle d\left\vert k\right\vert \left( s\right)
+\int_{0}^{t}\left\langle D_{y}v\left( s,Y,Y^{\prime }\right) ,\gamma
^{\prime }\left( s\right) \right\rangle d\left\vert k^{\prime }\right\vert
\left( s\right) \\
&\leq &C\int_{0}^{t}u\left( s,Y,Y^{\prime }\right) \frac{\left\vert
Y-Y^{\prime }\right\vert ^{2}}{\varepsilon }d\left\vert k\right\vert \left(
s\right) +C\int_{0}^{t}u\left( s,Y,Y^{\prime }\right) \frac{\left\vert
Y-Y^{\prime }\right\vert ^{2}}{\varepsilon }d\left\vert k^{\prime
}\right\vert \left( s\right) \\
&&-\lambda \int_{0}^{t}v\left( s,Y,Y^{\prime }\right) \left\langle
D_{x}\alpha \left( s,Y\right) ,\gamma \left( s\right) \right\rangle
d\left\vert k\right\vert \left( s\right) \\
&&-\lambda \int_{0}^{t}v\left( s,Y,Y^{\prime }\right) \left\langle
D_{x}\alpha \left( s,Y^{\prime }\right) ,\gamma ^{\prime }\left( s\right)
\right\rangle d\left\vert k^{\prime }\right\vert \left( s\right) .
\end{eqnarray*}
Moreover, \eqref{testlemma41} and \eqref{alfaprop} give, since $d\left\vert
k\right\vert \left( s\right) $ is zero unless $Y\left( s\right) \in \partial
\Omega _{s}$,
\begin{equation*}
-\lambda v\left( s,Y,Y^{\prime }\right) \left\langle D_{x}\alpha \left(
s,Y\right) ,\gamma \left( s\right) \right\rangle \leq -\lambda \chi u\left(
s,Y,Y^{\prime }\right) \frac{\left\vert Y-Y^{\prime }\right\vert ^{2}}{
\varepsilon },
\end{equation*}
so, by putting $\lambda =C/\chi $ all integrals with respect to $\left\vert
k\right\vert $ and $\left\vert k^{\prime }\right\vert $ vanish. Dropping the
$\lambda $-dependence from the constants, \eqref{testlemma41} and \eqref{Ito}
give
\begin{eqnarray*}
\frac{1}{C}\frac{\left\vert Y\left( t\right) -Y^{\prime }\left( t\right)
\right\vert ^{2}}{\varepsilon } &\leq &v\left( t,Y\left( t\right) ,Y^{\prime
}\left( t\right) \right) \leq v\left( 0,x,x^{\prime }\right) +\varepsilon
+N\left( t\right) \\
&&+\int_{0}^{t}\frac{\left\vert Y-Y^{\prime }\right\vert ^{2}}{\varepsilon }
ds+\int_{0}^{t}\frac{\left\vert X-X^{\prime }\right\vert ^{2}}{\varepsilon }
ds.
\end{eqnarray*}
Now applying \eqref{testlemma42} to $v\left( 0,x,x^{\prime }\right) $,
multiplying by $\varepsilon $, squaring, taking supremum and expectations on
both sides, we obtain
\begin{eqnarray*}
E\left[ \sup_{0\leq s\leq t}\left\vert Y\left( s\right) -Y^{\prime }\left(
s\right) \right\vert ^{4}\right] &\leq &C\left( \left\vert x-x^{\prime
}\right\vert ^{4}+\varepsilon ^{4}+\varepsilon ^{2}E\left[ \sup_{0\leq s\leq
t}\left( N\left( s\right) \right) ^{2}\right] \right. \\
&&\left. +\int_{0}^{t}E\left[ \left\vert X-X^{\prime }\right\vert
^{4}+\left\vert Y-Y^{\prime }\right\vert ^{4}\right] ds\right) .
\end{eqnarray*}
Then proceeding as in \eqref{driftterms}, the Doob-Kolmogorov inequality
gives
\begin{eqnarray*}
E\left[ \sup_{0\leq s\leq t}\left( N\left( s\right) \right) ^{2}\right]
&\leq &4E\left[ \left( N\left( t\right) \right) ^{2}\right] \\
&\leq &C\int_{0}^{t}\left( \varepsilon ^{2}+E\left[ \frac{\left\vert
Y-Y^{\prime }\right\vert ^{4}}{\varepsilon ^{2}}+\frac{\left\vert
X-X^{\prime }\right\vert ^{4}}{\varepsilon ^{2}}\right] \right) ds.
\end{eqnarray*}
Letting $\varepsilon $ tend to zero, we obtain
\begin{equation*}
E\left[ \sup_{0\leq s\leq t}\left\vert Y\left( s\right) -Y^{\prime }\left(
s\right) \right\vert ^{4}\right] \leq C\left( \left\vert x-x^{\prime
}\right\vert ^{4}+\int_{0}^{t}E\left[ \left( \left\vert X-X^{\prime
}\right\vert ^{4}+\left\vert Y-Y^{\prime }\right\vert ^{4}\right) \right]
ds\right) ,
\end{equation*}
from which the requested inequality follows by a simple application of
Gronwall's inequality.
$\Box $
\noindent \textbf{Proof of Theorem \ref{main}.}
Given Lemma \ref{contexist} and Lemma \ref{rsdetheorem}, the proof of
Theorem \ref{main} follows exactly along the lines of the proof of Corollary
5.2 in \cite{DupuisIshii1993}, which in turn follows the same outline as in
\cite{LionsSznitman1984}, Theorem 4.3. Note that the main problem is
verifying the adaptedness property of the solutions to the reflected SDE.
This property follows from an approximation of continuous $\mathcal{F}_t$
-adapted semimartingales by bounded variation processes, for which one can
show existence of unique bounded variation solutions to the Skorohod
problem, and these bounded variation solutions will be $\mathcal{F}_t$
-adapted. We omit further details.
$\Box $
\setcounter{equation}{0} \setcounter{theorem}{0}
\section{Fully nonlinear second-order parabolic PDEs\label{PDE}}
In this section, we prove the results on partial differential equations.
First, we recall the definition of viscosity solutions. Let $E\subset
\mathbb{R}^{n+1}$ be arbitrary. If $u:E\rightarrow \mathbb{R}$, then the
parabolic superjet $\mathcal{P}_{E}^{2,+}u\left(s,z\right)$ contains all triplets $
\left( a,p,X\right) \in \mathbb{R}\times \mathbb{R}^{n}\times \mathbb{S}^{n}$
such that if $\left(s,z\right)\in E$ then
\begin{align*}
u\left(t,x\right)& \leq u\left(s,z\right)+a\left(t-s\right)+\langle p,x-z\rangle +\frac{1}{2}\langle
X\left(x-z\right),x-z\rangle \\
& +o\left(|t-s|+|x-z|^{2}\right)\quad \text{as }E\ni \left(t,x\right)\rightarrow \left(s,z\right).
\end{align*}
The parabolic subjet is defined as $\mathcal{P}_{E}^{2,-}u\left(s,z\right)=-\mathcal{P}
_{E}^{2,+}\left(-u\left(s,z\right)\right)$. The closures $\overline{\mathcal{P}}_{E}^{2,+}u\left(s,z\right)$
and $\overline{\mathcal{P}}_{E}^{2,-}u\left(s,z\right)$ are defined in analogue with
(2.6) and (2.7) in \cite{CrandallIshiiLions1992}.
A function $u\in USC(\widetilde{\Omega })$ is a \textit{viscosity subsolution}
of \eqref{huvudekvationen} in $\Omega ^{\circ }$ if, for all $\left(a,p,A\right)\in
\mathcal{P}_{\widetilde{\Omega }}^{2,+}u\left(t,x\right)$, it holds that
\begin{equation*}
a+F\left(t,x,u\left(t,x\right),p,A\right)\leq 0,\quad \text{for}\;\left(t,x\right)\in \Omega ^{\circ }.
\end{equation*}
If, in addition, for $\left(t,x\right)\in \partial \Omega $ it holds that
\begin{equation}
\min \{a+F\left(t,x,u\left(t,x\right),p,A\right),\;\langle p,\widetilde{\gamma }\left(t,x\right)\rangle
+f\left(t,x,u\left(t,x\right)\right)\}\leq 0, \label{eq:BC_viscosity_sub}
\end{equation}
then $u$ is a viscosity subsolution of \eqref{huvudekvationen}-
\eqref{randvillkor} in $\widetilde{\Omega }$. Similarly, a function $v\in
LSC(\widetilde{\Omega })$ is a \textit{viscosity supersolution} of
\eqref{huvudekvationen} in $\Omega ^{\circ }$ if, for all $\left(a,p,A\right)\in
\mathcal{P}_{\widetilde{\Omega }}^{2,-}v\left(t,x\right)$, it holds that
\begin{equation*}
a+F\left(t,x,v\left(t,x\right),p,A\right)\geq 0,\quad \text{for}\;\left(t,x\right)\in \Omega ^{\circ }.
\end{equation*}
If, in addition, for $\left(t,x\right)\in \partial \Omega $ it holds that
\begin{equation}
\max \{a+F\left(t,x,v\left(t,x\right),p,A\right),\;\langle p,\widetilde{\gamma }\left(t,x\right)\rangle
+f\left(t,x,v\left(t,x\right)\right)\}\geq 0, \label{eq:BC_viscosity_sup}
\end{equation}
then $v$ is a viscosity supersolution of \eqref{huvudekvationen}-
\eqref{randvillkor} in $\widetilde{\Omega }$. A function is a \textit{
viscosity solution} if it is both a viscosity subsolution and a viscosity
supersolution. We remark that in the definition of viscosity solutions
above, we may replace $\mathcal{P}_{\widetilde{\Omega }}^{2,+}u\left(t,x\right)$ and $
\mathcal{P}_{\widetilde{\Omega }}^{2,-}v\left(t,x\right)$ by $\overline{\mathcal{P}}_{
\widetilde{\Omega }}^{2,+}u\left(t,x\right)$ and $\overline{\mathcal{P}}_{\widetilde{
\Omega }}^{2,-}v\left(t,x\right)$, respectively. In the following, we often skip
writing \textquotedblleft viscosity" before subsolutions, supersolutions and
solutions. Note also that, given any set $E\subset \mathbb{R}^{n+1}$ and $
t\in \lbrack 0,T]$, we denote, in the following, the time sections of $E$ as
$E_{t}=\{x:\left(t,x\right)\in E\}$.
Next we give two lemmas. The first clarifies that the maximum principle for
semicontinuous functions \cite{CrandallIshii1990}, \cite
{CrandallIshiiLions1992}, holds true in time-dependent domains.
\begin{lemma}
\label{le:timdep_max} Suppose that $\mathcal{O}^{i}=\mathcal{\widehat{O}}
^{i}\cap \left( \left(0,T\right)\times \mathbb{R}^{n}\right) $ for $i=1,\dots ,k$ where
$\mathcal{\widehat{O}}^{i}$ are locally compact subsets of $\mathbb{R}^{n+1}$
. Assume that $u_{i}\in USC(\mathcal{O}^{i})$ and let $\varphi
:\left(t,x_{1},\dots ,x_{k}\right)\rightarrow \varphi \left(t,x_{1},\dots ,x_{k}\right)$ be
defined on an open neighborhood of $\{\left(t,x\right):t\in \left(0,T\right)\;\text{and}\;x_{i}\in
\mathcal{O}_{t}^{i}\;\text{for}\;i=1,\dots ,k\}$ and such that $\varphi $ is
once continuously differentiable in $t$ and twice continuously
differentiable in $\left(x_{1},\dots ,x_{k}\right)$. Suppose that $s\in \left(0,T\right)$ and $
z_{i}\in \mathcal{O}_{s}^{i}$ and
\begin{equation*}
w\left(t,x_{1},\dots ,x_{k}\right)\equiv u_{1}\left(t,x_{1}\right)+\dots +u_{k}\left(t,x_{k}\right)-\varphi
\left(t,x_{1},\dots ,x_{k}\right)\leq w\left(s,z_{1},\dots ,z_{k}\right),
\end{equation*}
for $0<t<T$ and $x_{i}\in \mathcal{O}_{t}^{i}$. Assume, moreover, that there
is an $r>0$ such that for every $M>0$ there is a $C$ such that, for $
i=1,\dots ,k$,
\begin{align}
& b_{i}\leq C,\;\text{whenever }\left( b_{i},q_{i},X_{i}\right) \in \mathcal{
P}_{\mathcal{O}^{i}}^{2,+}u_{i}\left(t,x\right)\text{ with }\left\Vert X_{i}\right\Vert
\leq M\text{ and} \notag \label{eq:besvarlig_assumption} \\
&
|x_{i}-z_{i}|+|t-s|+|u_{i}\left(t,x_{i}\right)-u_{i}\left(s,z_{i}\right)|+|q_{i}-D_{x_{i}}\varphi
\left(s,z_{1},\dots ,z_{k}\right)|\leq r.
\end{align}
Then, for each $\varepsilon >0$ there exist $\left(b_{i},X_{i}\right)$ such that
\begin{equation*}
\left(b_{i},D_{x_{i}}\varphi \left(s,z_{1},\dots ,z_{k}\right),X_{i}\right)\in \overline{\mathcal{P
}}_{\mathcal{O}^{i}}^{2,+}u_{i}\left(s,z_{i}\right),\quad \text{for}\;i=1,\dots ,k,
\end{equation*}
\begin{equation*}
-\left( \frac{1}{\varepsilon }+||A||\right) I\leq \left(
\begin{array}{ccc}
X_{1} & \dots & 0 \\
\vdots & \ddots & \vdots \\
0 & \dots & X_{k}
\end{array}
\right) \leq A+\varepsilon A^{2},
\end{equation*}
and
\begin{equation*}
b_{1}+\dots +b_{k}=D_{t}\varphi \left(s,z_{1},\dots ,z_{k}\right),
\end{equation*}
where $A=\left( D_{x}^{2}\varphi \right) \left(s,z_{1},\dots ,z_{k}\right)$.
\end{lemma}
\noindent \textbf{Proof.} Following ideas from page 1008 in \cite
{CrandallIshii1990} we let $K_{i}$ be compact neighborhoods of $\left(s,z_{i}\right)$ in $
\mathcal{O}^{i}$ and define the extended functions $\widetilde{u}_{1},\dots ,
\widetilde{u}_{k}$, $\widetilde{u}_{i}\in USC\left(\mathbb{R}^{n+1}\right)$ for $
i=1,\dots ,k$, by
\begin{equation*}
\widetilde{u}_{i}\left(t,x\right)=\left\{
\begin{array}{rl}
u_{i}\left(t,x\right), & \text{if}\quad \left(t,x\right)\in K_{i}, \\
-\infty , & \text{otherwise.}
\end{array}
\right.
\end{equation*}
From the definitions of sub and superjets it follows, for $i=1,\dots ,k$,
that
\begin{equation}
\mathcal{P}_{\mathbb{R}^{n+1}}^{2,+}\widetilde{u}_{i}\left(t,x\right)=\mathcal{P}_{
\mathcal{O}^{i}}^{2,+}u_{i}\left(t,x\right), \label{eq:subjet_lika_1}
\end{equation}
for $\left(t,x\right)$ in the interior of $K_{i}$ relative to $\mathcal{O}^{i}$.
Excluding the trivial case $u_{i}\left(t,x\right)=-\infty $, the function $
\widetilde{u}_{i}\left(t,x\right)$ cannot approach $u_{i}\left(s,z\right)$ unless $\left(t,x\right)\in K_{i}$
and it follows that
\begin{equation}
\overline{\mathcal{P}}_{\mathbb{R}^{n+1}}^{2,+}\widetilde{u}_{i}\left(t,x\right)=
\overline{\mathcal{P}}_{\mathcal{O}^{i}}^{2,+}u_{i}\left(t,x\right).
\label{eq:subjet_lika_2}
\end{equation}
Setting $\widetilde{w}\left(t,x_{1},\dots ,x_{k}\right)=\widetilde{u}
_{1}\left(t,x_{1}\right)+\dots +\widetilde{u}_{k}\left(t,x_{k}\right)$ we see that $\left(s,z_{1},\dots
,z_{k}\right)$ is also a maximum of the function $\left(\widetilde{w}-\varphi
\right)\left(t,x_{1},\dots ,x_{k}\right)$. Moreover, we note that the proof of Lemma 8 in
\cite{CrandallIshii1990} still works if (27) in \cite{CrandallIshii1990} is
replaced by assumption \eqref{eq:besvarlig_assumption}. These facts,
together with \eqref{eq:subjet_lika_1} and \eqref{eq:subjet_lika_2}, allow
us to complete the proof of Lemma \ref{le:timdep_max} by using Theorem 7 in
\cite{CrandallIshii1990}.
$\Box $
Before proving the next lemma, let us note that standard arguments imply
that we can assume $\lambda >0$ in \eqref{ass_F_nondecreasing}. Indeed, if $
\lambda \leq 0$ then for $\bar{\lambda}<\lambda $ the functions $e^{\bar{
\lambda}t}u\left(t,x\right)$ and $e^{\bar{\lambda}t}v\left(t,x\right)$ are, respectively, sub- and
supersolutions of \eqref{huvudekvationen}-\eqref{randvillkor} with $
F\left(t,x,r,p,X\right)$ and $f\left(t,x,r\right)$ replaced by
\begin{equation}
-\bar{\lambda}r+e^{\bar{\lambda}t}F\left(t,x,e^{-\bar{\lambda}t}r,e^{-\bar{\lambda
}t}p,e^{-\bar{\lambda}t}X\right)\quad \text{and}\quad e^{\bar{\lambda}t}f\left(t,x,e^{-
\bar{\lambda}t}r\right). \label{assume_lambda_positive}
\end{equation}
Hence, in the following proof we assume $\lambda >0$ in
\eqref{ass_F_nondecreasing}. Next we prove the following version of the
comparison principle.
\begin{lemma}
\label{maxrand} Let $\Omega ^{\circ }$ be a time-dependent domain satisfying
\eqref{timesect}. Assume \eqref{ass_F_cont}-\eqref{ass_F_XY}. Let $u\in
USC(\widetilde{\Omega })$ be a viscosity subsolution and $v\in LSC(
\widetilde{\Omega })$ a viscosity supersolution of \eqref{huvudekvationen}
in $\Omega ^{\circ }$. Then $\sup_{\widetilde{\Omega}}\left(u-v\right)\leq \sup_{\partial
\Omega \cup \overline{\Omega }_{0}}\left(u-v\right)^{+}$.
\end{lemma}
\noindent \textbf{Proof.} We may assume, by replacing $T>0$ by a smaller
number if necessary, that $u$ and $-v$ are bounded from above on $\widetilde{
\Omega}$. We can also assume that $\sup_{\widetilde{\Omega}}u-v$ is attained
by using the well known fact that if $u$ is a subsolution of
\eqref{huvudekvationen}, then so is
\begin{equation*}
u_{\beta }\left(t,x\right)=u\left(t,x\right)-\frac{\beta }{T-t},
\end{equation*}
for all $\beta >0$. Assume that $\sup_{\widetilde{\Omega }
}u-v=u\left(s,z\right)-v\left(s,z\right)>u\left(t,x\right)-v\left(t,x\right)$ for some $\left(s,z\right)\in \Omega ^{\circ }$ and
for all $\left(t,x\right)\in \partial \Omega \cup \overline{\Omega }_{0}$. As in
Section 5.B in \cite{CrandallIshiiLions1992}, we use the fact that if $u$ is
a viscosity subsolution, then so is $\bar{u}=u-K$ for every constant $K>0$.
Choose $K>0$ such that $\bar{u}\left(t,x\right)-v\left(t,x\right)\leq 0$ for all $\left(t,x\right)\in
\partial \Omega \cup \overline{\Omega }_{0}$ and such that $\bar{u}
\left(s,z\right)-v\left(s,z\right):=\delta >0$. Using Lemma \ref{le:timdep_max} in place of
Theorem 8.3 in \cite{CrandallIshiiLions1992} and by observing that
assumptions \eqref{ass_F_cont}-\eqref{ass_F_XY} imply (assuming $\lambda >0$
as is possible by \eqref{assume_lambda_positive}) the corresponding
assumptions in \cite{CrandallIshiiLions1992}, we see that we can proceed as
in the proof of Theorem 8.2 in \cite{CrandallIshiiLions1992} to complete the
proof by deriving a contradiction.
$\Box $
\noindent \textbf{Proof of Theorem \ref{comparison}. }In the following we
may assume, by replacing $T>0$ by a smaller number if necessary, that $u$
and $-v$ in Theorem \ref{comparison} are bounded from above on $\widetilde{
\Omega }$. We will now produce approximations of $u$ and $v$ which allow us
to deal only with the inequalities involving $F$ and not the boundary
conditions. To construct these approximating functions, we note that Lemma
\ref{testlemma5} applies with $\gamma $ replaced by $\widetilde{\gamma }$ as
well. Thus, there exists a $\mathcal{C}^{1,2}$ function $\alpha $ defined on
an open neighborhood of $\widetilde{\Omega }$ with the property that $\alpha
\geq 0$ on $\widetilde{\Omega }$ and $\left\langle D_{x}\alpha \left(t,x\right),
\widetilde{\gamma }\left(t,x\right)\right\rangle \geq 1$ for $x\in \partial \Omega _{t}$
, $t\in [0,T]$. For $\beta _{1}>0$, $\beta _{2}>0$ and $\beta _{3}>0$ we
define, for $\left(t,x\right)\in \widetilde{\Omega}$,
\begin{align}
u_{\beta _{1},\beta _{2},\beta _{3}}\left(t,x\right)& =u\left(t,x\right)-\beta _{1}\alpha
\left(t,x\right)-\beta _{2}-\frac{\beta _{3}}{T-t}, \notag \label{approxdef} \\
v_{\beta _{1},\beta _{2}}\left(t,x\right)& =v\left(t,x\right)+\beta _{1}\alpha \left(t,x\right)+\beta _{2}.
\end{align}
Given $\beta _{3},\beta _{2}>0$ there is $\beta _{1}=\beta _{1}\left(\beta
_{2}\right)\in \left(0,\beta _{2}\right)$ for which $u_{\beta _{1},\beta _{2},\beta _{3}}$
and $v_{\beta _{1},\beta _{2}}$ are sub- and supersolutions of
\eqref{huvudekvationen}-\eqref{randvillkor}, with $f\left(t,x,r\right)$ replaced by $
f\left(t,x,r\right)+\beta _{1}$ and $f\left(t,x,r\right)-\beta _{1}$, respectively. Indeed, if $
\left(a,p,X\right)\in \mathcal{P}_{\widetilde{\Omega }}^{2,+}u_{\beta _{1},\beta
_{2},\beta _{3}}\left(t,x\right)$, then
\begin{equation}
\left( a+\beta _{1}\alpha _{t}\left(t,x\right)+\frac{\beta _{3}}{\left(T-t\right)^{2}},p+\beta
_{1}D\alpha \left(t,x\right),X+\beta _{1}D^{2}\alpha \left(t,x\right)\right) \in \mathcal{P}_{
\widetilde{\Omega }}^{2,+}u\left(t,x\right). \label{eq:punkt_i_subjet}
\end{equation}
Hence, if $u$ satisfies \eqref{randvillkor}, then $\langle p+\beta
_{1}D\alpha \left(t,x\right),\widetilde{\gamma }\left(t,x\right)\rangle +f\left(t,x,u\left(t,x\right)\right)\leq 0$ and
since $\langle D\alpha \left(t,x\right),\widetilde{\gamma }\left(t,x\right)\rangle \geq 1$, $
u_{\beta _{1},\beta _{2},\beta _{3}}\leq u$ and by
\eqref{ass_f_nondecreasing} we obtain
\begin{equation}
\langle p,\widetilde{\gamma }\left(t,x\right)\rangle +f\left(t,x,u_{\beta _{1},\beta
_{2},\beta _{3}}\right)+\beta _{1}\leq 0. \label{eq:RV_uppfylld_approx}
\end{equation}
Using \eqref{eq:punkt_i_subjet} we also see that if $u$ satisfies
\eqref{huvudekvationen} then
\begin{equation*}
a+\beta _{1}\alpha _{t}\left(t,x\right)+\frac{\beta _{3}}{\left(T-t\right)^{2}}+F\left(t,x,u,p+\beta
_{1}D\alpha \left(t,x\right),X+\beta _{1}D^{2}\alpha \left(t,x\right)\right)\leq 0.
\end{equation*}
Using \eqref{ass_F_nondecreasing} and \eqref{ass_F_boundary}, assuming also
that the support of $\alpha $ lies within $U$, we have
\begin{align}
a+\beta _{1}\alpha _{t}\left(t,x\right)+F\left(t,x,u_{\beta _{1},\beta _{2},\beta
_{3}},p,X\right)+\lambda \beta _{2}& \label{eq:EQ_uppfylld_approx} \\
-m_{2}\left( |\beta _{1}D\alpha \left(t,x\right)|+||\beta _{1}D^{2}\alpha
\left(t,x\right)||\right) & \leq 0. \notag
\end{align}
From \eqref{eq:RV_uppfylld_approx} and \eqref{eq:EQ_uppfylld_approx} it
follows that, given $\beta _{2},\beta _{3}>0$, there exists $\beta _{1}\in
\left(0,\beta _{2}\right)$ such that $u_{\beta _{1},\beta _{2},\beta _{3}}$ is a
subsolution of \eqref{huvudekvationen}-\eqref{randvillkor} with $f\left(t,x,u\right)$
replaced by $f\left(t,x,u\right)+\beta _{1}$. The fact that $v_{\beta _{1},\beta _{2}}$
is a supersolution follows by a similar calculation.
To complete the proof of the comparison principle, it is sufficient to prove
that
\begin{equation*}
\max_{\widetilde{\Omega }}\left(u_{\beta _{1},\beta _{2},\beta _{3}}-v_{\beta
_{1},\beta _{2}}\right)\leq 0,
\end{equation*}
holds for all $\beta _{2}>0$ and $\beta _{3}>0$. Assume that
\begin{equation*}
\sigma =\max_{\widetilde{\Omega }}\left(u_{\beta _{1},\beta _{2},\beta
_{3}}-v_{\beta _{1},\beta _{2}}\right)>0.
\end{equation*}
We will derive a contradiction for any $\beta _{3}$ if $\beta _{2}$ (and
hence $\beta _{1}$) is small enough. To simplify notation, we write, in the
following, $u,v$ in place of $u_{\beta _{1},\beta _{2},\beta _{3}},v_{\beta
_{1},\beta _{2}}$. By Lemma \ref{maxrand}, $u\left(0,\cdot \right)\leq v\left(0,\cdot \right)$,
upper semicontinuity of $u-v$ and boundedness from above of $u-v$, we
conclude that for any $\beta _{3}>0$
\begin{equation}
\sigma =\left(u-v\right)\left(s,z\right),\quad \text{for some }z\in \partial \Omega _{s}\text{ and
}s\in \left( 0,T\right) . \label{sigma}
\end{equation}
Let $\widetilde{B}\left(\left(s,z\right),\delta \right)=\{\left(t,x\right):\left\vert \left(t,x\right)-\left(s,z\right)\right\vert
\leq \delta \}$ and define
\begin{equation*}
E:=\widetilde{B}\left(\left(s,z\right),\delta \right)\cap \widetilde{\Omega }.
\end{equation*}
By Remark \ref{spaceremark}, there exists $\theta \in \left(0,1\right)$ such that
\begin{equation}
\left\langle x-y,\widetilde{\gamma }\left( t,x\right) \right\rangle \geq
-\theta \left\vert x-y\right\vert ,\quad \text{for all }\left( t,x\right)
\in E\setminus \Omega ^{\circ }\text{ and }\left( t,y\right) \in E.
\label{make_use_of_cone}
\end{equation}
By decreasing $\delta $ if necessary, we may assume that
\eqref{ass_F_boundary} holds in $E$. From now on, we restrict our attention
to events in the set $E$. By Lemma \ref{testlemma4} we obtain, for any $
\theta \in \left(0,1\right)$, a family $\left\{ w_{\varepsilon }\right\} _{\varepsilon
>0}$ of functions $w_{\varepsilon }\in \mathcal{C}^{1,2}\left( \left[ 0,T
\right] \times
\mathbb{R}
^{n}\times
\mathbb{R}
^{n},
\mathbb{R}
\right) $ and positive constants $\chi ,C$ (independent of $\varepsilon $)
such that \eqref{testlemma41}, \eqref{testlemma42}, \eqref{testlemma45}-
\eqref{testlemma47} as well as
\begin{equation}
\left\langle D_{x}w_{\varepsilon }\left( t,x,y\right) ,\widetilde{\gamma }
\left( t,x\right) \right\rangle \geq -C\frac{\left\vert x-y\right\vert ^{2}}{
\varepsilon },\quad \text{if\quad }\left\langle x-y,\widetilde{\gamma }
\left( t,x\right) \right\rangle \geq -\theta \left\vert x-y\right\vert ,
\label{test3}
\end{equation}
\begin{equation}
\left\langle D_{y}w_{\varepsilon }\left( t,x,y\right) ,\widetilde{\gamma }
\left( t,y\right) \right\rangle \geq -C\frac{\left\vert x-y\right\vert ^{2}}{
\varepsilon },\quad \text{if\quad }\left\langle y-x,\widetilde{\gamma }
\left( t,y\right) \right\rangle \geq -\theta \left\vert x-y\right\vert ,
\label{test4}
\end{equation}
hold. Note that \eqref{test3} and \eqref{test4} are direct analogues to
\eqref{testlemma43} and \eqref{testlemma44} but with $\gamma $ replaced by $
\widetilde{\gamma }$.
Let $\varepsilon >0$ be given and define
\begin{equation*}
\Phi \left(t,x,y\right)=u\left(t,x\right)-v\left(t,y\right)-\varphi \left(t,x,y\right),
\end{equation*}
where
\begin{equation*}
\varphi \left(t,x,y\right)=w_{\varepsilon }\left(t,x,y\right)+f\left(s,z,u\left(s,z\right)\right)\langle y-x,\widetilde{
\gamma }\left(s,z\right)\rangle +\beta _{1}|x-z|^{2}+\left(t-s\right)^{2}.
\end{equation*}
Let $\left(t_{\varepsilon },x_{\varepsilon },y_{\varepsilon }\right)$ be a maximum
point of $\Phi $. From \eqref{testlemma41} and \eqref{testlemma42} we have
\begin{align}
\sigma & -C\varepsilon \leq \Phi \left(s,z,z\right)\leq \Phi \left(t_{\varepsilon
},x_{\varepsilon },y_{\varepsilon }\right)\leq u\left(t_{\varepsilon },x_{\varepsilon
}\right)-v\left(t_{\varepsilon },y_{\varepsilon }\right)-\chi \frac{\left\vert x_{\varepsilon
}-y_{\varepsilon }\right\vert ^{2}}{\varepsilon }
\label{eq:maxet_gor_att_allt_konvergerar} \\
& -f\left(s,z,u\left(s,z\right)\right)\langle y_{\varepsilon }-x_{\varepsilon },\widetilde{\gamma }
\left(s,z\right)\rangle -\beta _{1}|x_{\varepsilon }-z|^{2}-\left(t_{\varepsilon }-s\right)^{2}.
\notag
\end{align}
From this we first see that
\begin{equation*}
|x_{\varepsilon }-y_{\varepsilon }|\rightarrow 0\qquad \text{as}\qquad
\varepsilon \rightarrow 0.
\end{equation*}
Therefore, using the upper semi-continuity of $u-v$ and
\eqref{eq:maxet_gor_att_allt_konvergerar} we also obtain
\begin{align}
\frac{|x_{\varepsilon }-y_{\varepsilon }|^{2}}{\varepsilon }& \rightarrow
0,\qquad x_{\varepsilon },y_{\varepsilon }\rightarrow z,\qquad
t_{\varepsilon }\rightarrow s, \notag \label{as_ep_to_0} \\
u\left(t_{\varepsilon },x_{\varepsilon }\right)& \rightarrow u\left(s,z\right),\qquad
v\left(t_{\varepsilon },y_{\varepsilon }\right)\rightarrow v\left(s,z\right),
\end{align}
as $\varepsilon \rightarrow 0$. In the following we assume $\varepsilon $ to
be so small that $\left(t_{\varepsilon },x_{\varepsilon }\right)\in E$.
We introduce the notation
\begin{align*}
\bar{p}& =D_{x}\varphi \left(t_{\varepsilon },x_{\varepsilon },y_{\varepsilon
}\right)=D_{x}w_{\varepsilon }\left(t_{\varepsilon },x_{\varepsilon },y_{\varepsilon
}\right)-f\left(s,z,u\left(s,z\right)\right)\widetilde{\gamma }\left(s,z\right)+2\beta _{1}\left( x_{\varepsilon
}-z\right) , \\
\bar{q}& =D_{y}\varphi \left(t_{\varepsilon },x_{\varepsilon },y_{\varepsilon
}\right)=D_{y}w_{\varepsilon }\left(t_{\varepsilon },x_{\varepsilon },y_{\varepsilon
}\right)+f\left(s,z,u\left(s,z\right)\right)\widetilde{\gamma }\left(s,z\right),
\end{align*}
and observe that
\begin{align}
& \langle \bar{p},\widetilde{\gamma }\left(t_{\varepsilon },x_{\varepsilon
}\right)\rangle +f\left(t_{\varepsilon },x_{\varepsilon },u\left(t_{\varepsilon
},x_{\varepsilon }\right)\right) \notag \\
=& \langle D_{x}w_{\varepsilon }\left(t_{\varepsilon },x_{\varepsilon
},y_{\varepsilon }\right),\widetilde{\gamma }\left(t_{\varepsilon },x_{\varepsilon
}\right)\rangle +f\left(t_{\varepsilon },x_{\varepsilon },u\left(t_{\varepsilon
},x_{\varepsilon }\right)\right) \notag \\
& -f\left(s,z,u\left(s,z\right)\right)\langle \widetilde{\gamma }\left(s,z\right),\widetilde{\gamma }
\left(t_{\varepsilon },x_{\varepsilon }\right)\rangle +2\beta _{1}\langle
x_{\varepsilon }-z,\widetilde{\gamma }\left(t_{\varepsilon },x_{\varepsilon
}\right)\rangle , \label{peq}
\end{align}
and
\begin{eqnarray}
&&-\langle \bar{q},\widetilde{\gamma }\left(t_{\varepsilon },y_{\varepsilon
}\right)\rangle +f\left(t_{\varepsilon },y_{\varepsilon },v\left(t_{\varepsilon
},y_{\varepsilon }\right)\right) \notag \\
&=&-\langle D_{y}w_{\varepsilon }\left(t_{\varepsilon },x_{\varepsilon
},y_{\varepsilon }\right),\widetilde{\gamma }\left(t_{\varepsilon },y_{\varepsilon
}\right)\rangle +f\left(t_{\varepsilon },y_{\varepsilon },v\left(t_{\varepsilon
},y_{\varepsilon }\right)\right) \notag \\
&&-f\left(s,z,u\left(s,z\right)\right)\left\langle \widetilde{\gamma }\left(s,z\right) ,\widetilde{
\gamma }\left(t_{\varepsilon },y_{\varepsilon }\right)\right\rangle. \label{qeq}
\end{eqnarray}
Using \eqref{smooth_gamma}, \eqref{f_kontinuerlig},
\eqref{ass_f_nondecreasing} and \eqref{as_ep_to_0}-\eqref{qeq} we see that
if $\varepsilon $ is small enough, then
\begin{align}
& \langle D_{x}w_{\varepsilon }\left(t_{\varepsilon },x_{\varepsilon
},y_{\varepsilon }\right),\widetilde{\gamma }\left(t_{\varepsilon },x_{\varepsilon
}\right)\rangle \geq -\frac{\beta _{1}}{2} \notag
\label{boundary_cond_elimination_2} \\
& \implies \langle \bar{p},\widetilde{\gamma }\left(t_{\varepsilon
},x_{\varepsilon }\right)\rangle +f\left(t_{\varepsilon },x_{\varepsilon
},u\left(t_{\varepsilon },x_{\varepsilon }\right)\right)+\beta _{1}>0, \notag \\
& \langle D_{y}w_{\varepsilon }\left(t_{\varepsilon },x_{\varepsilon
},y_{\varepsilon }\right),\widetilde{\gamma }\left(t_{\varepsilon },y_{\varepsilon
}\right)\rangle \geq -\frac{\beta _{1}}{2} \notag \\
& \implies -\langle \bar{q},\widetilde{\gamma }\left(t_{\varepsilon
},y_{\varepsilon }\right)\rangle +f\left(t_{\varepsilon },y_{\varepsilon
},v\left(t_{\varepsilon },y_{\varepsilon }\right)\right)-\beta _{1}<0.
\end{align}
Moreover, from \eqref{make_use_of_cone}-\eqref{test4}, we also have
\begin{align}
& \langle D_{x}w_{\varepsilon }\left(t_{\varepsilon },x_{\varepsilon
},y_{\varepsilon }\right),\widetilde{\gamma }\left(t_{\varepsilon },x_{\varepsilon
}\right)\rangle \geq -C\frac{|x_{\varepsilon }-y_{\varepsilon }|^{2}}{\varepsilon }
,\quad \text{if }x_{\varepsilon }\in \partial \Omega _{t_{\varepsilon }},
\notag \label{boundary_cond_elimination_3} \\
& \langle D_{y}w_{\varepsilon }\left(t_{\varepsilon },x_{\varepsilon
},y_{\varepsilon }\right),\widetilde{\gamma }\left(t_{\varepsilon },y_{\varepsilon
}\right)\rangle \geq -C\frac{|x_{\varepsilon }-y_{\varepsilon }|^{2}}{\varepsilon }
,\quad \text{if }y_{\varepsilon }\in \partial \Omega _{t_{\varepsilon }}.
\end{align}
Using \eqref{boundary_cond_elimination_2} and
\eqref{boundary_cond_elimination_3}, it follows by the definition of
viscosity solutions that if $\varepsilon $ is small enough, say $
0<\varepsilon <\varepsilon _{\beta _{1}}$, then
\begin{equation}
a+F\left(t_{\varepsilon },x_{\varepsilon },u\left(t_{\varepsilon },x_{\varepsilon }\right),
\bar{p},X\right)\leq 0\leq -b+F\left(t_{\varepsilon },y_{\varepsilon },v\left(t_{\varepsilon
},y_{\varepsilon }\right),-\bar{q},-Y\right), \label{flyttauppskiten}
\end{equation}
whenever
\begin{equation*}
\left( a,\bar{p},X\right) \in \overline{\mathcal{P}}_{\widetilde{\Omega }
}^{2,+}u\left(t_{\varepsilon },x_{\varepsilon }\right)\quad \text{and}\quad \left( -b,-
\bar{q},-Y\right) \in \overline{\mathcal{P}}_{\widetilde{\Omega }
}^{2,-}v\left(t_{\varepsilon },y_{\varepsilon }\right).
\end{equation*}
We next intend to use Lemma \ref{le:timdep_max} to show the existence of
such matrices $X$, $Y$ and numbers $a,b$. Hence, we have to verify condition
\eqref{eq:besvarlig_assumption}. To do so, we observe that
\eqref{boundary_cond_elimination_2} holds true with $\bar{p}$ and $\bar{q}$
replaced by any $p$ and $q$ satisfying $|\bar{p}-p|\leq r$ and $|\bar{q}
-q|\leq r$ if we choose $r=r\left(\varepsilon \right)$ small enough. It follows that
also \eqref{flyttauppskiten} holds with these $p$ and $q$ and we can
conclude
\begin{equation*}
a\leq -F\left(t_{\varepsilon },x_{\varepsilon },u\left(t_{\varepsilon },x_{\varepsilon
}\right),p,X\right)\leq C\quad \text{and}\quad b\leq F\left(t_{\varepsilon },y_{\varepsilon
},v\left(t_{\varepsilon },y_{\varepsilon }\right),-q,-Y\right)\leq C,
\end{equation*}
for some $C=C\left(\varepsilon \right)$ whenever $\left(a,p,X\right)$ and $\left( b,q,Y\right) $
are as in \eqref{eq:besvarlig_assumption}. Hence, condition
\eqref{eq:besvarlig_assumption} holds and Lemma \ref{le:timdep_max} gives
the existence of $X,Y\in \mathbb{S}^{n}$ and $a,b\in \mathbb{R}$ such that
\begin{align}
& -\left( \frac{1}{\varepsilon }+||A||\right) I\leq \left(
\begin{array}{cc}
X & 0 \\
0 & Y
\end{array}
\right) \leq A+\varepsilon A^{2}, \notag \label{eq:result_from_CIL92} \\
& \left(a,\bar{p},X\right)\in \overline{\mathcal{P}}_{\widetilde{\Omega }
}^{2,+}u\left(t_{\varepsilon },x_{\varepsilon }\right),\quad \left(-b,-\bar{q},-Y\right)\in
\overline{\mathcal{P}}_{\widetilde{\Omega }}^{2,-}v\left(t_{\varepsilon
},y_{\varepsilon }\right), \notag \\
& a+b=D_{t}\varphi \left( t_{\varepsilon },x_{\varepsilon },y_{\varepsilon
}\right) =D_{t}w_{\varepsilon }\left( t_{\varepsilon },x_{\varepsilon
},y_{\varepsilon }\right) +2\left(t_{\varepsilon }-s\right),
\end{align}
where $A=D_{x,y}^{2}\left( w_{\varepsilon }\left(t_{\varepsilon },x_{\varepsilon
},y_{\varepsilon }\right)+\beta _{1}|x_{\varepsilon }-z|^{2}\right) $. Using
\eqref{ass_F_nondecreasing}, \eqref{testlemma45} and \eqref{flyttauppskiten}
we obtain, by recalling that we can assume $\lambda >0$ in
\eqref{ass_F_nondecreasing}, that
\begin{eqnarray*}
0 &\geq &D_{t}w_{\varepsilon }\left( t_{\varepsilon },x_{\varepsilon
},y_{\varepsilon }\right) +2\left(t_{\varepsilon }-s\right) \\
&&+F\left(t_{\varepsilon },x_{\varepsilon },u\left(t_{\varepsilon },x_{\varepsilon }\right),
\bar{p},X\right)-F\left(t_{\varepsilon },y_{\varepsilon },v\left(t_{\varepsilon
},y_{\varepsilon }\right),-\bar{q},-Y\right) \\
&\geq &-C\frac{|x_{\varepsilon }-y_{\varepsilon }|^{2}}{\varepsilon }
+2\left(t_{\varepsilon }-s\right)+\lambda \left(u\left(t_{\varepsilon },x_{\varepsilon
}\right)-v\left(t_{\varepsilon },y_{\varepsilon }\right)\right) \\
&&+F\left(t_{\varepsilon },x_{\varepsilon },u\left(t_{\varepsilon },x_{\varepsilon }\right),
\bar{p},X\right)-F\left(t_{\varepsilon },y_{\varepsilon },u\left(t_{\varepsilon
},x_{\varepsilon }\right),-\bar{q},-Y\right).
\end{eqnarray*}
Next, assumption \eqref{ass_F_boundary} gives
\begin{eqnarray}
0 &\geq &-C\bar{s}+2\left(t_{\varepsilon }-s\right)+\lambda \left(u\left(t_{\varepsilon
},x_{\varepsilon }\right)-v\left(t_{\varepsilon },y_{\varepsilon }\right)\right) \notag \\
&&+F\left(t_{\varepsilon },x_{\varepsilon },u\left(t_{\varepsilon },x_{\varepsilon }\right),-
\bar{q},X-C\bar{s}I\right)-F\left(t_{\varepsilon },y_{\varepsilon },u\left(t_{\varepsilon
},x_{\varepsilon }\right),-\bar{q},-Y+C\bar{s}I\right) \notag \\
&&-m_{2}\left(|\bar{p}+\bar{q}|+C\bar{s}\right)-m_{2}\left(C\bar{s}\right),
\label{sista_med_extrasteg}
\end{eqnarray}
where we use the notation $\bar{s}=|x_{\varepsilon }-y_{\varepsilon
}|^{2}/\varepsilon $. Note that since the eigenvalues of $\varepsilon A^{2}$
are given by $\varepsilon \lambda ^{2}$, where $\lambda $ is an eigenvalue
of $A$, and since $\lambda $ is bounded, $A+\varepsilon A^{2}\leq CA$.
Hence, by \eqref{testlemma47} we obtain
\begin{equation*}
A+\varepsilon A^{2}\leq \frac{C}{\varepsilon }\left(
\begin{array}{cc}
I & -I \\
-I & I
\end{array}
\right) +C\bar{s}I_{2n},
\end{equation*}
and since $||A||\leq C/\varepsilon $ for some large $C$, we also conclude
that \eqref{eq:result_from_CIL92} implies
\begin{equation*}
-\frac{C}{\varepsilon }I_{2n}\leq \left(
\begin{array}{cc}
X-C\bar{s}I & 0 \\
0 & Y-C\bar{s}I
\end{array}
\right) \leq \frac{C}{\varepsilon }\left(
\begin{array}{cc}
I & -I \\
-I & I
\end{array}
\right) .
\end{equation*}
Using the above inequality, assumption \eqref{ass_F_XY},
\eqref{sista_med_extrasteg}, the definition of $\bar{q}$ and
\eqref{testlemma46} we have
\begin{eqnarray*}
0 &\geq &-C\bar{s}+2\left(t_{\varepsilon }-s\right)+\lambda \left(u\left(t_{\varepsilon
},x_{\varepsilon }\right)-v\left(t_{\varepsilon },y_{\varepsilon }\right)\right) \\
&&-m_{1}\left(C|x_{\varepsilon }-y_{\varepsilon }|+2C\bar{s}\right)-m_{2}\left(|\bar{p}+\bar{
q}|+C\bar{s}\right)-m_{2}\left(C\bar{s}\right),
\end{eqnarray*}
when $0<\varepsilon <\varepsilon _{\beta _{1}}$ and $u\left(t_{\varepsilon
},x_{\varepsilon }\right)\geq v\left(t_{\varepsilon },y_{\varepsilon }\right)$. Sending first
$\varepsilon $ and then $\beta _{2}$ to zero (the latter implies $\beta
_{1}\rightarrow 0$) and using \eqref{testlemma46} we obtain a contradiction.
This completes the proof of the comparison principle in Theorem \ref
{comparison}.
$\Box $
Using the same methodology as in the proof of Theorem \ref{comparison}, we
are now able to prove the comparison principle for mixed boundary conditions
stated in Corollary \ref{maxrand_partial}. This result will be an important
ingredient in the proof of Theorem \ref{existence}.
\noindent \textbf{Proof of Corollary \ref{maxrand_partial}.} If $u$ is a
viscosity subsolution, then so is $u-K$ for all $K>0$. It thus suffices to
prove that if $u\leq v$ on $\left(\partial \Omega \setminus G\right)\cup \overline{
\Omega }_{0}$, then $u\leq v$ in $\widetilde{\Omega }$. If $G=\partial
\Omega $, then this implication and its proof is identical to Theorem \ref
{comparison}. If $G\subset \partial \Omega $ is arbitrary, then we know by
assumption that $u\leq v$ on $\partial \Omega \setminus G$ and so the point $
\left(s,z\right)$ defined in \eqref{sigma} must belong to the set $G$ where the
boundary condition is satisfied. Hence, we can follow the proof of Theorem
\ref{comparison} and conclude that $u\leq v$ in $\widetilde{\Omega }$. $
\Box $
\noindent \textbf{Proof of Theorem \ref{existence}. }We will prove existence
using Perron's method. In particular, we show that the supremum of all
subsolutions to the initial value problem given by
\eqref{initial_value_problem} is indeed a solution to the same problem. To
ensure that the supremum is taken over a nonempty set, we need to find at
least one subsolution to the problem. We also need to know that the supremum
is finite. This is obtained by producing a supersolution, which, due to the
comparison principle, provides an upper bound for the supremum.
To find the supersolution, let, for some constants $A$ and $B$ to be chosen
later,
\begin{equation*}
\widehat{v}=A\alpha \left(t,x\right)+B,\quad \text{for}\;\left(t,x\right) \in \widetilde{\Omega},
\end{equation*}
where $\alpha \left(t,x\right)$ is the function guaranteed by Lemma \ref{testlemma5}.
By \eqref{f_kontinuerlig}, \eqref{ass_f_nondecreasing} and the boundedness
of $\Omega ^{\circ }$, we can find $A>0$ such that
\begin{equation*}
\langle D\widehat{v}\left(t,x\right),\widetilde{\gamma }\left(t,x\right)\rangle +f\left(t,x,\widehat{v}
\left(t,x\right)\right)\geq A+f\left(t,x,0\right)\geq 0,
\end{equation*}
for $\left(t,x\right)\in \partial \Omega $. Moreover, since the support of $\alpha $
lies in $U$, we have, with $\lambda $ and $m_{2}$ defined in
\eqref{ass_F_nondecreasing} and \eqref{ass_F_boundary},
\begin{eqnarray*}
&&D_{t}\widehat{v}\left(t,x\right)+F\left(t,x,\widehat{v}\left(t,x\right),D\widehat{v}\left(t,x\right),D^{2}
\widehat{v}\left(t,x\right)\right) \\
&\geq &-A\sup_{U}\{|D_{t}\alpha \left(t,x\right)|\}+B\lambda +F\left(t,x,0,0,0\right) \\
&&-\sup_{U}m_{2}\left( A\left( |D\alpha \left(t,x\right)|+||D^{2}\alpha \left(t,x\right)||\right)
\right) .
\end{eqnarray*}
By \eqref{ass_F_cont}, the boundedness of $\Omega ^{\circ }$ and by
recalling that we can assume $\lambda >0$, we see that taking $B$ large
enough, $\widehat{v}$ is a classical supersolution of
\eqref{initial_value_problem}. Hence, using \eqref{F_fundamental} and
Proposition 7.2 in \cite{CrandallIshiiLions1992}, $\widehat{v}$ is also a
viscosity supersolution. Next, we observe that $\check{u}=-\widehat{v}$ is a
viscosity subsolution to the problem given by
\eqref{initial_value_problem}.
We now apply Perron's method by defining our solution candidate as
\begin{equation*}
\widetilde{w}:=\sup \{w\left(x\right):\text{$w\in USC(\widetilde{\Omega })$ is a
viscosity subsolution of \eqref{initial_value_problem}}\}.
\end{equation*}
In the following we let $u^{\ast }$ and $u_{\ast }$ denote the upper and
lower semicontinuous envelopes of a function $u$, respectively. By the
comparison principle and by construction we obtain
\begin{equation}
\check{u}_{\ast }\leq \widetilde{w}_{\ast }\leq \widetilde{w}^{\ast }\leq
\widehat{v}^{\ast }\quad \text{on}\;\widetilde{\Omega }.
\label{eq:by_construction}
\end{equation}
Let us assume for the moment that $\widetilde{w}^{\ast }$ satisfies the
initial condition of being a subsolution and that $\widetilde{w}_{\ast }$
satisfies the initial condition of being a supersolution, that is
\begin{equation}
\widetilde{w}^{\ast }\left(0,x\right)\leq g\left(x\right)\leq \widetilde{w}_{\ast }\left(0,x\right),\quad
\text{for all }x\in \overline{\Omega }_{0}. \label{eq:initial_assumed}
\end{equation}
We can then proceed as in \cite{CrandallIshiiLions1992} (see also \cite
{Barles1993} and \cite{Ishii1987}) to show that $\widetilde{w}^{\ast }$ is a
viscosity subsolution and $\widetilde{w}_{\ast }$ is a viscosity
supersolution of the initial value problem in \eqref{initial_value_problem}.
Using the comparison principle again, we then have $\widetilde{w}_{\ast
}\geq \widetilde{w}^{\ast }$ and so by \eqref{eq:by_construction} $
\widetilde{w}_{\ast }=\widetilde{w}^{\ast }$ is the requested viscosity
solution. To complete the proof of Theorem \ref{existence}, it hence
suffices to prove \eqref{eq:initial_assumed}. This will be achieved by
constructing families of explicit viscosity sub- and supersolutions.
We first show that the subsolution candidate $\widetilde{w}^{\ast }$
satisfies the initial conditions for all $x\in \Omega _{0}$. To this end, we
define, for arbitrary $z\in \Omega _{0}$ and $\varepsilon >0$, the barrier
function
\begin{equation*}
V_{z,\varepsilon }\left(t,x\right)=g\left(z\right)+\varepsilon +B|x-z|^{2}+Ct,\quad \text{for}
\;\left(t,x\right)\in \left[ 0,T\right] \times \mathbb{R}^{n},
\end{equation*}
where $B$ and $C$ are constants, which may depend on $z$ and $\varepsilon $,
to be chosen later. We first observe that, by continuity of $g$ and
boundedness of $\Omega _{0}$, we can, for any $\varepsilon >0$, choose $B$
so large that $V_{z,\varepsilon }\left(0,x\right)\geq g\left(x\right)$, for all $x\in \overline{
\Omega }_{0}$. Moreover, since $\widetilde{w}$ is bounded on $\overline{
\Omega }$, we conclude, by increasing $B$ and $C$ if necessary, that we also
have
\begin{equation*}
V_{z,\varepsilon }\left(t,x\right)\geq \widetilde{w}\left(t,x\right),\quad \text{for}\;\left(t,x\right)\in
\partial \Omega \cup \overline{\Omega }_{0}.
\end{equation*}
A computation shows that, for $z$, $\varepsilon $, $B$ given, we can choose
the constant $C$ so large that $V_{z,\varepsilon }$ is a classical
supersolution of \eqref{huvudekvationen} in $[0,\infty )\times \mathbb{R}
^{n} $. Hence, by \eqref{F_fundamental}, $V_{z,\varepsilon }$ is also a
continuous viscosity supersolution of \eqref{huvudekvationen} in $\Omega
^{\circ }$. By the maximum principle in Lemma \ref{maxrand} applied to $
V_{z,\varepsilon }$ and each component in the definition of $\widetilde{w}$,
we obtain
\begin{equation}
V_{z,\varepsilon }\left(t,x\right)\geq \widetilde{w}\left(t,x\right),\quad \text{for}\;\left(t,x\right)\in
\widetilde{\Omega }. \label{eq:first_barrier_downpush_inside}
\end{equation}
It follows that $\widetilde{w}^{\ast }\leq V_{z,\varepsilon }^{\ast
}=V_{z,\varepsilon }$ in this set and hence the initial condition in $\Omega
_{0}$ follows since for any $x\in \Omega _{0}$
\begin{equation}
\widetilde{w}^{\ast }\left(0,x\right)\leq \inf_{\varepsilon ,z}V_{z,\varepsilon
}\left(0,x\right)=g\left(x\right). \label{eq:first_barrier_downpush_inside_final}
\end{equation}
To prove that the supersolution candidate $\widetilde{w}_{\ast }$ satisfies
the initial condition in $\Omega _{0}$, we proceed similarly by studying a
family of subsolutions of the form
\begin{equation*}
U_{z,\varepsilon }\left(t,x\right)=g\left(z\right)-B|x-z|^{2}-\varepsilon -Ct.
\end{equation*}
We next prove that $\widetilde{w}^{\ast }$ satisfies the boundary conditions
for each $x\in \partial \Omega _{0}$. In this case the barriers above will
not work as we cannot ensure that they exceed $\widetilde{w}^{\ast }$ on $
\partial \Omega $. Instead, we will construct barriers that are sub- and
supersolutions only locally, near the boundary, during a short time
interval. These local barriers are useful due to the maximum principle for
mixed boundary conditions proved in Corollary \ref{maxrand_partial}. To
construct the local barriers, fix $\widehat{z}\in \partial \Omega _{0}$ and
let $z\left(t\right)$ be the H\"{o}lder continuous function
\begin{equation*}
z\left(t\right)=\widehat{z}-K\widetilde{\gamma }\left(0,\widehat{z}\right)t^{\widehat{\alpha }},
\end{equation*}
where $\widehat{\alpha }$ is the H\"{o}lder exponent from \eqref{tempholder}
and $K$ is a constant depending on the H\"{o}lder constant and the shape of
the exterior cones in \eqref{boundarylip}. It follows that $z\left(t\right)$ stays
inside of $\Omega $ for a short time and that $z\left(0\right)=\widehat{z}$. Consider,
for $\varepsilon >0$, the barrier function
\begin{equation*}
\widetilde{V}_{\varepsilon ,\widehat{z}}\left(t,x\right)=g\left(\widehat{z}\right)+A\left( \alpha
\left(t,x\right)-\alpha \left(0,\widehat{z}\right)\right) +e^{\left(\widehat{C}/\chi \right)\alpha
\left(t,x\right)}w_{\varepsilon }\left(t,x,z\left(t\right)\right)+B+Ct^{\widehat{\alpha }},
\end{equation*}
whenever $\left(t,x\right)\in \lbrack 0,T]\times \mathbb{R}^{n}$, where $\widehat{C}$
and $\chi $ are the constants from Lemma \ref{testlemma4} and $A,B$ and $C$
are constants to be chosen later, possibly depending on $\widehat{z}$ and $
\varepsilon $. We first show that for any choice of $A$, we can find $B$
such that
\begin{equation}
g\left(x\right)\leq \widetilde{V}_{\varepsilon ,\widehat{z}}\left(0,x\right),\quad \text{for all }
x\in \overline{\Omega }_{0}\text{\quad and\quad }\inf_{\varepsilon }
\widetilde{V}_{\varepsilon ,\widehat{z}}\left(0,\widehat{z}\right)=g\left(\widehat{z}\right).
\label{eq:sup_barrier_above_g}
\end{equation}
Indeed, to prove the left inequality in \eqref{eq:sup_barrier_above_g},
observe that by \eqref{testlemma41} we have $\chi \left\vert x-\widehat{z}
\right\vert ^{2}/\varepsilon \leq w_{\varepsilon }\left( 0,x,\widehat{z}
\right) $. Moreover, by the continuity of $g\left( \cdot \right) -A\alpha
\left( 0,\cdot \right) $ in $\overline{\Omega }_{0}$, we can find $B$,
depending on $\varepsilon $ and $A$, so that
\begin{equation*}
g\left(x\right)-g\left(\widehat{z}\right)-A\left( \alpha \left(0,x\right)-\alpha \left(0,\widehat{z}\right)\right) \leq
B+\chi \frac{\left\vert x-\widehat{z}\right\vert ^{2}}{\varepsilon }.
\end{equation*}
This proves the left inequality in \eqref{eq:sup_barrier_above_g}. Finally,
it is no restriction to assume that $B\rightarrow 0$ as $\varepsilon
\rightarrow 0$, and this implies the right inequality in
\eqref{eq:sup_barrier_above_g}.
| 3,508 | 73,518 |
en
|
train
|
0.156.23
|
We next prove that $\widetilde{w}^{\ast }$ satisfies the boundary conditions
for each $x\in \partial \Omega _{0}$. In this case the barriers above will
not work as we cannot ensure that they exceed $\widetilde{w}^{\ast }$ on $
\partial \Omega $. Instead, we will construct barriers that are sub- and
supersolutions only locally, near the boundary, during a short time
interval. These local barriers are useful due to the maximum principle for
mixed boundary conditions proved in Corollary \ref{maxrand_partial}. To
construct the local barriers, fix $\widehat{z}\in \partial \Omega _{0}$ and
let $z\left(t\right)$ be the H\"{o}lder continuous function
\begin{equation*}
z\left(t\right)=\widehat{z}-K\widetilde{\gamma }\left(0,\widehat{z}\right)t^{\widehat{\alpha }},
\end{equation*}
where $\widehat{\alpha }$ is the H\"{o}lder exponent from \eqref{tempholder}
and $K$ is a constant depending on the H\"{o}lder constant and the shape of
the exterior cones in \eqref{boundarylip}. It follows that $z\left(t\right)$ stays
inside of $\Omega $ for a short time and that $z\left(0\right)=\widehat{z}$. Consider,
for $\varepsilon >0$, the barrier function
\begin{equation*}
\widetilde{V}_{\varepsilon ,\widehat{z}}\left(t,x\right)=g\left(\widehat{z}\right)+A\left( \alpha
\left(t,x\right)-\alpha \left(0,\widehat{z}\right)\right) +e^{\left(\widehat{C}/\chi \right)\alpha
\left(t,x\right)}w_{\varepsilon }\left(t,x,z\left(t\right)\right)+B+Ct^{\widehat{\alpha }},
\end{equation*}
whenever $\left(t,x\right)\in \lbrack 0,T]\times \mathbb{R}^{n}$, where $\widehat{C}$
and $\chi $ are the constants from Lemma \ref{testlemma4} and $A,B$ and $C$
are constants to be chosen later, possibly depending on $\widehat{z}$ and $
\varepsilon $. We first show that for any choice of $A$, we can find $B$
such that
\begin{equation}
g\left(x\right)\leq \widetilde{V}_{\varepsilon ,\widehat{z}}\left(0,x\right),\quad \text{for all }
x\in \overline{\Omega }_{0}\text{\quad and\quad }\inf_{\varepsilon }
\widetilde{V}_{\varepsilon ,\widehat{z}}\left(0,\widehat{z}\right)=g\left(\widehat{z}\right).
\label{eq:sup_barrier_above_g}
\end{equation}
Indeed, to prove the left inequality in \eqref{eq:sup_barrier_above_g},
observe that by \eqref{testlemma41} we have $\chi \left\vert x-\widehat{z}
\right\vert ^{2}/\varepsilon \leq w_{\varepsilon }\left( 0,x,\widehat{z}
\right) $. Moreover, by the continuity of $g\left( \cdot \right) -A\alpha
\left( 0,\cdot \right) $ in $\overline{\Omega }_{0}$, we can find $B$,
depending on $\varepsilon $ and $A$, so that
\begin{equation*}
g\left(x\right)-g\left(\widehat{z}\right)-A\left( \alpha \left(0,x\right)-\alpha \left(0,\widehat{z}\right)\right) \leq
B+\chi \frac{\left\vert x-\widehat{z}\right\vert ^{2}}{\varepsilon }.
\end{equation*}
This proves the left inequality in \eqref{eq:sup_barrier_above_g}. Finally,
it is no restriction to assume that $B\rightarrow 0$ as $\varepsilon
\rightarrow 0$, and this implies the right inequality in
\eqref{eq:sup_barrier_above_g}.
We next show that $\widetilde{V}_{\varepsilon ,\widehat{z}}$ satisfies the
boundary condition in a small neighborhood of $\widehat{z}$ in ${\partial
\Omega }$. To do so, let $E_{\widehat{z}}=\left(0,\kappa \right)\times B\left(\widehat{z}
,\rho \right)$ for some $\kappa ,\rho >0$ to be chosen. We intend to find $\kappa $
, $\rho $, $A$ and $C$ such that
\begin{equation}
\langle D_{x}\widetilde{V}_{\varepsilon ,\widehat{z}}\left(t,x\right),\widetilde{\gamma
}\left(t,x\right)\rangle +f\left(t,x,\widetilde{V}_{\varepsilon ,\widehat{z}}\left(t,x\right)\right)\geq
0,\quad \text{for}\;\left(t,x\right)\in E_{\widehat{z}}\cap \partial \Omega .
\label{eq:sup_barrier_RV}
\end{equation}
First, observe that $\alpha $ is differentiable in time on $\overline{\Omega
}$. Therefore, by taking $C$ large enough and by using
\eqref{eq:sup_barrier_above_g} we ensure that
\begin{equation*}
\widetilde{V}_{\varepsilon ,\widehat{z}}\left(t,x\right)\geq g\left(\widehat{z}\right),\quad \text{
for}\;\left(t,x\right)\in \overline{\Omega }.
\end{equation*}
In general, the choice of $C$ will depend on $A$, but it is evident from the
next inequality that this will not give rise to circular reasoning. By
\eqref{ass_f_nondecreasing} and the boundedness of $\overline{\Omega }$, we
can choose $A$ so that
\begin{equation*}
f\left(t,x,\widetilde{V}_{\varepsilon ,\widehat{z}}\left(t,x\right)\right)\geq f\left(t,x,g\left(\widehat{z}
\right)\right)\geq -A,\quad \text{for}\;\left(t,x\right)\in \overline{\Omega }.
\end{equation*}
Thus, the boundary condition in \eqref{eq:sup_barrier_RV} will follow if we
can prove
\begin{equation}
\langle D_{x}\widetilde{V}_{\varepsilon ,\widehat{z}}\left(t,x\right),\widetilde{\gamma
}\left(t,x\right)\rangle \geq A,\quad \text{for}\;\left(t,x\right)\in E_{\widehat{z}}\cap \partial
\Omega . \label{eq:RV_andra_halvan}
\end{equation}
To this end, choose $\rho $ and $\kappa $ so small that
\begin{equation}
\left\langle x-z\left(t\right),\widetilde{\gamma }\left( t,x\right) \right\rangle \geq
-\theta \left\vert x-z\left(t\right)\right\vert \quad \text{whenever}\;x\in B\left(
\widehat{z},\rho \right) \cap \partial \Omega _{t},\;t\in \left[ 0,\kappa
\right] .
\end{equation}
Inequality \eqref{test3} then holds with $y=z\left( t\right) $ for all $
\left(t,x\right)\in E_{\widehat{z}}\cap \partial \Omega $. Together with the properties
of $\alpha $, this gives
\begin{eqnarray*}
&&\langle D_{x}\widetilde{V}_{\varepsilon ,\widehat{z}}\left(t,x\right),\widetilde{
\gamma }\left(t,x\right)\rangle \\
&=&A\langle D_{x}\alpha \left(t,x\right),\widetilde{\gamma }\left(t,x\right)\rangle +e^{\left(\widehat{C
}/\chi \right)\alpha \left(t,x\right)} \\
&&\cdot \left\langle D_{x}w_{\varepsilon }\left( t,x,z\left(t\right)\right)
+w_{\varepsilon }\left(t,x,z\left(t\right)\right)\frac{\widehat{C}}{\chi }D_{x}\alpha \left(t,x\right),
\widetilde{\gamma }\left(t,x\right)\right\rangle \\
&\geq &A-\widehat{C}\frac{|x-z\left(t\right)|^{2}}{\varepsilon }+\chi \frac{|x-z\left(t\right)|^{2}
}{\varepsilon }\frac{\widehat{C}}{\chi }=A,\quad \text{for}\;\left(t,x\right)\in E_{
\widehat{z}}\cap \partial \Omega .
\end{eqnarray*}
This proves \eqref{eq:RV_andra_halvan} and hence the boundary condition
\eqref{eq:sup_barrier_RV} follows.
We now show that for $C$ large enough, $\widetilde{V}_{\varepsilon ,\widehat{
z}}$ is a supersolution to \eqref{huvudekvationen}, that is
\begin{equation}
D_{t}\widetilde{V}_{\varepsilon ,\widehat{z}}\left(t,x\right)+F\left(t,x,\widetilde{V}
_{\varepsilon ,\widehat{z}}\left(t,x\right),D_{x}\widetilde{V}_{\varepsilon ,\widehat{z}
}\left(t,x\right),D_{x}^{2}\widetilde{V}_{\varepsilon ,\widehat{z}}\left(t,x\right)\right)\geq 0,\quad
\text{for }\left(t,x\right)\in \Omega ^{\circ }.
\label{eq:ekvationen_for_den_sista_barriaren}
\end{equation}
With $D_{s}$ and $D_{\eta }$ denoting differentiation with respect to the
first and third arguments of $w_{\varepsilon }$, respectively, we have
\begin{eqnarray}
D_{t}\widetilde{V}_{\varepsilon ,\widehat{z}}\left(t,x\right) &=&AD_{t}\alpha \left(t,x\right)+e^{\left(
\widehat{C}/\chi \right)\alpha \left(t,x\right)}\frac{\widehat{C}}{\chi }D_{t}\alpha \left(
t,x\right) w_{\varepsilon }\left( t,x,z\left(t\right)\right) +e^{\left(\widehat{C}/\chi
\right)\alpha \left(t,x\right)} \notag \\
&&\cdot \left( D_{s}w_{\varepsilon }\left( t,x,z\left(t\right)\right) -2K\widehat{
\alpha }\left\langle D_{\eta }w_{\varepsilon }\left(t,x,z\left(t\right)\right),\widetilde{\gamma }
\left(0,\widehat{z}\right)\right\rangle t^{\widehat{\alpha }-1}\right) \notag \\
&&+C\widehat{\alpha }t^{\widehat{\alpha }-1}. \label{eq:timederiv}
\end{eqnarray}
Moreover, by \eqref{ass_F_nondecreasing} with $\lambda =0$ and by
\eqref{ass_F_boundary} we have
\begin{eqnarray}
&&F\left(t,x,\widetilde{V}_{\varepsilon ,\widehat{z}}\left(t,x\right),D_{x}\widetilde{V}
_{\varepsilon ,\widehat{z}}\left(t,x\right),D_{x}^{2}\widetilde{V}_{\varepsilon ,
\widehat{z}}\left(t,x\right)\right) \notag \\
&\geq &F\left( t,x,g\left(\widehat{z}\right),0,0\right) -\sup_{\Omega }m_{2}\left(
|D_{x}\widetilde{V}_{\varepsilon ,\widehat{z}}\left(t,x\right)|+||D_{x}^{2}\widetilde{V}
_{\varepsilon ,\widehat{z}}\left(t,x\right)||\right) . \label{spacederiv}
\end{eqnarray}
By \eqref{testlemma45}-\eqref{testlemma47}, \eqref{eq:timederiv} and
\eqref{spacederiv}, we can find $C$ so that
\eqref{eq:ekvationen_for_den_sista_barriaren} is satisfied. Hence, using
\eqref{F_fundamental} and Proposition 7.2 in \cite{CrandallIshiiLions1992},
$\widetilde{V}_{\varepsilon ,\widehat{z}}$ is a viscosity supersolution in $\Omega $
which satisfies the boundary condition \eqref{randvillkor} on $E_{\widehat{z}
}\cap \partial \Omega $ in the viscosity sense.
We now perform the localized comparison. From the construction of $
\widetilde{w}$, it is clear that $\widetilde{w}\left(0,x\right)\leq g\left( x\right) $,
for all $x\in \overline{\Omega }_{0}$. Combined with the left inequality in
\eqref{eq:sup_barrier_above_g}, this yields
\begin{equation}
\widetilde{V}_{\varepsilon ,\widehat{z}}\left(0,x\right)\geq \widetilde{w}\left(0,x\right),\quad
\text{for }x\in \overline{\Omega }_{0}\text{. } \label{eq:bottom_comparison}
\end{equation}
Moreover, for some constant $K$ depending on $g$, $\alpha $, $\widehat{z}$, $
A$, $\kappa $ and $\rho $, we have
\begin{equation*}
\widetilde{V}_{\varepsilon ,\widehat{z}}\left(t,x\right)\geq -K+\chi \frac{\left\vert
x-z\left( t\right) \right\vert ^{2}}{\varepsilon }+B,\quad \text{for}
\;\left( t,x\right) \in \left( \partial E_{\widehat{z}}\setminus \partial
\Omega \right) \cap \left( \left[ 0,\kappa \right) \times \mathbb{R}
^{n}\right) .
\end{equation*}
Since $\widetilde{w}$ is bounded, we can conclude, by increasing $B$ if
necessary, that
\begin{equation}
\widetilde{V}_{\varepsilon ,\widehat{z}}\left(t,x\right)\geq \widetilde{w}\left(t,x\right),\quad
\text{for}\;\left(t,x\right)\in \left(\partial E_{\widehat{z}}\setminus \partial \Omega
\right)\cap \left( \lbrack 0,\kappa )\times
\mathbb{R}
| 3,565 | 73,518 |
en
|
train
|
0.156.24
|
We now show that for $C$ large enough, $\widetilde{V}_{\varepsilon ,\widehat{
z}}$ is a supersolution to \eqref{huvudekvationen}, that is
\begin{equation}
D_{t}\widetilde{V}_{\varepsilon ,\widehat{z}}\left(t,x\right)+F\left(t,x,\widetilde{V}
_{\varepsilon ,\widehat{z}}\left(t,x\right),D_{x}\widetilde{V}_{\varepsilon ,\widehat{z}
}\left(t,x\right),D_{x}^{2}\widetilde{V}_{\varepsilon ,\widehat{z}}\left(t,x\right)\right)\geq 0,\quad
\text{for }\left(t,x\right)\in \Omega ^{\circ }.
\label{eq:ekvationen_for_den_sista_barriaren}
\end{equation}
With $D_{s}$ and $D_{\eta }$ denoting differentiation with respect to the
first and third arguments of $w_{\varepsilon }$, respectively, we have
\begin{eqnarray}
D_{t}\widetilde{V}_{\varepsilon ,\widehat{z}}\left(t,x\right) &=&AD_{t}\alpha \left(t,x\right)+e^{\left(
\widehat{C}/\chi \right)\alpha \left(t,x\right)}\frac{\widehat{C}}{\chi }D_{t}\alpha \left(
t,x\right) w_{\varepsilon }\left( t,x,z\left(t\right)\right) +e^{\left(\widehat{C}/\chi
\right)\alpha \left(t,x\right)} \notag \\
&&\cdot \left( D_{s}w_{\varepsilon }\left( t,x,z\left(t\right)\right) -2K\widehat{
\alpha }\left\langle D_{\eta }w_{\varepsilon }\left(t,x,z\left(t\right)\right),\widetilde{\gamma }
\left(0,\widehat{z}\right)\right\rangle t^{\widehat{\alpha }-1}\right) \notag \\
&&+C\widehat{\alpha }t^{\widehat{\alpha }-1}. \label{eq:timederiv}
\end{eqnarray}
Moreover, by \eqref{ass_F_nondecreasing} with $\lambda =0$ and by
\eqref{ass_F_boundary} we have
\begin{eqnarray}
&&F\left(t,x,\widetilde{V}_{\varepsilon ,\widehat{z}}\left(t,x\right),D_{x}\widetilde{V}
_{\varepsilon ,\widehat{z}}\left(t,x\right),D_{x}^{2}\widetilde{V}_{\varepsilon ,
\widehat{z}}\left(t,x\right)\right) \notag \\
&\geq &F\left( t,x,g\left(\widehat{z}\right),0,0\right) -\sup_{\Omega }m_{2}\left(
|D_{x}\widetilde{V}_{\varepsilon ,\widehat{z}}\left(t,x\right)|+||D_{x}^{2}\widetilde{V}
_{\varepsilon ,\widehat{z}}\left(t,x\right)||\right) . \label{spacederiv}
\end{eqnarray}
By \eqref{testlemma45}-\eqref{testlemma47}, \eqref{eq:timederiv} and
\eqref{spacederiv}, we can find $C$ so that
\eqref{eq:ekvationen_for_den_sista_barriaren} is satisfied. Hence, using
\eqref{F_fundamental} and Proposition 7.2 in \cite{CrandallIshiiLions1992},
$\widetilde{V}_{\varepsilon ,\widehat{z}}$ is a viscosity supersolution in $\Omega $
which satisfies the boundary condition \eqref{randvillkor} on $E_{\widehat{z}
}\cap \partial \Omega $ in the viscosity sense.
We now perform the localized comparison. From the construction of $
\widetilde{w}$, it is clear that $\widetilde{w}\left(0,x\right)\leq g\left( x\right) $,
for all $x\in \overline{\Omega }_{0}$. Combined with the left inequality in
\eqref{eq:sup_barrier_above_g}, this yields
\begin{equation}
\widetilde{V}_{\varepsilon ,\widehat{z}}\left(0,x\right)\geq \widetilde{w}\left(0,x\right),\quad
\text{for }x\in \overline{\Omega }_{0}\text{. } \label{eq:bottom_comparison}
\end{equation}
Moreover, for some constant $K$ depending on $g$, $\alpha $, $\widehat{z}$, $
A$, $\kappa $ and $\rho $, we have
\begin{equation*}
\widetilde{V}_{\varepsilon ,\widehat{z}}\left(t,x\right)\geq -K+\chi \frac{\left\vert
x-z\left( t\right) \right\vert ^{2}}{\varepsilon }+B,\quad \text{for}
\;\left( t,x\right) \in \left( \partial E_{\widehat{z}}\setminus \partial
\Omega \right) \cap \left( \left[ 0,\kappa \right) \times \mathbb{R}
^{n}\right) .
\end{equation*}
Since $\widetilde{w}$ is bounded, we can conclude, by increasing $B$ if
necessary, that
\begin{equation}
\widetilde{V}_{\varepsilon ,\widehat{z}}\left(t,x\right)\geq \widetilde{w}\left(t,x\right),\quad
\text{for}\;\left(t,x\right)\in \left(\partial E_{\widehat{z}}\setminus \partial \Omega
\right)\cap \left( \lbrack 0,\kappa )\times
\mathbb{R}
^{n}\right) . \label{supbarrierdominaterest}
\end{equation}
Now, let $\kappa $ be so small that for some $\widetilde{\varepsilon }>0$,
it holds that
\begin{equation}
\left\vert x-z\left( t\right) \right\vert >\widetilde{\varepsilon }>0\quad
\text{whenever}\;\left( t,x\right) \in \left( \partial E_{\widehat{z}
}\setminus \partial \Omega \right) \cap \left( \left[ 0,\kappa \right)
\times \mathbb{R}^{n}\right) . \label{eq:extra_sista_z(t)_egenskap}
\end{equation}
This choice is possible by the definition of $z\left(t\right)$ and by the properties of
the domain. Inequality \eqref{eq:extra_sista_z(t)_egenskap} implies that it
is no restriction to assume that $B\rightarrow 0$ as $\varepsilon
\rightarrow 0$, which is necessary. By means of \eqref{eq:sup_barrier_RV},
\eqref{eq:bottom_comparison} and \eqref{supbarrierdominaterest}, we can use
Corollary \ref{maxrand_partial} to make comparison in $E_{\widehat{z}}\cap
\overline{\Omega }$ of the supersolution $\widetilde{V}_{\varepsilon ,
\widehat{z}}$ with each subsolution in the definition of $\widetilde{w}$.
Hence
\begin{equation*}
\widetilde{V}_{\varepsilon ,\widehat{z}}\left(t,x\right)\geq \widetilde{w}\left(t,x\right),\quad
\text{for}\;\left(t,x\right)\in \overline{E}_{\widehat{z}}\cap \overline{\Omega },
\end{equation*}
and, as a consequence, $\widetilde{V}_{\varepsilon ,\widehat{z}}=\widetilde{V
}_{\varepsilon ,\widehat{z}}^{\ast }\geq \widetilde{w}^{\ast }$ in $
\overline{E}_{\widehat{z}}\cap \overline{\Omega }$. Thus, for any $x\in
\partial \Omega _{0}$,
\begin{equation*}
\widetilde{w}^{\ast }\left(0,x\right)\leq \inf_{\varepsilon ,\widehat{z}}\widetilde{V}
_{\varepsilon ,\widehat{z}}\left(0,x\right)=g\left(x\right).
\end{equation*}
To prove that $\widetilde{w}_{\ast }$ satisfies the initial condition on $
\partial \Omega _{0}$, we proceed similarly by constructing a family of
subsolutions of the form
\begin{equation*}
\tilde{U}_{\varepsilon ,\widehat{z}}\left(t,x\right)=g\left(\widehat{z}\right)-A\left( \alpha
\left(t,x\right)-\alpha \left(0,\widehat{z}\right)\right) -e^{\left(\widehat{C}/\chi \right)\alpha
\left(t,x\right)}w_{\varepsilon }\left(t,x,z\left(t\right)\right)-B-Ct^{\widehat{\alpha }}.
\end{equation*}
This completes the proof of Theorem \ref{existence}. $
\Box $\vspace{
0.2cm}
\end{document}
| 2,193 | 73,518 |
en
|
train
|
0.157.0
|
\begin{document}
\title{Thermal state truncation by using quantum scissors device}
\author{Hong-xia Zhao$^{1}$, Xue--xiang Xu$^{2,\dag }$, Hong-chun Yuan$^{3}$}
\affiliation{$^{1}$Information Engineering college, Jiangxi University of technology,
Nanchang 330098, China \\
$^{2}$Center for Quantum Science and Technology, Jiangxi Normal University,
Nanchang 330022, China\\
$^{3}$College of Electrical and Optoelectronic Engineering, Changzhou
Institute of Technology, Changzhou 213002, China\\
$^{\dag }$Corresponding author: [email protected] }
\begin{abstract}
A non-Gaussian state being a mixture of the vacuum and single-photon states
can be generated by truncating a thermal state in a quantum scissors device
of Pegg et al. [Phys. Rev. Lett. 81 (1998) 1604]. In contrast to the thermal
state, the generated state shows nonclassical property including the
negativity of Wigner function. Besides, signal amplification and
signal-to-noise ratio enhancement can be achieved.
\textbf{PACS: }42.50.Dv, 03.67.-a, 05.30.-d, 03.65.Wj
\textbf{Keywords:} quantum scissor; thermal state; signal amplification;
signal-to-noise ratio; Wigner function; parity
\end{abstract}
\maketitle
\section{Introduction}
The generation of quantum states is a prerequisite for universal quantum
information processing (QIP) \cite{1}. Quantum states are usually classified
into discrete-variable (DV) and continuous-variable (CV) descriptions \cite
{2}. In the CV quantum regime, there are two classes of quantum states that
play an important role in QIP: Gaussian and non-Gaussian states, referring
to their character of wave function or Wigner function \cite{3,4}. In
general, Gaussian states are relatively easy to generate and manipulate
using current standard optical technology\cite{5}.
However, in recent decades, several probabilistic schemes have been
proposed to generate and manipulate non-Gaussian states \cite{6,6a,6b}. Many
schemes work in postselection \cite{7}, that is, the generated state is
accepted conditionally on a measurement outcome. The typical examples
include photon addition and subtraction \cite{8}, and noise addition \cite{9}
. Among them, an interesting scheme was based on the quantum-scissors
devices. In 1998, Pegg, Phillips and Barnett proposed this quantum state
truncation scheme, which changes an optical state $\gamma _{0}\left\vert
0\right\rangle +\gamma _{1}\left\vert 1\right\rangle +\gamma _{2}\left\vert
2\right\rangle +\cdots $ into the qubit optical state $\gamma _{0}\left\vert
0\right\rangle +\gamma _{1}\left\vert 1\right\rangle $. The device is then
called a quantum scissors device (QSD), while the effect is referred to as
optical state truncation via projection synthesis. This quantum mechanical
phenomenon was actually a nonlocal effect relying on entanglement because no
light from the input mode can reach the output mode \cite{10}. After its
proposal, an experiment of quantum scissors was realized by Babichev, Ries
and Lvovsky \cite{11} by applying the experimentally feasible proposal of
Ref. \cite{11a1,11a2,11a3}. The QSD was also applied and generalized to
generate not only qubits but also qutrits \cite{11b} and qudits \cite
{11c1,11c2} of any dimension. Similar quantum state can be also generated
via a four-wave mixing process in a cavity \cite{11d}.
Following these works on QSD, Ferreyrol et al. implemented a
nondeterministic optical noiseless amplifier for a coherent state \cite{12}.
Moreover, heralded noiseless linear amplifications were designed and
realized \cite{13,14,15}. Recently, an experimental demonstration of a
practical nondeterministic quantum optical amplification scheme was
presented to achieve amplification of known sets of coherent states with
high fidelity \cite{16}. By the way, many systems transmitting signals using
quantum states could benefit from amplification. In fact, any attempt to
amplify signal must introduce noise inevitably. In other words, perfect
deterministic amplification of an unknown quantum signal is impossible. In
addition, Miranowicz et al.\ studied the phase-space interference of quantum
states optically truncated by QSD \cite{16a}.
Inspired by the above works, we generate a non-Gaussian mixed state by using
a Gaussian thermal state as the input state of the quantum scissors in this
paper. This process transforms an input thermal state into an incoherent
mixture of only zero-photon and single-photon components. The success
probability of such an event is studied. Some properties of the generated
state, such as signal amplification, signal-to-noise ratio and the
negativity of the Wigner function, are investigated in detail. The paper is
organized as follows. In section II, we outline the framework of QSD and
introduce the scheme of thermal state truncation. The quantum state is derived
explicitly and the success probability is discussed. Subsequently, some statistical
properties, such as average photon number, intensity gain, signal-to-noise
ratio, are investigated in section III. In addition, we study the Wigner
function and the parity for the output state in section IV. Conclusions are
summarized in the final section.
\section{Thermal state truncation scheme}
In this section, we outline the basic framework of quantum scissors
device and introduce our scheme of thermal state truncation.
\subsection{Framework of \textbf{quantum scissors device}}
QSD mainly includes two beam splitters (BSs) and three channels, as shown in
Fig.1. Three channels are described by the optical modes $a$, $b$, and $c$
in terms of their respective creation (annihilation) operators $a^{\dag }$($
a $), $b^{\dag }$($b$) and $c^{\dag }$($c$). Since every channel has an
input port and an output port, the QSD has six ports. The interaction
includes several key stages as follows. Firstly, the channel $a$ and the
channel $c$\ are correlated through an asymmetrical beam splitter (A-BS),
whose operation can be described by the unitary operator $B_{1}=e^{\theta
\left( a^{\dag }c-ac^{\dag }\right) }$ with the transmissivity $T=\cos
^{2}\theta $. After that, the channel $b$ and the channel $c$\ are then
correlated through another symmetrical beam splitter (S-BS, also 50:50 BS),
whose operation can be described by the unitary operator $B_{2}=e^{\frac{\pi
}{4}\left( b^{\dag }c-bc^{\dag }\right) }$. Moreover, among these six ports,
four ports are fixed with special processes as follows: (1) Injecting the
auxiliary single-photon $\left\vert 1\right\rangle $ in the input port of
channel $a$; (2) Injecting the auxiliary zero-photon $\left\vert
0\right\rangle $ in the input port of channel $c$; (3) Detecting the
single-photon $\left\vert 1\right\rangle $ in the output port of channel $b$
; and (4) Detecting the zero-photon $\left\vert 0\right\rangle $ in the
output port of channel $c$.
QSD leaves only one input port (i.e., the input port in channel $b$) and one
output port (i.e., the output port in channel $a$). Injecting an appropriate
input state in the input port, one can generate a new quantum state in the
output port. Many previous theoretical and experimental schemes have used
the pure states as the input states to generate quantum states. Here, our
proposed scheme uses a mixed state as the input state to generate quantum
state.
\subsection{Thermal state truncation}
Using a mixed state (i.e., thermal state) as the input state, we shall
generate another mixed state in our present protocol. The input thermal
state is given by
\begin{equation}
\rho _{th}=\sum_{n=0}^{\infty }\frac{\bar{n}^{n}}{\left( \bar{n}+1\right)
^{n+1}}\left\vert n\right\rangle \left\langle n\right\vert , \label{1}
\end{equation}
where $\bar{n}$\ is the average number of the thermal photons \cite{17}.
Therefore, the output generated state can be expressed as
\begin{eqnarray}
\rho _{out} &=&\frac{1}{p_{d}}\left\langle 0_{c}\right\vert \left\langle
1_{b}\right\vert B_{2}\{\rho _{th}\otimes \notag \\
&&[B_{1}(\left\vert 1_{a}\right\rangle \left\langle 1_{a}\right\vert \otimes
\left\vert 0_{c}\right\rangle \left\langle 0_{c}\right\vert )B_{1}^{\dag
}]\}B_{2}^{\dag }\left\vert 1_{b}\right\rangle \left\vert 0_{c}\right\rangle
\label{2}
\end{eqnarray}
where $p_{d}$ is the success probability.
\begin{figure}
\caption{(Colour online) Conceptual scheme of "quantum scissors device"
(QSD) for thermal state truncation. The auxiliary single-photon $\left\vert
1\right\rangle \left\langle 1\right\vert $ in channel $a$ and the auxiliary
zero-photon (vacuum) state $\left\vert 0\right\rangle \left\langle 0\right\vert $ in
channel $c$ generate an entangled state between the modes $a$ and $c$\
after passing through an asymmetrical beam splitter (A-BS) with the
transmissivity $T$. The input mode $b$ (accompanied by the input thermal
state $\protect\rho _{th}
\label{Fig1}
\end{figure}
The explicit density operator in Eq.(\ref{2}) can further be expressed as
\begin{equation}
\rho _{out}=p_{0}\left\vert 0\right\rangle \left\langle 0\right\vert
+p_{1}\left\vert 1\right\rangle \left\langle 1\right\vert , \label{3}
\end{equation}
where $p_{0}=\left( 1-T\right) \left( \bar{n}+1\right) /\left( \bar{n}
+1-T\right) $ and $p_{1}=\bar{n}T/\left( \bar{n}+1-T\right) $ are,
respectively, the zero-photon distribution probability and the one-photon
distribution probability. Obviously, the output state is an incoherent
mixture of a vacuum state $\left\vert 0\right\rangle \left\langle
0\right\vert $ and a one-photon state $\left\vert 1\right\rangle
\left\langle 1\right\vert $ with certain ratio coefficients $p_{0}$, $p_{1}$
. If $T=0$, then $\rho _{out}\rightarrow \left\vert 0\right\rangle
\left\langle 0\right\vert $; while for $T=1$, then $\rho _{out}\rightarrow
\left\vert 1\right\rangle \left\langle 1\right\vert $.
From another point of view, the output generated state in Eq.(\ref{3})
remains only the first two terms of the input thermal state in Eq.(\ref{1}),
which can also be considered as a truncation of the input thermal state.
However, the corresponding coefficients of these terms are changed.
Moreover, the output generated state carries the information of the input
thermal state because it also depends on the thermal parameter $\bar{n}$.
Since no light from the input port reaches the output port, this process
also marks the nonlocal quantum effect of the operation for the quantum
scissors.
From present protocol, we easily obtain $p_{d}$ as follows
\begin{equation}
p_{d}=\allowbreak \frac{\bar{n}+1-T}{2\left( \bar{n}+1\right) ^{2}}.
\label{4}
\end{equation}
For a given $\bar{n}$, it can be shown that $p_{d}$ is a linear decreasing
function of $T$.
\begin{figure}
\caption{(Colour online) Probability of successfully generating the output
state as a function of the beam-splitter transmissivity according to the
model presented in the text. The average photon number of the input thermal
state $\bar{n}
\label{Fig2}
\end{figure}
In Fig.2, we plot $p_{d}$ as a function of $T$\ for different $\bar{n}$. For
instance, when $\bar{n}=1$, we have $p_{d}|_{\bar{n}=1}=0.25-0.125T$ (see
the green line in Fig.2); when $\bar{n}=0$, we have $p_{d}|_{\bar{n}
=0}=\allowbreak 0.5-0.5T$ (see the black line in Fig.2). The results on the
success probability provide a theoretical reference for experimental
realization.
| 3,352 | 9,150 |
en
|
train
|
0.157.1
|
\section{Statistical properties of the generated state}
By adjusting the interaction parameters, i.e., the thermal parameter $\bar{n}
$ of the input state and the transmission parameter $T$ of the A-BS, one can
obtain different output states with different figures of merits. Some
statistical properties, such as average photon number, intensity gain and
signal-to-noise ratio, are studied in this section. As the reference, we
will compare the properties of the output state with those of the input
state.
\subsection{Average photon number and intensity gain}
Using the definition of the average photon number, we have $\left\langle
\hat{n}\right\rangle _{\rho _{th}}=\bar{n}$ for the input thermal state and
\begin{equation}
\left\langle \hat{n}\right\rangle _{\rho _{out}}=\frac{\bar{n}T}{\bar{n}+1-T}
. \label{5}
\end{equation}
for the output generated state. Here $\hat{n}$\ is the operator of the
photon number \cite{18}.
In Fig.3, we plot $\left\langle \hat{n}\right\rangle _{\rho _{out}}$ as a
function of $T$\ for different $\bar{n}$. Two extreme cases, namely,
(1) $\left\langle \hat{n}\right\rangle _{\rho _{out}}\equiv 0$ if $\bar{n}=0$
or $T=0$, and (2) $\left\langle \hat{n}\right\rangle _{\rho _{out}}\equiv 1$
if $T=1$ for any $\bar{n}\neq 0$, always hold. No matter how large the
input thermal parameter $\left\langle \hat{n}\right\rangle _{\rho _{th}}$
is, there always exists $\left\langle \hat{n}\right\rangle _{\rho _{out}}\in
\lbrack 0,1]$. Moreover, $\left\langle \hat{n}\right\rangle _{\rho _{out}}$
is an increasing function of $T$ for a given nonzero $\bar{n}$.
\begin{figure}
\caption{(Colour online) Average photon number of the output state as a
function of the beam-splitter transmissivity. The average photon number of
the input thermal state $\bar{n}
\label{Fig3}
\end{figure}
In order to describe signal amplification, we define the intensity gain as $
g=\left\langle \hat{n}\right\rangle _{\rho _{out}}/\left\langle \hat{n}
\right\rangle _{\rho _{th}}$, which is related with the intensity $
\left\langle \hat{n}\right\rangle _{\rho _{out}}$ of the output field with
that ($\left\langle \hat{n}\right\rangle _{\rho _{th}}$) of the input field.
Therefore we have
\begin{equation}
g=\allowbreak \frac{T}{\bar{n}+1-T}. \label{6}
\end{equation}
If $g>1$, then there exists signal amplification.
\begin{figure}
\caption{(Colour online) Intensity gain of the output state as a function of
the beam-splitter transmissivity. The average photon number of the input
thermal state $\bar{n}
\label{Fig4}
\end{figure}
Fig.4 shows the intensity gain $g$ as a function of $T$ for different $\bar{n
}$. If $\bar{n}\geq 1$, $g$ cannot exceed 1, which means no
amplification. In other words, the amplification happens only for the cases $
\bar{n}<1$ with $T\in (\left( \bar{n}+1\right) /2,1]$.
\subsection{Signal to noise ratio}
Signal-to-noise ratio (abbreviated SNR or S/N) is a measure used in science
and engineering that compares the level of a desired signal to the level of
background noise \cite{19}. Here we are interested in the effect that the
process has on the noise of these states. Typically this is shown by
calculating the variance of the photon number and forming the SNR, defined
by $SNR=\left\langle \hat{n}\right\rangle /\sqrt{\left\langle \hat{n}
^{2}\right\rangle -\left\langle \hat{n}\right\rangle ^{2}}$. From the
definition, we have $\left\langle \hat{n}\right\rangle |_{\rho _{th}}=\bar{n}
$, $\left\langle \hat{n}^{2}\right\rangle |_{\rho _{th}}=\bar{n}+2\bar{n}^{2}
$, and then $SNR|_{\rho _{th}}=\bar{n}/\sqrt{\bar{n}+\bar{n}^{2}}$ for the
input thermal state $\rho _{th}$. While for our generated state $\rho _{out}$
, we find $\left\langle \hat{n}\right\rangle |_{\rho _{out}}=\left\langle
\hat{n}^{2}\right\rangle |_{\rho _{out}}=p_{1}$ and
\begin{equation}
SNR|_{\rho _{out}}=\sqrt{\allowbreak \frac{\bar{n}T}{\left( 1-T\right)
\left( \bar{n}+1\right) }}. \label{7}
\end{equation}
\begin{figure}
\caption{(Colour online) Signal to noise ratio of the output states (curve
line) as a function of the beam-splitter transmissivity, compared to their
corresponding thermal states (straight line). Here $\bar{n}
\label{Fig5}
\end{figure}
As is shown in Fig.5, we see the SNR for the output states, as compared to
their corresponding thermal states, of the same fixed average photon number.
It is found that a clear enhancement (corresponding to its input thermal
state) can be seen for $T>0.5$. Moreover, although the SNR of the input
thermal state is always smaller than 1:1, the SNR higher than 1:1 for the
output generated state can be found in larger $T$ $\left( >\left( \bar{n}
+1\right) /\left( 2\bar{n}+1\right) \right) $.
\section{Wigner function and Parity of the generated state}
The negative Wigner function is a witness of the nonclassicality of a
quantum state \cite{20,21,22}. For a single-mode density operator $\rho $,
the Wigner function in the coherent state representation $\left\vert
z\right\rangle $ can be expressed as $W(\beta )=\frac{2e^{2\left\vert \beta
\right\vert ^{2}}}{\pi }\int \frac{d^{2}z}{\pi }\left\langle -z\right\vert
\rho \left\vert z\right\rangle e^{-2\left( z\beta ^{\ast }-z^{\ast }\beta
\right) }$, where $\beta =\left( q+ip\right) /\sqrt{2}$. Therefore we easily
obtain $W_{\rho _{th}}(\beta )=2/\left( \pi \left( 2\bar{n}+1\right) \right)
e^{-2\left\vert \beta \right\vert ^{2}/\left( 2\bar{n}+1\right) }$ for the
input thermal state and
\begin{equation}
W_{\rho _{out}}(\beta )=p_{0}W_{\left\vert 0\right\rangle \left\langle
0\right\vert }(\beta )+p_{1}W_{\left\vert 1\right\rangle \left\langle
1\right\vert }(\beta ) \label{8}
\end{equation}
for the output generated state with $W_{\left\vert 0\right\rangle
\left\langle 0\right\vert }(\beta )=\frac{2}{\pi }e^{-2\left\vert \beta
\right\vert ^{2}}$ \ and $W_{\left\vert 1\right\rangle \left\langle
1\right\vert }(\beta )=\frac{2}{\pi }(4\left\vert \beta \right\vert
^{2}-1)e^{-2\left\vert \beta \right\vert ^{2}}$.
As we all know, the thermal state is a Gaussian state, whose Wigner function
have no negative region. However, our output generated states have lost the
Gaussian characters because of the non-Gaussian forms of their Wigner
functions.\ In addition, the Wigner function will exhibit negative values in some
region satisfying the following condition $\left\vert \beta \right\vert
^{2}<[2T\bar{n}-\left( \bar{n}+1-T\right) ]/\left( 4\bar{n}T\right) $. In
Fig.6, we plot the Wigner functions of the output generated states for two
different cases, where the negative region is found for case with large $T$.
\begin{figure}
\caption{(Colour online) Wigner function of the output state with (a) $\bar{n
}
\label{Fig6}
\end{figure}
Since the Wigner function of the output state is symmetrical in $x$ and $p$
space, one can determine whether the Wigner function has a negative
region by examining $W_{\rho _{out}}(\beta =0)$. As Gerry pointed out, the
Wigner function at the origin is the expectation value of the parity
operator $\Pi =\left( -1\right) ^{\hat{n}}$, that is $\left\langle \Pi
\right\rangle =\frac{\pi }{2}W(0)$ \cite{23}. Thus, we have $\left\langle
\Pi \right\rangle _{\rho _{th}}=1/\left( 2\bar{n}+1\right) $ for the input
thermal state and
\begin{equation}
\left\langle \Pi \right\rangle _{\rho _{out}}=\frac{\bar{n}+1-T-2T\bar{n}}{
\allowbreak \bar{n}+1-T}, \label{9}
\end{equation}
for the output generated state. Fig.7 shows $\left\langle \Pi \right\rangle
_{\rho _{out}}$ as a function of $T$ for different $\allowbreak \bar{n}$.
\begin{figure}
\caption{(Colour online) Parity of the output state \textbf{as a function of
the beam-splitter transmissivity}
\label{Fig7}
\end{figure}
Photon number states are assigned a parity of $+1$ if their photon number is
even and a parity of $-1$ if odd \cite{24}. According to Eq.(\ref{3}), we
verify $\left\langle \Pi \right\rangle _{\rho _{out}}=p_{0}-p_{1}$. If the
condition $T>\left( \bar{n}+1\right) /\left( 1+2\bar{n}\right) $ holds,
then there exists $\left\langle \Pi \right\rangle _{\rho _{out}}<0$, which
means that the Wigner function must exhibit negative region in the phase
space.
| 2,705 | 9,150 |
en
|
train
|
0.157.2
|
\section{Wigner function and Parity of the generated state}
The negative Wigner function is a witness of the nonclassicality of a
quantum state \cite{20,21,22}. For a single-mode density operator $\rho $,
the Wigner function in the coherent state representation $\left\vert
z\right\rangle $ can be expressed as $W(\beta )=\frac{2e^{2\left\vert \beta
\right\vert ^{2}}}{\pi }\int \frac{d^{2}z}{\pi }\left\langle -z\right\vert
\rho \left\vert z\right\rangle e^{-2\left( z\beta ^{\ast }-z^{\ast }\beta
\right) }$, where $\beta =\left( q+ip\right) /\sqrt{2}$. Therefore we easily
obtain $W_{\rho _{th}}(\beta )=2/\left( \pi \left( 2\bar{n}+1\right) \right)
e^{-2\left\vert \beta \right\vert ^{2}/\left( 2\bar{n}+1\right) }$ for the
input thermal state and
\begin{equation}
W_{\rho _{out}}(\beta )=p_{0}W_{\left\vert 0\right\rangle \left\langle
0\right\vert }(\beta )+p_{1}W_{\left\vert 1\right\rangle \left\langle
1\right\vert }(\beta ) \label{8}
\end{equation}
for the output generated state with $W_{\left\vert 0\right\rangle
\left\langle 0\right\vert }(\beta )=\frac{2}{\pi }e^{-2\left\vert \beta
\right\vert ^{2}}$ \ and $W_{\left\vert 1\right\rangle \left\langle
1\right\vert }(\beta )=\frac{2}{\pi }(4\left\vert \beta \right\vert
^{2}-1)e^{-2\left\vert \beta \right\vert ^{2}}$.
As we all know, the thermal state is a Gaussian state, whose Wigner function
have no negative region. However, our output generated states have lost the
Gaussian characters because of the non-Gaussian forms of their Wigner
functions.\ In addition, the Wigner function will exhibit negative values in some
region satisfying the following condition $\left\vert \beta \right\vert
^{2}<[2T\bar{n}-\left( \bar{n}+1-T\right) ]/\left( 4\bar{n}T\right) $. In
Fig.6, we plot the Wigner functions of the output generated states for two
different cases, where the negative region is found for case with large $T$.
\begin{figure}
\caption{(Colour online) Wigner function of the output state with (a) $\bar{n
}
\label{Fig6}
\end{figure}
Since the Wigner function of the output state is symmetrical in $x$ and $p$
space, one can determine whether the Wigner function has a negative
region by examining $W_{\rho _{out}}(\beta =0)$. As Gerry pointed out, the
Wigner function at the origin is the expectation value of the parity
operator $\Pi =\left( -1\right) ^{\hat{n}}$, that is $\left\langle \Pi
\right\rangle =\frac{\pi }{2}W(0)$ \cite{23}. Thus, we have $\left\langle
\Pi \right\rangle _{\rho _{th}}=1/\left( 2\bar{n}+1\right) $ for the input
thermal state and
\begin{equation}
\left\langle \Pi \right\rangle _{\rho _{out}}=\frac{\bar{n}+1-T-2T\bar{n}}{
\allowbreak \bar{n}+1-T}, \label{9}
\end{equation}
for the output generated state. Fig.7 shows $\left\langle \Pi \right\rangle
_{\rho _{out}}$ as a function of $T$ for different $\allowbreak \bar{n}$.
\begin{figure}
\caption{(Colour online) Parity of the output state \textbf{as a function of
the beam-splitter transmissivity}
\label{Fig7}
\end{figure}
Photon number states are assigned a parity of $+1$ if their photon number is
even and a parity of $-1$ if odd \cite{24}. According to Eq.(\ref{3}), we
verify $\left\langle \Pi \right\rangle _{\rho _{out}}=p_{0}-p_{1}$. If the
condition $T>\left( \bar{n}+1\right) /\left( 1+2\bar{n}\right) $ holds,
then there exists $\left\langle \Pi \right\rangle _{\rho _{out}}<0$, which
means that the Wigner function must exhibit negative region in the phase
space.
\section{Conclusion}
In summary, we have applied the QSD of Pegg, Phillips and Barnett to truncate
a thermal field to a completely mixed qubit state, i.e., a mixture of the
vacuum and single-photon state. The explicit expression was derived in
Schr\"{o}dinger picture and the success probability of such an event was discussed.
The output generated state depends on two interaction parameters, i.e., the
input thermal parameter and the transmissivity of the A-BS. It is shown that
the success probability is a linear decreasing function of the
transmissivity for any given input parameter. Some nonclassical properties
of the qubit state were analyzed including intensity amplification,
signal-to-noise ratio, and the non-positive Wigner function. It was shown
that the average photon number of the output state can be adjusted between 0
and 1. The intensity amplification will happen only for small-intensity
thermal field ($\allowbreak \bar{n}<1$) and large-transmissivity ($T>\left(
\bar{n}+1\right) /2$). The SNR of the output state can be enhanced by the
operation for a given input thermal state at larger values of $T$ ($>0.5$).
The SNR higher than unity can be found in the range of $T$ $>\left( \bar{n}
+1\right) /\left( 2\bar{n}+1\right) $. In addition, the negativity of the
Wigner function appears only for proper $T>\left( \bar{n}+1\right) /\left(
1+2\bar{n}\right) $.
\begin{acknowledgments}
We would like to thank Li-yun Hu and Bi-xuan Fan for their great helpful
discussions. This work was supported by the Research Foundation of the
Education Department of Jiangxi Province of China (Nos. GJJ151150 and
GJJ150338) and the Natural Science Foundation of Jiangxi Province of China
(20151BAB202013) as well as the National Natural Science Foundation of
China(Grants No. 11264018 and No. 11447002).
\end{acknowledgments}
\textbf{Appendix A: Derivation of the density operator in Eq.(\ref{3})}
In this appendix, we provide a detailed process of deriving the explicit
expression of the output generated state in the Schr\"{o}dinger picture.
Substituting $\left\vert 1_{a}\right\rangle =\frac{d}{ds_{1}}e^{s_{1}a^{\dag
}}\left\vert 0_{a}\right\rangle |_{s_{1}=0}$, $\left\langle 1_{a}\right\vert
=\frac{d}{dh_{1}}\left\langle 0_{a}\right\vert \exp e^{h_{1}a}|_{h_{1}=0}$, $
\left\vert 1_{b}\right\rangle =\frac{d}{ds_{2}}e^{s_{2}b^{\dag }}\left\vert
0_{b}\right\rangle |_{s_{2}=0}$, $\left\langle 1_{b}\right\vert =\frac{d}{
dh_{2}}\left\langle 0_{b}\right\vert e^{h_{2}b}|_{h_{2}=0}$, as well as
\begin{equation*}
\rho _{th}=\frac{1}{\bar{n}}\int \frac{d^{2}\alpha }{\pi }e^{-\left( \frac{1
}{\bar{n}}+1\right) \left\vert \alpha \right\vert ^{2}}e^{\alpha b^{\dag
}}\left\vert 0_{b}\right\rangle \left\langle 0_{b}\right\vert e^{\alpha
^{\ast }b},
\end{equation*}
into Eq.(\ref{2}), we have
\begin{eqnarray*}
\rho _{out} &=&\frac{d^{4}}{\bar{n}p_{d}ds_{1}dh_{1}dh_{2}ds_{2}} \\
&&\int \frac{d^{2}\alpha }{\pi }e^{-\left( \frac{1}{\bar{n}}+1\right)
\left\vert \alpha \right\vert ^{2}}\left\langle 0_{c}\right\vert
\left\langle 0_{b}\right\vert e^{h_{2}b} \\
&&e^{s_{1}\allowbreak ta^{\dag }+\frac{\alpha -s_{1}r}{\sqrt{2}}b^{\dag }-
\frac{\alpha +s_{1}r}{\sqrt{2}}c^{\dag }}\left\vert 0_{a}\right\rangle
\left\vert 0_{b}\right\rangle \left\vert 0_{c}\right\rangle \\
&&\left\langle 0_{c}\right\vert \left\langle 0_{b}\right\vert \left\langle
0_{a}\right\vert e^{\allowbreak h_{1}ta+\allowbreak \frac{\alpha ^{\ast
}-h_{1}r}{\sqrt{2}}b-\frac{\alpha ^{\ast }+h_{1}r}{\sqrt{2}}c} \\
&&e^{s_{2}b^{\dag }}\left\vert 0_{b}\right\rangle \left\vert
0_{c}\right\rangle |_{s_{1}=s_{2}=h_{1}=h_{2}=0}
\end{eqnarray*}
where we have used the following transformations
\begin{eqnarray*}
B_{1}aB_{1}^{\dag } &=&at-cr,\text{ \ }B_{1}cB_{1}^{\dag }=ar+ct, \\
B_{2}bB_{2}^{\dag } &=&\frac{b-c}{\sqrt{2}},\text{ \ }B_{2}cB_{2}^{\dag }=
\frac{b+c}{\sqrt{2}}.
\end{eqnarray*}
and $B_{1}\left\vert 0_{a}\right\rangle \left\vert 0_{c}\right\rangle
=\left\vert 0_{a}\right\rangle \left\vert 0_{c}\right\rangle $, $
B_{2}\left\vert 0_{b}\right\rangle \left\vert 0_{c}\right\rangle =\left\vert
0_{b}\right\rangle \left\vert 0_{c}\right\rangle $, as well as their
conjugations. In addition, $t=\cos \theta $ and $r=\sin \theta $\ are\ the
transmission coefficient and the reflection coefficient\ of the A-BS,
respectively. After detailed calculation, we obtain
\begin{eqnarray*}
\rho _{out} &=&\frac{d^{4}}{\left( \bar{n}+1\right)
p_{d}ds_{1}dh_{1}dh_{2}ds_{2}} \\
&&e^{\frac{\bar{n}}{2\left( \bar{n}+1\right) }s_{2}h_{2}-\frac{r}{\sqrt{2}}
\left( h_{1}s_{2}+rh_{2}s_{1}\right) } \\
&&e^{s_{1}ta^{\dag }}\left\vert 0_{a}\right\rangle \left\langle
0_{a}\right\vert e^{\allowbreak h_{1}ta}|_{s_{1}=s_{2}=h_{1}=h_{2}=0}
\end{eqnarray*}
Using $\left\vert 0_{a}\right\rangle \left\langle 0_{a}\right\vert =$ $
:e^{-a^{\dag }a}:$ and making the derivative in the normal ordering form
(denoted by $:\cdots :$), we have
\begin{equation*}
\rho _{out}=:\left( p_{0}+p_{1}a^{\dag }a\right) \exp \left( -a^{\dag
}a\right) :
\end{equation*}
Thus the density operator in Eq.(\ref{3}) is obtained.
\end{document}
| 3,093 | 9,150 |
en
|
train
|
0.158.0
|
\begin{document}
\title{Controlling Reversibility in Reversing Petri Nets with Application to Wireless Communications}
% NOTE: the following author/institute block is superseded by the full
% three-author list below; commented out to avoid a duplicate \author.
% \author{Anna Philippou \and Kyriaki Psara }
% \institute{Department of Computer Science,
% University of Cyprus\\
% \email{\{annap,kpsara01\}@cs.ucy.ac.cy} }
\author{
Anna Philippou\inst{1},
Kyriaki Psara\inst{1},
and Harun Siljak\inst{2}
}
\institute{
Department of Computer Science, University of Cyprus\\
\email{
\{annap,kpsara01\}@cs.ucy.ac.cy}
\and
CONNECT Centre, Trinity College Dublin,
\email
{[email protected]}
}
\maketitle
\begin{abstract}
Petri nets are a
formalism for modelling and reasoning about the behaviour of distributed systems. Recently, a reversible approach to Petri nets, Reversing Petri Nets (RPN), has been proposed,
allowing transitions to
be reversed spontaneously in or out of causal order.
In this work
we propose an approach for controlling the reversal of actions
of an RPN, by associating transitions with conditions whose satisfaction/violation
allows the execution of transitions in the forward/reversed direction, respectively.
We illustrate the
framework with a model of a novel, distributed algorithm for antenna selection in distributed
antenna arrays.
\end{abstract}
\pagestyle{plain}
\section{Introduction}\label{sec:Introduction}
Reversibility is a phenomenon that occurs in a variety of systems,
e.g., biochemical systems
and quantum computations. At the same time, it is
often a desirable system property.
To begin with, technologies based on reversible
computation are considered to be the only way to potentially improve the energy
efficiency of computers beyond the fundamental
Landauer limit.
Further applications
are encountered in programming languages,
concurrent transactions, and fault-tolerant systems, where
in case of an error a system should reverse back to a safe state.
As such, reversible computation has been an active topic of research
in recent years and its interplay with concurrency is being investigated
within a variety of theoretical models of computation.
The notion of causally-consistent reversibility was first introduced
in the process calculus
RCCS~\cite{RCCS}, advocating that a transition can be undone only if all its effects, if any,
have been undone beforehand. Since then the study of reversibility continued in the context of process calculi~\cite{TransactionsRCCS,Algebraic,LaneseLMSS13,LaneseMS16,CardelliL11},
event structures~\cite{ConRev}, and Petri nets~\cite{PetriNets,RPNs,RPNtoCPN}.
A distinguishing feature between the cited approaches
is that of \emph{controlling} reversibility: while various
frameworks make no restriction as to when a transition can be reversed (uncontrolled
reversibility), it can be argued that some means of controlling the conditions
of transition reversal is often useful in practice. For
instance, when dealing with fault recovery,
reversal should only be triggered when a fault is encountered. Based
on this observation, a number of strategies for controlling reversibility have
been proposed: \cite{TransactionsRCCS} introduces the concept of irreversible actions, and \cite{DBLP:conf/rc/LaneseMS12} introduces compensations to deal with irreversible actions in the context of programming abstractions for distributed systems.
Another approach for controlling reversibility is proposed in~\cite{ERK} where
an external entity is employed for capturing the order in
which transitions can be executed in the forward or the backward direction. In another line of work,~\cite{LaneseMSS11} defines a roll-back primitive for reversing computation, and in~\cite{LaneseLMSS13}
roll-back is extended with the possibility of
specifying the alternatives to be taken on resuming the
forward execution.
Finally, in~\cite{statistical} the authors associate
the direction of action reversal with energy parameters
capturing environmental conditions of the modelled systems.
In this work we focus on the framework of reversing Petri nets (RPNs)~\cite{RPNs}, which we
extend with a mechanism for controlling reversibility. This control is enforced with
the aid of conditions associated with transitions, whose satisfaction/violation acts as
a guard for executing the transition
in the forward/backward direction, respectively.
The conditions are
enunciated within a simple logical language expressing
properties relating to available tokens. The mechanism may capture environmental conditions, e.g., changes in temperature, or
the presence of faults.
We present a causal-consistent semantics of the framework.
Note that conditional transitions can also be found in existing Petri net models, e.g., in~\cite{CPN}, a
Petri-net model that associates transitions and arcs with expressions.
We conclude with the model of a novel antenna selection (AS) algorithm which inspired our framework. Centralized AS in DM MIMO (distributed, massive, multiple input, multiple output) systems \cite{gao2015massive} is computationally complex, demands a large information exchange, and the communication channel between antennas and users changes rapidly. We introduce an RPN-based, distributed, time-evolving solution with reversibility, asynchronous execution and local condition tracking for reliable performance and fault tolerance.
\section{Reversing Petri Nets}\label{sec:ReversingPetriNets}
In this section we extend the reversing Petri nets of~\cite{RPNs}
by associating transitions with conditions that control their execution
and reversal, and allow tokens to carry data values of specific types (clauses (2), (6) and (7) in the following definition). We introduce a causal-consistent semantics for the framework.
\begin{definition}{\rm
A \emph{\PN}(RPN) is a tuple $(P,T, \Sigma, A, B, F, C, I)$ where:
\begin{enumerate}
\item $P$ is a finite set of \emph{places} and
$T$ is a finite set of \emph{transitions}.
\item $\Sigma$ forms a finite set of data types with $V$ the associated
set of data values.
\item $A$ is a finite set of \emph{bases} or \emph{tokens} ranged over by $a, b,\ldots$.
$\overline{A} =
\{\overline{a}\mid a\in A\}$ contains a ``negative" instance for each token and
we write ${\cal{A}}=A \cup \overline{A}$.
\item $B\subseteq A\times A$ is a set of undirected \emph{bonds} ranged over by
$\beta,\gamma,\ldots$.
We use the notation $a \bond b$ for a bond $(a,b)\in B$. $\overline{B} =
\{\overline{\beta}\mid \beta\in B\}$
contains a ``negative" instance for each bond and we write ${\cal{B}}=B \cup
\overline{B}$.
\item $F : (P\times T \cup T \times P)\rightarrow 2^{{\cal{A}}\cup {\cal{B}}}$
is a set of directed labelled \emph{arcs}.
\item $C:T\rightarrow$ COND is a function that assigns a condition to each
transition $t$ such that $type(C(t))=Bool$.
\item $I : A \rightarrow V$ is a function that associates a
data value from $V$ to each token $a$ such that $type(I(a))=type(a)$.
\end{enumerate}
}\end{definition}
RPNs are built on the basis of a set of \emph{tokens} or \emph{bases} which correspond to the basic entities that occur in a system. Tokens have a type from the set $\Sigma$, and we write $type(e)$ to denote the type of a token or expression in the language.
Values of these types are associated to tokens of an \RPN via function $I$.
Tokens may occur as stand-alone elements but as computation proceeds they may also merge together to form \emph{bonds}.
Transitions represent events and are associated with conditions COND
defined over the data values associated with the tokens
of the model and functions/predicates over the associated data types.
\emph{Places} have the standard meaning. Directed arcs connect places to transitions and vice
versa and are labelled by a subset of ${\cal{A}}\cup {\cal{B}}$. Intuitively, these labels express the requirements for a transition to fire when placed on arcs incoming the transition, and the effects of the transition when placed on the
outgoing arcs. Graphically, a Petri net is a directed bipartite graph where tokens are indicated by $\bullet$, places by circles, transitions by boxes, and bonds by lines between tokens.
The association of tokens to places is called a \emph{marking} such that $M: P\rightarrow 2^{A\cup B}$ where $a \bond b \in M(x)$, for some $x\in P$, implies
$a,b\in M(x)$.
In addition, we employ the notion of a \emph{history}, which assigns a memory to each
transition
$H : T\rightarrow 2^\mathbb{N}$.
Intuitively, a history of $H(t) = \emptyset$ for some $t \in T$ captures that the transition has not taken place, and a history of $k\in H(t)$,
captures that the transition was executed as the $k^{th}$ transition occurrence and it has not been reversed.
Note that $|H(t)|>1$ may
arise due to cycles in a model. A pair of a marking and a history, $\state{M}{H}$, describes a \emph{state} of a RPN
with $\state{M_0}{H_0}$ the initial state, where $H_0(t) = \emptyset $ for all $t\in T$.
We introduce the following notations. We write
$\circ t = \{x\in P\mid F(x,t)\neq \emptyset\}$ and
$ t\circ = \{x\in P\mid F(t,x)\neq \emptyset\}$
for the incoming and outgoing places of transition
$t$, respectively. Furthermore, we write
$\guard{t} = \bigcup_{x\in P} F(x,t)$
and $\effects{t} = \bigcup_{x\in P} F(t,x)$.
Finally,
we define $\connected(a,C)$, where $a$ is a token and $C\subseteq A\cup B$ a set of connections,
to be the tokens connected
to $a$ via a sequence of bonds in $B$, together with the bonds creating these connections.
In what follows we assume that: (1) transitions do not
erase tokens ($A\cap \guard{t} = A\cap \effects{t}$), and
(2) tokens/bonds cannot be cloned into more than one outgoing place of a transition
($F(t,x) \cap F(t,y)=\emptyset$ for all $x,y \in P, x\neq y$). Furthermore, we assume
for all $a\in A, |\multiset{x| a\in M_0(x)}|=1$, i.e., there exists exactly one base of each type in $M_0$. Note that we extend
the exposition of~\cite{RPNs} by allowing transitions to break bonds and by permitting cyclic structures.
\subsection{Forward execution}
For a transition to be forward-enabled in an \RPN the following must hold:
\begin{definition}\label{forward}{\rm
Consider a \RPN $(P,T, \Sigma, A, B, F, C, I)$, a transition $t$, and a state $\state{M}{H}$. We say that
$t$ is \emph{forward-enabled} in $\state{M}{H}$ if:
\begin{enumerate}
\item If $a\in F(x,t)$ (resp. $\beta\in F(x,t)$) for some $x\in\circ t$, then $a\in M(x)$ (resp. $\beta\in M(x)$), and if
$\overline{a}\in F(x,t)$
(resp. $\overline{\beta} \in F(x,t)$)
for some $x\in\circ t$, then $a\not\in M(x)$ (resp. $\beta\not\in M(x)$),
\item If $\beta\in F(t,x)$ for some $x\in t\circ$ and $\beta\in M(y)$ for some $y\in \circ t$ then $\beta\in F(y,t)$,
\item $E(C(t)) = \mathit{True}$.
\end{enumerate}
}\end{definition}
Thus, $t$ is enabled in state $\state{M}{H}$ if (1) all tokens and bonds required for the
transition are available in $t$'s incoming places and none of the tokens/bonds whose absence
is required exists in $t$'s incoming place,
(2) if a pre-existing bond appears in an outgoing arc of a transition, then it is also a
precondition of the transition to fire, and
(3) the transition's condition $C(t)$ evaluates to true. We write $E(c)$ for the value of the condition based on the assignment function $I$.
When a transition $t$ is executed in the forward direction, all tokens and bonds occurring in its outgoing arcs are relocated from the input to the output places along with their connected components. The history of $t$ is extended accordingly:
\begin{definition}{\rm \label{forw}
Given a \RPN $(P,T, \Sigma, A, B, F, C, I)$, a state $\langle M, H\rangle$, and a transition $t$ enabled in $\state{M}{H}$, we write $\state{M}{H}
\trans{t} \state{M'}{H'}$
where:
\[
\begin{array}{rcl}
M'(x) & = &
M(x)-\bigcup_{a\in F(x,t)}\connected(a,M(x)) \\
&&\cup \bigcup_{ a\in F(t,x), y\in\circ{t}}\connected(a,M(y)-\guard{t} \cup F(t,x))
\end{array}
\]
and $H'(t') =
H(t')\cup \{ \max( \{0\} \cup\bigcup_{t''\in T} H(t''))
+1\},$ if $t' = t $, and
$H(t')$, otherwise.
}\end{definition}
| 3,828 | 7,332 |
en
|
train
|
0.158.1
|
\subsection{Forward execution}
For a transition to be forward-enabled in an \RPN the following must hold:
\begin{definition}\label{forward}{\rm
Consider a \RPN $(P,T, \Sigma, A, B, F, C, I)$, a transition $t$, and a state $\state{M}{H}$. We say that
$t$ is \emph{forward-enabled} in $\state{M}{H}$ if:
\begin{enumerate}
\item If $a\in F(x,t)$ (resp. $\beta\in F(x,t)$) for some $x\in\circ t$, then $a\in M(x)$ (resp. $\beta\in M(x)$), and if
$\overline{a}\in F(x,t)$
(resp. $\overline{\beta} \in F(x,t)$)
for some $x\in\circ t$, then $a\not\in M(x)$ (resp. $\beta\not\in M(x)$),
\item If $\beta\in F(t,x)$ for some $x\in t\circ$ and $\beta\in M(y)$ for some $y\in \circ t$ then $\beta\in F(y,t)$,
\item $E(C(t)) = \mathit{True}$.
\end{enumerate}
}\end{definition}
Thus, $t$ is enabled in state $\state{M}{H}$ if (1) all tokens and bonds required for the
transition are available in $t$'s incoming places and none of the tokens/bonds whose absence
is required exists in $t$'s incoming place,
(2) if a pre-existing bond appears in an outgoing arc of a transition, then it is also a
precondition of the transition to fire, and
(3) the transition's condition $C(t)$ evaluates to true. We write $E(c)$ for the value of the condition based on the assignment function $I$.
When a transition $t$ is executed in the forward direction, all tokens and bonds occurring in its outgoing arcs are relocated from the input to the output places along with their connected components. The history of $t$ is extended accordingly:
\begin{definition}{\rm \label{forw}
Given a \RPN $(P,T, \Sigma, A, B, F, C, I)$, a state $\langle M, H\rangle$, and a transition $t$ enabled in $\state{M}{H}$, we write $\state{M}{H}
\trans{t} \state{M'}{H'}$
where:
\[
\begin{array}{rcl}
M'(x) & = &
M(x)-\bigcup_{a\in F(x,t)}\connected(a,M(x)) \\
&&\cup \bigcup_{ a\in F(t,x), y\in\circ{t}}\connected(a,M(y)-\guard{t} \cup F(t,x))
\end{array}
\]
and $H'(t') =
H(t')\cup \{ \max( \{0\} \cup\bigcup_{t''\in T} H(t''))
+1\},$ if $t' = t $, and
$H(t')$, otherwise.
}\end{definition}
\subsection{Causal order reversing}
We now move on to {\em causal-order reversibility}. The following definition enunciates
that a transition $t$ is $co$-enabled (`$co$' standing for causal-order reversing) if it
has been previously executed and all the tokens on the outgoing arcs of the
transition are available in its outplaces. Furthermore, to handle causality in
the presence of cycles, clause (1) additionally requires that all bonds involved in the connected components of such tokens have been constructed by transitions $t'$ that have preceded $t$. Furthermore, clause (2) of the definition requires that
the condition of the transition is not satisfied.
\begin{definition}\label{co-enabled}{\rm
Consider a RPN $(P,T, \Sigma, A, B, F, C, I)$, a state $\state{M}{H}$, and a transition $t\in T$ with $k=\max( H(t))$. Then $t$ is $co$-enabled in $\state{M}{H}$ if:
(1) for all $a\in F(t,y)$ then $a\in M(y)$, and
if $\connected(a,M(y))\cap \effects{t'} \neq \emptyset$ for some $t'\in T$ with $k'\in H(t')$, then $k'\leq k$, and,
(2) $E(C(t)) = \mathit{False}$.
}\end{definition}
When a transition $t$ is reversed all tokens and bonds in the
pre-conditions of $t$, as well as their connected components,
are transferred to $t$'s incoming places.
\begin{definition}\label{br-def}{\rm
Given a \RPN
a state $\langle M, H\rangle$, and a transition $t$ $co$-enabled in $\state{M}{H}$ with history $k\in H(t)$, we write $ \state{M}{H}
\rtrans{t} \state{M'}{H'}$
where:
\[
\begin{array}{rcl}
M'(x) & = &
M(x)- \bigcup_{a\in F(t,x)}\connected(a,M(x))
\\ && \cup\bigcup_{ y \in t\circ, a\in F(x,t)}\connected(a,M(y)-\effects{t} \cup F(x,t))
\end{array}
\]
and $H'(t') = H(t')-\{k\}$ if $t' = t$, and $H(t')$, otherwise.
}\end{definition}
\section{Case Study: Antenna Selection in DM MIMO}\label{sec:Case study}
The search for a suitable set of antennas is a sum capacity maximization problem:
\begin{equation}
\mathcal{C}=\max_{\mathbf{P},\mathbf{H_{c}}}\log_{2}
\det\left(\mathbf{I}+\rho\frac{N_R}{N_{TS}} \mathbf{H_{c}}\mathbf{P}\mathbf{H_{c}}^{H}\right)\label{capac}
\end{equation}
where $\rho$ is the signal to noise ratio, $N_{TS}$ the number of antennas
selected from a total of $N_T$ antennas, $N_{R}$ the number of users,
$\mathbf{I}$ the $N_{TS}\times N_{TS}$ identity matrix, $\mathbf{P}$ a diagonal
$N_{R}\times N_{R}$ power matrix. $\mathbf{H_{c}}$ is the $N_{TS}\times N_{R}$
submatrix of $N_{T}\times N_{R}$ channel matrix $\mathbf{H}$ \cite{gao2015massive}. Instead of
centralized AS, in our approach (\ref{capac}) is calculated locally for small
sets of antennas (neighborhoods), switching on only antennas which improve the
capacity: in Fig.~\ref{mechanism}(a), antenna $A_{i-1}$ will not be selected.
\begin{figure}
\caption{RPN for antenna selection in DM MIMO (large antenna array).}
\label{mechanism}
\end{figure}
In the \RPN interpretation, we present the antennas by places $A_1,\ldots,A_n$, where $n=N_T$, and the overlapping neighbourhoods
by places $M_1,\ldots,M_h$. These places are connected together via transitions
$t_{i,j}$, connecting $A_i$, $A_j$ and $M_k$,
whenever there is a connection link between antennas $A_i$ and $A_j$. The transition captures that, based on the neighbourhood knowledge in place $M_k$, antenna $A_i$ may be preferred
over $A_j$ or vice versa (the transition may be reversed).
To implement the intended mechanism, we employ three types of tokens.
First, we have the power tokens $p_1,\ldots,p_l$,
where $l$ is the number of enabled antennas.
If token $p$ is located on place $A_i$, antenna $A_i$ is considered to be on.
Transfer of these tokens results in new antenna selections, ideally converging
to a locally optimal solution.
Second, tokens $m_1,\ldots,m_h$, each represent one neighborhood.
Finally, $a_1,\ldots,a_n$, represent the
antennas. The tokens are used as follows:
Given transition $t_{i,j}$ between antenna places $A_i$ and $A_j$ in
neighbourhood $M_k$, transition $t_{i,j}$ is enabled if token $p$ is
available on $A_i$, token $a_j$ on $A_j$, and bond $(a_i,m_k)$
on $M_k$, i.e.,
$F(A_i,t_{i,j}) = \{p\}$, $F(A_j,t_{i,j})= \{a_j\}$, and
$F(M_k,t_{i,j})=\{(a_i,m_k)\}$. This configuration
captures that antennas $A_i$ and $A_j$ are on and off, respectively.
(Note that the bonds between token $m_k$ and tokens of type $a$
in $M_k$ capture the active antennas in the neighbourhood.)
Then, the effect of the transition
is to break the bond $(a_i,m_k)$, and release token $a_i$ to place
$A_i$, transferring the power token to $A_j$, and creating the bond
$(a_j,m_k)$ on $M_k$, i.e.,
$F(t_{i,j}, A_i) = \{a_i\}$, $F(t_{i,j},A_j)= \{p\}$, and $F(t_{i,j},M_k)
= \{(a_j,m_k)\}$.
The mechanism achieving this for two antennas can be seen in Fig.~\ref{mechanism}(b).
Finally, to capture the transition's condition, an antenna token $a_i$ is associated with data vector $I(a_i) =
\mathbf{h}_i$, $type(\mathbf{h}_i)= \mathbb{R}^2$ ($=\mathbb{C}$), i.e., the
corresponding row of $\mathbf{H}$.
The condition constructs the matrix $\mathbf{H}_c$ of
(\ref{capac}) by collecting the
data vectors $\mathbf{h}_i$ associated with the antenna tokens $a_i$
in place $M_k$:
$\mathbf{H}_c=(\mathbf{h}_1,...,\mathbf{h}_n)^T$ where
$\mathbf{h}_i=I(a_i)$ if $a_i\in M_k$, otherwise $\mathbf{h}_i=(0\;\ldots\;0)$.
The transition $t_{i,j}$ will occur if the
sum capacity calculated for all currently active antennas
(including $a_i$), $\mathcal{C}_{a_i}$, is less than the sum capacity calculated for the same
neighbourhood with the antenna $A_i$ replaced by $A_j$,
$\mathcal{C}_{a_j}$, i.e., $\mathcal{C}_{a_i}<\mathcal{C}_{a_{j}}$. Note that if
the condition is violated, the transition may be executed in the reverse direction.
\remove{
\begin{figure}
\caption{Antenna selection on massive-MIMO}
\label{grid}
\end{figure}
}
\remove{
\begin{figure}
\caption{Memory mechanism on massive-MIMO after the execution of transition $t_{10}$.}
\label{mechanism1}
\end{figure}
}
Results of the RPN-based approach on an array consisting of $64$ antennas serving $16$ users, varying the number of selected antennas from $16$ to $64$ are shown in Fig. \ref{resant} \cite{SPP19}. If we run five RPN models in parallel and select the one with the best performance for the final selection, the results are consistently superior to those of a centralised (greedy) algorithm, and if we run just one (equivalent to the average of the performance of these five models) the results are on par with those of the centralised algorithm.
\begin{figure}
\caption{Results of antenna selection on a distributed 64 antenna array.}
\label{resant}
\end{figure}
\section{Conclusions}\label{sec:Conclusions}
We have extended RPNs with conditions
that control reversibility by determining the direction of transition execution,
and we have applied our framework to model an AS algorithm. Preliminary results show superior performance to centralised approaches.
Our experience strongly
suggests that resource management can be studied and understood
in terms of RPNs as, along with their visual nature, they offer
a number of relevant features.
In subsequent work, we plan to extend RPNs for allowing multiple tokens
of the same base/type to occur in a model
and for developing out-of-causal-order reversibility semantics in the presence of
conditional transitions as well as the destruction of bonds.
\noindent\textbf{Acknowledgments:} This work was partially supported by the European COST Action
IC 1405: Reversible Computation - Extending Horizons of Computing, Science Foundation Ireland (SFI) and
European Regional Development Fund under Grant Number 13/RC/2077, and the EU Horizon 2020
research \& innovation programme under the Marie Sklodowska-Curie grant agreement No 713567.
\small
\end{document}
| 3,504 | 7,332 |
en
|
train
|
0.159.0
|
\begin{document}
\title{A Quantum Model for Autonomous Learning Automata}
\author{Michael Siomau}
\email{[email protected]}
\affiliation{Physics Department, Jazan University, P.O.~Box 114,
45142 Jazan, Kingdom of Saudi Arabia}
\date{\today}
\begin{abstract}
The idea of information encoding on quantum bearers and its
quantum-mechanical processing has revolutionized our world and
brought mankind on the verge of enigmatic era of quantum
technologies. Inspired by this idea, in present paper we search for
advantages of quantum information processing in the field of machine
learning. Exploiting only basic properties of the Hilbert space,
superposition principle of quantum mechanics and quantum
measurements, we construct a quantum analog for Rosenblatt's
perceptron, which is the simplest learning machine. We demonstrate
that the quantum perceptron \cor{is superior to} its classical
counterpart in learning capabilities. In particular, we show that
the quantum perceptron is able to learn an arbitrary (Boolean)
logical function, perform the classification on previously unseen
classes and even recognize the superpositions of learned classes --
the task of high importance in applied medical engineering.
\end{abstract}
\pacs{03.67.Ac, 87.19.ll, 87.85.E-}
\maketitle
\section{\label{sec:1} Introduction}
During last few decades, we have been witnessing unification of
quantum physics and classical information science that resulted in
constitution of new disciplines -- quantum information and quantum
computation \cite{Nielsen:00,Georgescu:13}. While processing of
information, which is encoded in systems exhibiting quantum
properties suggests, for example, unconditionally secure quantum
communication \cite{Gisin:02} and superdense coding
\cite{Vedral:02}, computers that operate according to the laws of
quantum mechanics offer efficient solving of problems that are
intractable on conventional computers \cite{Childs:10}. Having
paramount practical importance, these announced technological
benefits have indicated the main directions of the research in the
field of quantum information and quantum computation, somehow
leaving aside other potential applications of quantum physics in
information science. So far, for instance, very little attention has
been paid on possible advantages of quantum information processing
in such areas of modern information science as machine learning
\cite{Kecman:01} and artificial intelligence \cite{Russel:09}.
\cor{Using standard quantum computation formalism, it has been shown
that machine learning governed by quantum mechanics has certain
advantages over classical learning \cite{Menneer:95, Andrecut:02,
Kouda:05, Zhou:06, Zhou:07, Manzano:09}. These advantages, however,
are strongly coupled with more sophisticated optimization procedure
than in the classical case, and thus require an efficiently working
quantum computer \cite{Ladd:10} to handle the optimization. This
paper, in contrast, presents a new approach to machine learning,
which, in the simplest case, does not require any optimization at
all.}
Our focus is on perceptron, which is the simplest learning machine.
Perceptron is a model of neuron that was originally introduced by
Rosenblatt \cite{Rosenblat:57} to perform visual perception tasks,
which, in mathematical terms, result in solution of the
\textit{linear classification problem}. There are two essential
stages of the perceptron functioning: supervised learning session
and new data classification. During the first stage, the perceptron
is given a labeled set of examples. Its task is of inferring weights
of a linear function according to some error-correcting rule.
Subsequently, this function is utilized for classification of new
previously unseen data.
In spite of its very simple internal structure and learning rule,
the perceptron's capabilities are seriously limited
\cite{Minsky:69}. Perceptron can not provide the classification, if
there is an overlap in the data or if the data can not be linearly
separated. It is also incapable of learning complex logical
functions, such as XOR function. Moreover, by its design, the
perceptron can distinguish only between previously seen classes and,
therefore, can not resolve the situation when the input belongs to
none of the learned classes, or represents a superposition of seen
classes.
In this paper we show that all the mentioned problems can be, in
principle, overcome by \cor{a quantum analog for perceptron}. There
are also two operational stages for the quantum perceptron. During
the learning stage all the data are formally represented through
quantum states of physical systems. This representation allows
expanding the data space to a physical Hilbert space. It is
important to note, that there is no need to involve real physical
systems during this stage. Thus, the learning is essentially a
classical procedure. The subject of the learning is a set of
positive operator valued measurements (POVM) \cite{Nielsen:00}. The
set is constructed by making superpositions of the training data in
a way that each operator is responsible for detection of one
particular class. This procedure is linear and does not require
solving equations or optimizing parameters. When the learning is
over, there are two possibilities to achieve the required
classification of new data. First, new data are encoded into the
states of real quantum systems, which are measured by detectors
adjusted in accordance with the learned POVM. Second, new data may
be formally encoded into the states of quantum systems and processed
with the POVM. Both mentioned ways allow to achieve the
classification.
This paper is organized as follows. In the next section, we first
overview the classical perceptron and discuss the origin of the
restrictions on its learning capabilities. After this, in
Section~\ref{sec:2b}, we introduce the quantum perceptron and show
its properties. We demonstrate, in Section~\ref{sec:3}, three
examples of how the quantum perceptron \cor{is superior to} its
classical counterpart in learning capabilities: complex logical
function learning, classification of new data on previously unseen
classes and recognition of superpositions of classes. We conclude in
Section~\ref{sec:4}.
| 1,508 | 10,490 |
en
|
train
|
0.159.1
|
\section{\label{sec:2} Basic Constructions}
\subsection{\label{sec:2a} Rosenblatt's Perceptron}
Operational structure of the classical perceptron is simple. Given
an input vector $\textbf{x}$ (which is usually called a feature
vector) consisting of $n$ features, perceptron computes a weighted
sum of its components $f(\textbf{x}) = \sum_i a_i x_i$, where
weights $a_i$ have been previously learned. The output from a
perceptron is given by $o = {\rm sign} (f(\textbf{x}))$, where ${\rm
sign(...)}$ is the Heaviside function
\begin{equation}
\label{sign}
\mathrm{sign}(y) = \left\{
\begin{array}{ll}
+1 & y>0 \\
-1 & y \leq 0
\end{array}
\right. .
\end{equation}
Depending on the binary output signal $o \in \{ +1,-1\}$, the input
feature vector $\textbf{x}$ is classified between two feature
classes, one of which is associated with output $o=+1$ and the other
with output $o=-1$.
As we have mentioned above, the perceptron needs to be trained
before its autonomous operation. During the training, a set of P
training data pairs $\{ \textbf{x}_i, d_i, i=1,...,P \}$ is given,
where $\textbf{x}_i$ are the $n$-dimensional feature vectors and
$d_i$ are desired binary outputs. Typically, at the beginning of the
learning procedure the initial weights $a_i$ of the linear function
are generated randomly. When a data pair is chosen from the training
set, the output $o_i = {\rm sign} (f(\textbf{x}_i))$ is computed
from the input feature vector $\textbf{x}_i$ and is compared to the
desired output $d_i$. If the actual and the desired outputs match
$o_i = d_i$, the weights $a_i$ are left without change and the next
pair from the data set is taken for the analysis. If $o_i \ne d_i$,
the weights $a_i$ of the linear function are to be changed according
to the error-correcting rule $\textbf{a}^\prime = \textbf{a} +
\epsilon \textbf{a} = \textbf{a} + (d_i - o_i) \textbf{x}_i$, which
is applied hereafter and until the condition $o_i = d_i$ is met.
The training procedure has clear geometric interpretation. The
weights $a_i$ of the linear function define a $\left( n-1
\right)$-dimensional hyperplane in the $n$-dimensional feature
space. The training procedure results in a hyperplane that divides
the feature space on two subspaces, so that each feature class
occupies one of the subspaces. Due to this interpretation, the
origin of the restrictions on learning capabilities of the classical
perceptron becomes visible: a hyperplane that separates the two
classes may not exist. The simplest example of two classes that can
not be linearly separated is XOR logical function of two variables,
which is given by the truth table
\begin{equation}
\label{XOR}
\begin{array}{ccccc}
x_1 & \; 0 & \; 0 & \; 1 & \; 1 \\
x_2 & \; 0 & \; 1 & \; 0 & \; 1 \\
f & \; 0 & \; 1 & \; 1 & \; 0 \\
o & -1 & +1 & +1 & -1 \\
\end{array} \, .
\end{equation}
A schematic representation of this function in the two-dimensional
feature space is shown in Fig.~\ref{fig-2}.
\begin{figure}
\caption{The feature space of XOR function is two-dimensional and
discrete (each feature takes only values 0 and 1). There is no line
(a one-dimensional hyperplane) that separates black and grey points.
Classical perceptron is incapable of classifying the input feature
vectors and, therefore, can not learn XOR function.}
\label{fig-2}
\end{figure}
There are, however, limitations on the learning capabilities of the
perceptron even in the case when the separating hyperplane exists.
As we mentioned above the hyperplane divides the feature space on
two subspaces, in spite of the fact that the feature classes occupy
two particular hypervolumes. This enforces the classification on the
two learned classes even so the given feature is essentially
different from the classes, i.e. form a new class.
It is very important to note that certain tasks undoable by
Rosenblatt's perceptron, such as complex logical functions learning
and classifying data with an overlap, can be performed in the
framework of more sophisticated classical learning models, for
example, by support vector machines \cite{Kecman:01}. However, these
classical implementations always demand \textit{nonlinear}
optimization, which complicates rapidly with growth of the feature
space. This effect is known as the curse of dimensionality of the
classical learning models \cite{Kecman:01}. In the next section, we
present a new model for the learning machine, which, however, is
\textit{linear}, but \cor{is superior to} Rosenblatt's perceptron in
its learning capabilities.
\subsection{\label{sec:2b} \cor{A quantum analog for Rosenblatt's perceptron}}
As its classical counterpart, quantum perceptron is to be trained to
perform the classification task. Suppose, we are given a set of $K$
training data pairs consisting of feature vectors $\{\textbf{x}_k,
d_k, k=1,...,K \}$ with the desired binary outputs $d \in \{ +1,-1
\}$; and each feature vector consists of $n$ features $\textbf{x} =
\{ x_1, x_2, ..., x_n \}$. \cor{Let us suppose that each feature is
restricted in a certain interval, so that all features can be
normalized to the unit interval $x_k^\prime \in \left[ 0,1\right]$
for $k=1,...,n$. This allows us to represent the input feature
vectors through the states of a (discrete) $2^n$-dimensional quantum
system, so that $\ket{\textbf{x}} =
\ket{x_1^\prime,x_2^\prime,...,x_n^\prime}$. With this quantum
representation we have extended the classical $n$-dimensional
feature space to $2^n$-dimensional Hilbert space of the quantum
system. We shall drop "primes" hereafter assuming that the features
are normalized.}
\cor{Let us construct a projection operator
$\ket{\textbf{x}}\bra{\textbf{x}}$ for each given feature vector
$\ket{\textbf{x}}$. With the help of these projectors, let us define
two operators
\begin{eqnarray}
\label{operators-def}
\nonumber
P_{-1} & = & \frac{1}{N_{-1}} \sum_{d=-1}
\ket{\textbf{x}}\bra{\textbf{x}} \, ,
\\[0.1cm]
P_{+1} & = & \frac{1}{N_{+1}} \sum_{d=+1}
\ket{\textbf{x}}\bra{\textbf{x}} \, ,
\end{eqnarray}
where $N_{-1}$ and $N_{+1}$ are normalization factors. All feature
vectors that correspond to the output $d=-1$ are summed in the
operator $P_{-1}$, while all feature vectors corresponding to $d=+1$
are collected in $P_{+1}$.} The construction of these operators
concludes the learning procedure.
There are only four possibilities of how the operators $P_{-1}$ and
$P_{+1}$ may be related:
\textit{A.} Operators $P_{-1}$ and $P_{+1}$ are orthogonal $P_{-1}
P_{+1} =0$ and form a complete set $P_{-1} + P_{+1} = I$, where $I$
is the identity operator. This means that there was no overlap
between the training data, and the two classes $P_{-1}$ and $P_{+1}$
occupy the whole feature space. As the result any input feature
vector can be classified between the two classes with no mistake.
This situation can be simulated in principle by the classical
perceptron.
\textit{B.} Operators $P_{-1}$ and $P_{+1}$ are orthogonal $P_{-1}
P_{+1} =0$, but do not form a complete set $P_{-1} + P_{+1} \ne I$.
This is an extremely interesting case. The third operator must be
defined as $P_{0} = I - P_{-1} - P_{+1}$ to fulfill the POVM
completeness requirement. The operator $P_{0}$ is, moreover,
orthogonal to $P_{-1}$ and $P_{+1}$, because $P_{-1} P_{+1} =0$.
When operating autonomously, the quantum perceptron generates three
outputs $d \in \{ +1, 0, -1 \}$, namely that the feature vector
belongs to the one of the previously seen classes $d \in \{ +1,-1
\}$ or it is essentially different from the learned classes $d = 0$
-- it belongs to a new previously unseen class. The classification
on previously unseen classes is an extremely hard learning problem,
which can not be done by classical perceptron neither by the most of
the classical perceptron networks \cite{Kecman:01}. Quantum
perceptron is capable of performing this task. Moreover, there will
be no mistake in the classification between the three classes
because of the orthogonality of the operators $P_{-1}, P_{+1}$ and
$P_0$.
\textit{C.} Operators $P_{-1}$ and $P_{+1}$ are not orthogonal
$P_{-1} P_{+1} \ne 0$, but form a complete set $P_{-1} + P_{+1} =
I$. In this case all the input data can be classified between the
two classes with some nonzero probability of mistake. This is the
case of probabilistic classification, which can not be done by the
classical perceptron, although can be performed by more
sophisticated classical learning models.
\textit{D.} The most general case is when operators $P_{-1}$ and
$P_{+1}$ are not orthogonal $P_{-1} P_{+1} \ne 0$ and do not form a
complete set $P_{-1} + P_{+1} \ne I$. One again defines the third
operator $P_0 = I - P_{-1} - P_{+1}$, which this time is not
orthogonal to $P_{-1}$ and $P_{+1}$. In this situation, quantum
perceptron classifies all the input feature vectors on three
classes, one of which is a new class, with some nonzero probability
of mistake. This situation can not be simulated by the classical
perceptron.
The quantum perceptron learning rule may have the following
geometric interpretation. In contrast to the classical perceptron,
which constructs a hyperplane separating the feature space on two
subspaces, quantum perceptron constructs two (hyper-)volumes in the
physical Hilbert space. These volumes are defined by the POVM
operators (\ref{operators-def}). During the autonomous functioning,
the POVM operators project the given feature vector $\ket{\psi}$ to
one of the volumes (or to the space unoccupied by them) allowing us
to perform the desired classification. For example, if $\bra{\psi}
P_{-1} \ket{\psi} \neq 0$, while $\bra{\psi} P_{+1} \ket{\psi} = 0$
and $\bra{\psi} P_{0} \ket{\psi} = 0$, the feature vector
$\ket{\psi}$ belongs to the class $d= -1$, and the probability of
misclassification equals zero. If, in contrast, $\bra{\psi} P_{-1}
\ket{\psi} \neq 0$, $\bra{\psi} P_{+1} \ket{\psi} \neq 0$ and
$\bra{\psi} P_{0} \ket{\psi} = 0$, the feature vector belongs to the
two classes with degrees defined by the corresponding expectation
values $\bra{\psi} P_{-1} \ket{\psi}$ and $\bra{\psi} P_{+1}
\ket{\psi}$. In the latter situation, one may perform a
probabilistic classification according to the expectation values.
\cor{We would like to stress that the construction of the operators
(\ref{operators-def}) is no way unique. There may be more
sophisticated ways to construct the POVM set in order to ensure a
better performance of the learning model for a classification
problem at hand. In fact, our construction is the simplest linear
model for a quantum learning machine. Only in this sense the
presented quantum perceptron is the analog for Rosenblatt's
perceptron, while their learning rules are essentially different. }
As we mentioned in the Introduction, there are two ways to achieve
the desired classification with the POVM. One may get real physical
systems involved or use the POVM operators as purely mathematical
instrument. For the sake of clarity, the advantages of the first of
these approaches will be discussed in Section~\ref{sec:3a} on
particular examples, while in the rest of the next section we use
the quantum perceptron as pure mathematical tool.
| 3,272 | 10,490 |
en
|
train
|
0.159.2
|
\section{\label{sec:3} Applications}
In spite of the extreme simplicity of its learning rule, quantum
perceptron may perform a number of tasks infeasible for classical
(Rosenblatt) perceptron. In this section we give three examples of
such tasks. We start with logical function learning. Historically,
the fact that classical perceptron can not learn an arbitrary
logical function was the main limitation on the learning
capabilities of this linear model \cite{Minsky:69}. We show that
quantum perceptron, in contrast, is capable of learning an arbitrary
logical function irrespective of its kind and order. In
Section~\ref{sec:3b}, we show that quantum perceptron can, in
certain cases, perform the classification without previous training,
the so-called unsupervised learning task. Classical perceptron, in
contrast, can not perform this task by construction. Finally, in
Section~\ref{sec:3c} we show that quantum perceptron may recognize
superpositions of previously learned classes. This task is of
particular interest in applied medical engineering, where
simultaneous and proportional myoelectric control of artificial limb
is a long desired goal \cite{Jiang:12}.
\subsection{\label{sec:3a} Logical Function Learning}
Let us consider a particular example of logical function -- XOR,
which is given by the truth table (\ref{XOR}). During the learning
session, we are given a set of four training data pairs
$\{\textbf{x}_i, d_i, i=1,...,4 \}$, where the feature vector
consists of two features $\textbf{x} \in \{ x_1, x_2 \}$, and the
desired output $d \in \{ +1,-1 \}$ is a binary function. Let us
represent the input features through the states of a two-dimensional
quantum system -- qubit, so that each feature is given by one of the
basis states $\ket{x_i} \in \{ \ket{0}, \ket{1} \}$ for $i=1,2$,
where $\{ \ket{0}, \ket{1} \}$ denotes the computational basis
\cor{for each feature}. In the above representation, the feature
vector $\textbf{x}$ is given by one of the four two-qubit states
$\ket{x_1, x_2}$. Following the procedure, which is described in
Section~\ref{sec:2b}, the POVM operators are constructed as
\begin{eqnarray}
\label{pure-operators}
\nonumber
P_{-1} & = & \ket{0,0} \bra{0,0} + \ket{1,1} \bra{1,1} \, ,
\\[0.1cm]
P_{+1} & = & \ket{0,1} \bra{0,1} + \ket{1,0} \bra{1,0} \, .
\end{eqnarray}
During its autonomous operation, quantum perceptron may be given
four basis states $\ket{x_1, x_2} \in \{ \ket{0,0}, \ket{0,1},
\ket{1,0}, \ket{1,1} \}$ as inputs. Since $\bra{x_1, x_2} P_{-1}
\ket{x_1, x_2} \neq 0$ only for $\ket{x_1, x_2} \in \{ \ket{0,0},
\ket{1,1} \}$, these states are classified to $d= -1$, while the
other two states $\{ \ket{0,1}, \ket{1,0}\}$ are classified to $d=
+1$. The fact that the operators $P_{-1}$ and $P_{+1}$ are
orthogonal ensures zero probability of misclassification, while the
completeness of the set of operators guarantees classification of
any input. Conclusively, the quantum perceptron has learned XOR
function.
The successful XOR function learning by quantum perceptron is the
consequence of the representation of the classical feature vector
$\textbf{x}$ through the two-qubit states. In the classical
representation, the feature vectors can not be linearly separated on
a plane, see Fig.~\ref{fig-2}. In the quantum representation, four
mutually orthogonal states $\ket{x_1, x_2} $ in the four-dimensional
Hilbert space can be separated on two classes in an arbitrary
fashion. This implies that an arbitrary logical function of two
variables can be learned by quantum perceptron. For example,
learning of logical AND function leads to the construction of
operators $P_{-1} = \ket{0,0}\bra{0,0} + \ket{0,1}\bra{0,1} +
\ket{1,0}\bra{1,0}$ and $P_{+1} = \ket{1,1} \bra{1,1}$. Moreover, an
arbitrary logical function of an arbitrary number of inputs
(arbitrary order) also can be learned by quantum perceptron, because
the number of inputs of such a function grows exponentially as
$2^n$ with the order of the function $n$ and exactly as fast as
dimensionality of the Hilbert space that is needed to represent the
logical function.
\cor{In the above discussion the need to use real quantum systems
has not emerged.} Let us now consider a situation, when one can
benefit from utilizing real quantum systems. Let us slightly modify
the problem of XOR learning. In real-life learning tasks the
training data may be corrupted by noise \cite{Kecman:01}. In some
cases, noise may lead to overlapping of the training data, which
results in misclassification of feature vectors during the training
stage and during further autonomous functioning. For example, if,
during the XOR learning, there is a finite small probability
$\delta$ that feature $x_1$ takes a wrong binary value, but the
other feature and the desired output are not affected by noise,
after a big number of trainings (which are usually required in case
of learning from noisy data), the POVM operators are given by
\begin{eqnarray}
\label{noisy-operators}
{P'}_{-1} & = & (1-\delta) \left( \ket{0,0} \bra{0,0} + \ket{1,1}
\bra{1,1} \right)
\nonumber \\[0.1cm]
& & \hspace{0.5cm} + \; \delta \left(\ket{0,1} \bra{0,1} + \ket{1,0}
\bra{1,0} \right) \, ,
\nonumber \\[0.1cm]
{P'}_{+1} & = & (1-\delta) \left( \ket{0,1} \bra{0,1} + \ket{1,0}
\bra{1,0}\right)
\nonumber \\[0.1cm]
& & \hspace{0.5cm} + \; \delta \left( \ket{0,0} \bra{0,0} +
\ket{1,1} \bra{1,1}\right) \, .
\end{eqnarray}
Operators ${P'}_{-1}$ and ${P'}_{+1}$ are not orthogonal ${P'}_{-1}
{P'}_{+1} \ne 0$ in contrast to operators (\ref{pure-operators}),
\cor{but still form a complete set.} This means that during the
autonomous operation of the quantum perceptron, the input feature
vectors can be misclassified. Nevertheless, each feature is
classified between the two classes and, on average, most of the
feature vectors are classified correctly. This means that quantum
perceptron simulates XOR function with a degree of accuracy given by
$1 - \delta$.
If we use real physical systems to encode feature vectors during
autonomous functioning of the perceptron and measure the states of
the systems with experimental setup adjusted in accordance with the
POVM (\ref{noisy-operators}), we can perform a probabilistic
classification. Moreover, we can exactly (in probabilistic sense)
reproduce fluctuations that have been observed during the training.
In certain sense such learning is too accurate and may be of use in
some cases. Anyway, classical perceptron can not do any similar
task.
\cor{It is, however, important to note that practical simulation of
quantum perceptron with real physical systems may not be always
possible. In Section \ref{sec:2b} we discussed situations when
operators $P_{-1}$ and $P_{+1}$ do not form a complete set, and
constructed the third operator $P_0 = I - P_{-1} - P_{+1}$. It is
possible in principle that the constructed operator $P_0$ is
negative, i.e. unphysical. This means that the classification
problem at hand can not be physically simulated with our linear
model, although the problem may be treated mathematically with the
quantum perceptron approach.}
\cor{In this section we have seen how quantum representation and
quantum measurements contribute to advanced learning abilities of
the quantum perceptron. Even without these features, however,
quantum perceptron is superior to its classical counterpart in learning
capabilities due to specific algebraic structure of the POVM
operators. In the following sections we provide two examples, where
advanced learning relies only on the structure of the POVM set.}
\subsection{\label{sec:3b} Unsupervised Learning}
The (supervised) learning stage has been embedded into quantum
perceptron by analogy with classical perceptron. Surprisingly,
however, the learning rule of the quantum perceptron allows one to
perform learning tasks beyond the supervised learning paradigm. Suppose,
for example, that we are given an unlabeled set of feature vectors
and need to find a possible structure of this set, i.e. we need to
answer whether there are any feature classes in the set. The
following protocol allows us to resolve such an unsupervised
learning task under certain conditions.
\cor{Being given the first feature vector $\ket{\textbf{x}_1}$ from
the set, let us define two classes with the POVM operators
\begin{eqnarray}
\label{operators-unsup-learn}
\nonumber
P^{(0)}_{-1} & = & \ket{\textbf{x}_1}\bra{\textbf{x}_1} \, ,
\\[0.1cm]
P^{(0)}_{+1} & = & I - P^{(0)}_{-1} \, .
\end{eqnarray}
where $I$ is the identity operator. Here, the class $d=+1$ is
formally defined as ``not $d=-1$''. The next given feature vector
$\ket{\textbf{x}_2}$ is tested to belong to one of these classes. If
$\bra{\textbf{x}_2} P^{(0)}_{-1} \ket{\textbf{x}_2} >
\bra{\textbf{x}_2} P^{(0)}_{+1} \ket{\textbf{x}_2}$, the feature
vector $\ket{\textbf{x}_2}$ is close enough to $\ket{\textbf{x}_1}$
and thus belongs to class $d=-1$. In this case the POVM operators
(\ref{operators-unsup-learn}) are updated to
\begin{eqnarray}
\label{operators-unsup-learn-first-it-2}
\nonumber
P^{(1)}_{-1} & = & \ket{\textbf{x}_1}\bra{\textbf{x}_1} +
\ket{\textbf{x}_2}\bra{\textbf{x}_2}\, ,
\\[0.1cm]
P^{(1)}_{+1} & = & I - P^{(1)}_{-1} \, .
\end{eqnarray}
If, in contrast, $\bra{\textbf{x}_2} P^{(0)}_{-1} \ket{\textbf{x}_2}
\leq \bra{\textbf{x}_2} P^{(0)}_{+1} \ket{\textbf{x}_2}$, the
feature vector $\ket{\textbf{x}_2}$ is sufficiently distant from
$\ket{\textbf{x}_1}$ and therefore can be assigned a new class
$d=+1$. Due to the first representative of the $d=+1$ class, we may
update the formal definition of the $P^{(0)}_{+1}$ introducing a new
POVM set
\begin{eqnarray}
\label{operators-unsup-learn-first-it-3}
\nonumber
P^{(1)}_{-1} & = & \ket{\textbf{x}_1}\bra{\textbf{x}_1} \, ,
\\[0.1cm]
P^{(1)}_{+1} & = & \ket{\textbf{x}_2}\bra{\textbf{x}_2} \, .
\end{eqnarray}
This procedure is repeated iteratively until all the feature vectors
are classified between the two classes $d=-1$ and $d=+1$.}
The above protocol will work only if there are at least two feature
vectors $\ket{\textbf{x}}$ and $\ket{\textbf{y}}$ in the given
feature set such that $\bra{\textbf{x}} (I-2P) \ket{\textbf{x}} \geq
0$, where $P = \ket{\textbf{y}} \bra{\textbf{y}}$. In the opposite
case, unsupervised learning within the protocol is not possible.
Moreover, the classification crucially depends on the order of examples,
because first seen feature vectors define the classes. This
situation is, however, typical for unsupervised learning models
\cite{Kecman:01}. \cor{To reduce the dependence of the
classification on the order of the feature vectors appearance, it is
possible to repeat the learning many times taking different order of
the input feature vectors, and compare the results of the
classification.} In spite of the above limitations, the unsupervised
classification can be in principle performed by the quantum
perceptron, while this task is undoable for the classical
perceptron.
| 3,336 | 10,490 |
en
|
train
|
0.159.3
|
\subsection{\label{sec:3b} Unsupervised Learning}
The (supervised) learning stage has been embedded into quantum
perceptron by analogy with classical perceptron. Surprisingly,
however, the learning rule of the quantum perceptron allows one to
perform learning tasks beyond the supervised learning paradigm. Suppose,
for example, that we are given an unlabeled set of feature vectors
and need to find a possible structure of this set, i.e. we need to
answer whether there are any feature classes in the set. The
following protocol allows us to resolve such an unsupervised
learning task under certain conditions.
\cor{Being given the first feature vector $\ket{\textbf{x}_1}$ from
the set, let us define two classes with the POVM operators
\begin{eqnarray}
\label{operators-unsup-learn}
\nonumber
P^{(0)}_{-1} & = & \ket{\textbf{x}_1}\bra{\textbf{x}_1} \, ,
\\[0.1cm]
P^{(0)}_{+1} & = & I - P^{(0)}_{-1} \, .
\end{eqnarray}
where $I$ is the identity operator. Here, the class $d=+1$ is
formally defined as ``not $d=-1$''. The next given feature vector
$\ket{\textbf{x}_2}$ is tested to belong to one of these classes. If
$\bra{\textbf{x}_2} P^{(0)}_{-1} \ket{\textbf{x}_2} >
\bra{\textbf{x}_2} P^{(0)}_{+1} \ket{\textbf{x}_2}$, the feature
vector $\ket{\textbf{x}_2}$ is close enough to $\ket{\textbf{x}_1}$
and thus belongs to class $d=-1$. In this case the POVM operators
(\ref{operators-unsup-learn}) are updated to
\begin{eqnarray}
\label{operators-unsup-learn-first-it-2}
\nonumber
P^{(1)}_{-1} & = & \ket{\textbf{x}_1}\bra{\textbf{x}_1} +
\ket{\textbf{x}_2}\bra{\textbf{x}_2}\, ,
\\[0.1cm]
P^{(1)}_{+1} & = & I - P^{(1)}_{-1} \, .
\end{eqnarray}
If, in contrast, $\bra{\textbf{x}_2} P^{(0)}_{-1} \ket{\textbf{x}_2}
\leq \bra{\textbf{x}_2} P^{(0)}_{+1} \ket{\textbf{x}_2}$, the
feature vector $\ket{\textbf{x}_2}$ is sufficiently distant from
$\ket{\textbf{x}_1}$ and therefore can be assigned a new class
$d=+1$. Due to the first representative of the $d=+1$ class, we may
update the formal definition of the $P^{(0)}_{+1}$ introducing a new
POVM set
\begin{eqnarray}
\label{operators-unsup-learn-first-it-3}
\nonumber
P^{(1)}_{-1} & = & \ket{\textbf{x}_1}\bra{\textbf{x}_1} \, ,
\\[0.1cm]
P^{(1)}_{+1} & = & \ket{\textbf{x}_2}\bra{\textbf{x}_2} \, .
\end{eqnarray}
This procedure is repeated iteratively until all the feature vectors
are classified between the two classes $d=-1$ and $d=+1$.}
The above protocol will work only if there are at least two feature
vectors $\ket{\textbf{x}}$ and $\ket{\textbf{y}}$ in the given
feature set such that $\bra{\textbf{x}} (I-2P) \ket{\textbf{x}} \geq
0$, where $P = \ket{\textbf{y}} \bra{\textbf{y}}$. In the opposite
case, unsupervised learning within the protocol is not possible.
Moreover, the classification crucially depends on the order of examples,
because first seen feature vectors define the classes. This
situation is, however, typical for unsupervised learning models
\cite{Kecman:01}. \cor{To reduce the dependence of the
classification on the order of the feature vectors appearance, it is
possible to repeat the learning many times taking different order of
the input feature vectors, and compare the results of the
classification.} In spite of the above limitations, the unsupervised
classification can be in principle performed by the quantum
perceptron, while this task is undoable for the classical
perceptron.
\subsection{\label{sec:3c} Simultaneous and Proportional Myoelectric Control}
The problem of signal classification has found remarkable
applications in medical engineering. It is known that muscle
contraction in human body is governed by electrical neural signals.
These signals can be acquired by different means \cite{Parker:04},
but are typically summarized into so-called electromyogram (EMG). In
principle, processing the EMG, one may predict muscular response to
the neural signals and the subsequent response of the body. This idea is
widely used in many applications, including myoelectric-controlled
artificial limb, where the surface EMG is recorded from the remnant
muscles of the stump and used, after processing, for activating
certain prosthetic functions of the artificial limb, such as hand
open/close \cite{Jiang:12}.
Despite decades of research and development, however, none of the
commercial prostheses is using pattern classification based
controller \cite{Jiang:12}. The main limitation on successful
practical application of pattern classification for myoelectric
control is that it leads to very unnatural control scheme. While
natural movements are continuous and require activations of several
degrees of freedom (DOF) simultaneously and proportionally,
classical schemes for pattern recognition allow only sequential
control, i.e. activation of only one class that corresponds to a
particular action in one decision \cite{Jiang:12}. Simultaneous
activation of two DOFs is thus recognized as a new class of action,
but not as a combination of known actions. Moreover, all these
classes as well as their superpositions must be previously learned.
This leads to higher rehabilitation cost and more frustration of the
user, who must spend hours in a lab to learn the artificial limb
control.
Recently, we have taken quantum perceptron approach to the problem
of simultaneous and proportional myoelectric control
\cite{Siomau:13}. We considered a very simple control scheme, where
two quantum perceptrons were controlling two degrees of freedom of
the wrist prosthesis. We took EMG signals with corresponding angles
of the wrist position from an able-bodied subject who performs wrist
contractions. For the training we used only those EMG that
correspond to the activation of a single DOF. During the test, the
control scheme was given EMG activating multiple DOFs. We found that
in 45 of 55 data blocks the actions were recognized correctly
with accuracy exceeding $73\%$, which is comparable to the accuracy
of the classical schemes for classification.
In the above example, \cor{we used a specific representation of the
feature vectors. Since the features (i.e. the neural signals) are
real and positive numbers there was no need to expand the feature
space. Moreover, in general it is not possible to scale a given
feature on the unit interval, because the neural signals observed
during the learning and autonomous functioning may differ
significantly in amplitude, and \textit{a priori} scaling may lead
to misapplication of the artificial limb. Therefore, the amplitude
of a signal was normalized over amplitudes from all the channels to
ensure proportional control of the prosthesis. In fact, the specific
structure of the POVM set was the only feature of the quantum
perceptron that we used. With this feature alone we were able to
recognize 4 original classes observed during the training and 4 new
(previously unseen) classes that correspond to simultaneous
activation of two DOF.} In general, within the above control scheme,
$n$ quantum perceptrons are able to recognize $2n$ original classes
with $(2n)!/[2(2n-2)!]-n$ additional two-class superpositions of
these classes. In contrast, $n$ classical perceptrons may recognize
only $2n$ classes, which were seen during the learning. \cor{The
advantage of the quantum perceptron over the classical perceptron
can be understood from the geometric interpretation discussed in
Section~\ref{sec:2b}. While $n$ classical perceptrons construct $n$
hyperplanes in the feature space, which separate the feature space
on $2n$ non-overlapping classes, $n$ quantum perceptrons build $n$
hypervolumes, which may not fill the whole feature space and may
overlap.}
| 2,107 | 10,490 |
en
|
train
|
0.159.4
|
\section{\label{sec:4} Conclusion}
Bridging between quantum information science and machine learning
theory, we showed that the capabilities of an autonomous learning
automata can be dramatically increased using the quantum information
formalism. We have constructed the simplest linear quantum model for
learning machine, which, however, \cor{is superior to} its classical
counterpart in learning capabilities. \cor{Due to the quantum
representation of the feature vectors, the probabilistic nature of
quantum measurements and the specific structure of the POVM set, the
quantum perceptron is capable of learning an arbitrary logical
function, performing probabilistic classification, recognizing
superpositions of previously seen classes and even classifying on
previously unseen classes. Since all classical learning models track
back to Rosenblatt's perceptron, we hope that the linear quantum
perceptron will serve as a basis for future development of
practically powerful quantum learning models, and especially in the
domain of nonlinear classification problems.}
\begin{acknowledgments}
This project is supported by KACST.
\end{acknowledgments}
\end{document}
| 267 | 10,490 |
en
|
train
|
0.160.0
|
\begin{document}
\date{}
\title{Stochastic HJB Equations\ and Regular Singular Points}
\begin{abstract}
In this paper we show that some HJB equations arising from both finite and infinite horizon stochastic optimal control problems
have a regular singular point at the origin. This makes them amenable to solution by power series techniques. This extends the
work of Al'brekht who showed that the HJB equations of an infinite horizon deterministic optimal control problem can have a regular singular point
at the origin, Al'brekht solved the HJB equations by power series, degree by degree. In particular, we show that the infinite horizon stochastic optimal control problem
with linear dynamics, quadratic cost and bilinear noise leads to a new type of algebraic Riccati equation which we call the Stochastic
Algebraic Riccati Equation (SARE). If SARE can be solved then one has a complete solution to this infinite horizon stochastic optimal control problem.
We also show that a finite horizon stochastic optimal control problem with linear dynamics, quadratic cost and bilinear noise leads to a Stochastic Differential
Riccati Equation (SDRE) that is well known. If these problems are the linear-quadratic-bilinear part of a nonlinear finite horizon stochastic optimal control problem
then we show how the higher degree terms of the solutions can be computed degree by degree. To our knowledge this computation is new.
\end{abstract}
\section{ Linear Quadratic Regulator with Bilinear Noise}
\setcounter{equation}{0}
Consider an infinite horizon, discounted, stochastic Linear Quadratic Regulator with Bilinear Noise (LQGB),
\begin{eqnarray*}
\min_{u(\cdot)} {1\over 2} {\rm E}\int_0^\infty e^{-\alpha t}\left(x'Qx+2x'Su+u'Ru\right) \ d t
\end{eqnarray*}
subject to
\begin{eqnarray*}
dx&=& (Fx+Gu)\ dt+ \sum_{k=1}^r (C_{k} x+D_k u )\ dw_k\\
x(0)&=&x^0
\end{eqnarray*}
In a previous version of this paper we studied the case with $D_k=0$ \cite{Kr18}.
The state $x$ is $n$ dimensional, the control $u$ is $m$ dimensional and $w(t)$ is standard $r$ dimensional Brownian motion.
The matrices are sized accordingly, in particular $C_{k}$ is an $n\times n$ matrix and $D_k$ is an $n \times m$ matrix for each $k=1,\ldots,r$. The discount factor is $\alpha\ge 0$.
To the best of our knowledge such problems have not been considered before. The finite horizon version of this problem can be found in Chapter 6 of the excellent treatise by Yong and Zhou \cite{YZ99}. We will also treat finite horizon problems in Section \ref{FH} but not in the same generality as Yong and Zhou. Throughout this note we will require that the coefficient of the noise is $O(x,u)$. Yong and Zhou allow the coefficient to be $O(1)$ in their linear-quadratic problems. The reason why we require $O(x,u)$ is that then the associated stochastic Hamilton-Jacobi-Bellman equations for nonlinear extensions of
LQGB have regular singular points at the origin. Hence they are amenable to solution by power series techniques. If the noise is $O(1)$ these power series techniques have closure problems,
the equations for lower degree terms depend on higher degree terms. If the coefficients of the noise is $O(x,u)$ then the equations can be solved degree by degree.
A first order partial differential equation with independent variable $x$ has a regular singular point at $x=0$ if the coefficients of the first order partial derivatives are $O(x)$. A second order partial differential equation has a regular singular point at $x=0$ if the coefficients of the first order partial derivatives are $O(x)$ and the coefficients of the second order partial derivatives are $O(x)^2$. For more on regular singular points we refer the reader to \cite{BD09}.
If we can find a smooth scalar valued function $\pi(x)$ and a smooth $m$ vector valued $\kappa(x)$ satisfying the discounted stochastic Hamilton-Jacobi-Bellman equations (SHJB)
\begin{eqnarray}\label{shjb1}
0&=& \mbox{min}_u \left\{ -\alpha \pi(x) +\frac{\partial \pi}{\partial x}(x) (Fx+Gu)+{1\over 2} \left( x'Qx+2x'Su+u'Ru\right)\right.
\nonumber
\\
&&\left. +{1\over 2}\sum_{k=1}^r (x'C'_k+u'D'_k)
\frac{\partial^2 \pi}{\partial x^2}(x) (C_kx+D_ku) \right\}
\\
\nonumber
\kappa(x)&=& \mbox{argmin}_u \left\{ \frac{\partial \pi}{\partial x}(x) (Fx+Gu)+{1\over 2} \left( x'Qx+2x'Su+u'Ru\right)\right.
\\ &&\left.+{1\over 2}\sum_{k=1}^r (x'C'_k+u'D'_k)
\frac{\partial^2 \pi}{\partial x^2}(x) (C_kx+D_ku)\right\}
\label{shjb2}
\end{eqnarray}
then by a standard verification argument \cite{FR75} one can show that
$\pi(x^0)$ is the optimal cost of starting at $x^0$ and $u(0)=\kappa(x^0)$ is the optimal control at $x^0$.
We make the standard assumptions of deterministic LQR,
\begin{itemize}
\item The matrix
\begin{eqnarray*}
\bmt Q&S\\S'&R\end{array}\right]
\end{eqnarray*}
is nonnegative definite.
\item The matrix $R$
is positive definite.
\item The pair $F$, $G$ is stabilizable.
\item The pair $Q^{1/2}$, $F$ is detectable.
\end{itemize}
Because of the linear dynamics and quadratic cost,
we expect that $\pi(x) $ is a quadratic function of $x$ and $\kappa(x)$ is a linear function of $x$,
\begin{eqnarray*}
\pi(x)&=& {1\over 2}x'Px\\
\kappa(x)&=& Kx
\end{eqnarray*}
Then the stochastic Hamilton-Jacobi-Bellman equations (\ref{shjb1}, \ref{shjb2}) simplify to
\begin{eqnarray}
0&=&-\alpha P +PF+F'P +Q -K'RK\nonumber\\
&&+\sum_{k=1}^r \left(C'_k+K'D'_k\right)P\left(C_k+D_kK\right) \label{sare}
\\
K&=&-\left(R+\sum_{k=1}^rD'_kPD_k\right)^{-1}\left(G'P+S'\right) \label{K}
\end{eqnarray}
We call these equations (\ref{sare}, \ref{K}) the Stochastic Algebraic Riccati Equations (SARE).
They reduce to the deterministic Algebraic Riccati Equations (ARE) if $C_k=0$ and $D_k=0$.
Here is an iterative method for solving SARE. Let $P_{(0)}$ be the solution of the first deterministic ARE
\begin{eqnarray*}
0&=& -\alpha P_{(0)}+ P_{(0)}F+F'P_{(0)}+Q-(P_{(0)}G+S)R^{-1}(G'P_{(0)}+S')
\end{eqnarray*}
and $K_{(0)}$ be the solution of the second deterministic ARE
\begin{eqnarray*}
K_{(0)}&=&-R^{-1}(G'P_{(0)}+S')
\end{eqnarray*}
Given $P_{(\tau-1)}$ define
\begin{eqnarray*}
Q_{(\tau)}&=& Q+\sum_{k=1}^r C'_k P_{(\tau-1)}C_k\\
R_{(\tau)}&=& R+\sum_{k=1}^r D'_k P_{(\tau-1)}D_k\\
S_{(\tau)}&=& S+\sum_{k=1}^r C'_k P_{(\tau-1)}D_k
\end{eqnarray*}
Let
$P_{(\tau)}$ be the solution of
\begin{eqnarray*}
0&=& -\alpha P_{(\tau)} +P_{(\tau)}F+F'P_{(\tau)}+Q_{(\tau)}-(P_{(\tau)}G+S_{(\tau)})R_{(\tau)}^{-1}(G'P_{(\tau)}+S'_{(\tau)})
\end{eqnarray*}
and
\begin{eqnarray*}
K_{(\tau)}&=&-R_{(\tau)}^{-1}\left(G'P_{(\tau)}+S_{(\tau)}'\right)
\end{eqnarray*}
If the iteration on $P_{(\tau)}$ nearly converges, that is, for some $\tau$, $P_{(\tau)}\approx P_{(\tau-1)}$, then $P_{(\tau)}$ and $K_{(\tau)}$ are approximate solutions to SARE.
The solution $P$ of the deterministic ARE is the kernel of the optimal cost of a deterministic LQR and since
\begin{eqnarray*}
\bmt Q& S\\ S'& R\end{array}\right] \le \bmt Q_{(\tau-1)}& S_{(\tau-1)}\\S'_{(\tau-1)}& R_{(\tau-1)}\end{array}\right] \le \bmt Q_{(\tau)}& S_{(\tau)}\\S'_{(\tau)}& R_{(\tau)}\end{array}\right]
\end{eqnarray*}
it follows that $P_{(0)}\le P_{(\tau-1)} \le P_{(\tau)} $, the iteration is monotonically increasing.
We have found computationally that if matrices $C_k$ and $D_k$ are not too big then the iteration converges. But if the $C_k$ and $D_k$ are about the same size as $F$ and $G$ or larger the iteration can diverge. Further study of this issue is needed. The iteration does converge in the following simple example.
\section{LQGB Example}
\setcounter{equation}{0}
Here is a simple example with $n=2,m=1,r=2$.
\begin{eqnarray*}
\min_u {1\over 2}\int_0^\infty \|x\|^2+u^2\ dt
\end{eqnarray*}
subject to
\begin{eqnarray*}
dx_1&=& x_2\ dt+0.1 x_1 \ dw_1\\
dx_2&=&u\ dt+0.1 (x_2 +u)\ dw_2
\end{eqnarray*}
In other words
\begin{eqnarray*}
Q=\bmt 1&0\\0&1\end{array}\right],& S=\bmt 0\\1\end{array}\right], &R=1\\
F= \bmt 0&1\\0&0\end{array}\right],& G=\bmt 0\\1\end{array}\right]& \\
C_1=\bmt 0.1&0\\0&0\end{array}\right], &C_2=\bmt 0&0\\0&0.1\end{array}\right]&\\
D_1=\bmt 0\\0\end{array}\right], &D_2=\bmt 0\\0.1\end{array}\right]&
\end{eqnarray*}
The solution of the noiseless ARE is
\begin{eqnarray*}
P&=& \bmt 1.7321 & 1.000\\1.000&1.7321\end{array}\right]
\\
K&=&-\bmt 1.0000& 1.7321\end{array}\right]
\end{eqnarray*}
The eigenvalues of the noiseless closed loop matrix $F+GK$ are $-0.8660\pm0.5000i $.
The above iteration converges to the solution of the noisy SARE in eight iterations, the solution is
\begin{eqnarray*}
P&=& \bmt 1.7625 & 1.0176\\1.0176&1.7524\end{array}\right]\\
K&=&-\bmt 1.0176&1.7524\end{array}\right]
\end{eqnarray*}
The eigenvalues of the noisy closed loop matrix $F+GK$ are $-0.8762\pm 0.4999i$.
As expected the noisy system is more difficult to control than the noiseless system. It should be noted that the above iteration diverged to infinity
when the noise coefficients were increased from $0.1$ to $1$.
| 3,349 | 11,580 |
en
|
train
|
0.160.1
|
\section{Nonlinear Infinite Horizon HJB}
Suppose the problem is not linear-quadratic, the dynamics is given by an Ito equation
\begin{eqnarray*}
dx&=& f(x,u) \ dt +\sum_{k=1}^r\gamma_k(x,u) \ dw_k
\end{eqnarray*}
and the criterion to be minimized is
\begin{eqnarray*}
\min_{u(\cdot)} {\rm E}\int_0^\infty e^{-\alpha t}l(x,u)\ d t
\end{eqnarray*}
We assume that $f(x,u), \gamma_k(x,u), l(x,u) $ are smooth functions that have Taylor polynomial expansions
around $x=0,u=0$,
\begin{eqnarray*}
f(x,u)&=& Fx+Gu+f^{[2]}(x,u)+\ldots+f^{[d]}(x,u)+O(x,u)^{d+1}\\
\gamma_k(x,u)&=& C_kx+D_ku+\gamma_k^{[2]}(x,u)+\ldots+\gamma_k^{[d]}(x,u)+O(x,u)^{d+1}\\
l(x,u)&=&{1\over 2}\left(x'Qx+2x'Su+u'Ru\right) +l^{[3]}(x,u)+\ldots+l^{[d+1]}(x,u)+O(x,u)^{d+2}
\end{eqnarray*}
where $^{[d]}$ indicates the homogeneous polynomial terms of degree $d$.
Then the discounted stochastic Hamilton-Jacobi-Bellman equations become
\begin{eqnarray}
0&=& \mbox{min}_u \left\{ -\alpha \pi(x) +\frac{\partial \pi}{\partial x}(x) f(x,u)+l(x,u)\right. \label{shjb3}
\nonumber
\\
&&\left. +{1\over 2}\sum_{k=1}^r \gamma'_k(x,u)
\frac{\partial^2 \pi}{\partial x^2}(x) \gamma_k(x,u)\right\}
\\
\kappa(x)&=& \mbox{argmin}_u \left\{ -\alpha \pi(x) +\frac{\partial \pi}{\partial x}(x) f(x,u)+l(x,u)\right. \nonumber
\\ \label{shjb4}
&&\left. +{1\over 2}\sum_{k=1}^r \gamma'_k(x,u)
\frac{\partial^2 \pi}{\partial x^2}(x) \gamma_k(x,u)\right\}
\end{eqnarray}
If the control enters the dynamics affinely,
\begin{eqnarray*}
f(x,u)&=& f^0(x) +f^u(x)u\\
\gamma_k(x,u)&=&\gamma^0_k(x)+\gamma^u_k(x)u
\end{eqnarray*}
and $l(x,u)$ is always strictly convex in $u$ for every $x$ then the quantity to be minimized in (\ref{shjb3}) is strictly convex in $u$.
If we assume that (\ref{shjb3})
is strictly convex in $u$ then the HJB equations (\ref{shjb3}, \ref{shjb4}) simplify to
\begin{eqnarray}
0&=&-\alpha \pi(x) +\frac{\partial \pi}{\partial x}(x) f(x,\kappa(x))+l(x,\kappa(x))\label{shjb5} \\
&& \nonumber+{1\over 2}\sum_{k=1}^r \gamma'_k(x,\kappa(x))
\frac{\partial^2 \pi}{\partial x^2}(x) \gamma_k(x,\kappa(x))
\\
0&=&\frac{\partial \pi}{\partial x}(x) \frac{\partial f}{\partial u}(x,\kappa(x))+\frac{\partial l}{\partial u}(x,\kappa(x))
\label{shjb6}
\\&& +\sum_{k=1}^r \gamma'_k(x,\kappa(x))
\frac{\partial^2 \pi}{\partial x^2}(x) \frac{\partial \gamma_k}{\partial u}(x,\kappa(x)) \nonumber
\end{eqnarray}
Because $f(x,u)=O(x,u)$ and $\gamma_k(x,u)=O(x,u)$, (\ref{shjb5}) has a regular singular point at $x=0,u=0$ and so is
amenable to power series solution techniques. If $\gamma_k(x,u)=O(1)$ then there is persistent noise
that must be overcome by persistent control action. Presumably then the optimal cost is infinite.
Following Al'brekht \cite{Al61} we assume that the optimal cost $\pi(x)$ and the optimal
feedback have Taylor polynomial expansions
\begin{eqnarray*}
\pi(x)&=& {1\over 2}x'Px +\pi^{[3]}(x)+\ldots+\pi^{[d+1]}(x)+O(x)^{d+2}\\
\kappa(x)&=&Kx+\kappa^{[2]}(x)+\ldots+\kappa^{[d]}(x)+O(x)^{d+1}
\end{eqnarray*}
We plug all these expansions into the simplified SHJB equations (\ref{shjb5}, \ref{shjb6}).
At lowest degrees, degree two in (\ref{shjb5}) and degree one in (\ref{shjb6}) we get the familiar SARE
(\ref{sare}, \ref{K}).
If (\ref{sare}, \ref{K}) are solvable
then we may proceed to the next degrees, degree three in (\ref{shjb5}) and degree two in (\ref{shjb6}).
\begin{eqnarray}
0&=&\frac{\partial \pi^{[3]}}{\partial x}(x) (F+GK)x+x'Pf^{[2]}(x,Kx)+l^{[3]}(x,Kx) \label{shjb7}\\&&
+{1\over 2} \sum_k x'(C'_k +K'D'_k) \frac{\partial^2 \pi^{[3]}}{\partial x^2}(x) (C_k+D_kK) x \nonumber
\\&&+\sum_k x'(C'_k +K'D'_k)P\gamma_k^{[2]}(x,Kx)\nonumber \\ \nonumber
\\
0&=& \frac{\partial \pi^{[3]}}{\partial x}(x) G +x'P\frac{\partial f^{[2]}}{\partial u}(x,Kx)+\frac{\partial l^{[3]}}{\partial u}(x,Kx)
\label{shjb8}
\\&&+\sum_k x'(C_k+D_kK)'\left(P\frac{\partial \gamma^{[2]}_k}{\partial u}(x,Kx)+ \frac{\partial^2 \pi^{[3]}}{\partial x^2}(x) D_k\right) \nonumber
\\&& +\sum_k (\gamma^{[2]}_k(x,Kx))'PD_k \nonumber
+(\kappa^{[2]}(x))'\left(R+\sum_k D'_kPD_k\right) \nonumber
\end{eqnarray}
Notice the first equation (\ref{shjb7}) is a square linear equation for the unknown $\pi^{[3]}(x)$,
the other unknown $\kappa^{[2]}(x)$ does not appear in it.
If we can solve the first equation (\ref{shjb7}) for $\pi^{[3]}(x)$
then we can solve the second equation (\ref{shjb8}) for $\kappa^{[2]}(x)$ because of the standard assumption that $R$ is invertible so $R+\sum_k D'_kPD_k$
must also be invertible.
In the deterministic case the eigenvalues of the linear operator
\begin{eqnarray} \label{dop}
\pi^{[3]}(x) &\mapsto& \frac{\partial \pi^{[3]}}{\partial x}(x) (F+GK)x
\end{eqnarray}
are the sums of three eigenvalues of $F+GK$. Under the standard LQR assumptions all the eigenvalues of $F+GK$ are in the open left half plane so any
sum of three eigenvalues of $F+GK$ is different from zero and the operator (\ref{dop}) is invertible.
In the stochastic case the relevant linear operator is a sum of two operators
\begin{eqnarray} \label{sop}
\pi^{[3]}(x) &\mapsto& \frac{\partial \pi^{[3]}}{\partial x}(x) (F+GK)x \\&&
+{1\over 2} \sum_k x'(C'_k +K'D'_k) \frac{\partial^2 \pi^{[3]}}{\partial x^2}(x) (C_k+D_kK) x \nonumber
\end{eqnarray}
Consider a simple version of the second operator, for some $C$,
\begin{eqnarray} \label{sim} \pi^{[3]}(x) &\mapsto&{1\over 2} x'C'\frac{\partial^2 \pi^{[3]}}{\partial x^2}(x)Cx
\end{eqnarray}
Suppose $C$ has a complete set of left eigenpairs, $\lambda_i\in \Compl,\ w^i\in \Compl^{1\times n}$ for $i=1,\ldots,n$,
\begin{eqnarray*}
w^i C&=& \lambda_i w^i
\end{eqnarray*}
Then the eigenvalues of (\ref{sim}) are of the form
$
\lambda_{i_1}\lambda_{i_2}+\lambda_{i_2}\lambda_{i_3}+ \lambda_{i_3}\lambda_{i_1}
$
and the corresponding eigenvectors are
$(w^{i_1}x)(w^{i_2}x)(w^{i_3}x)$
for $ 1\le i_1\le i_2\le i_3$.
But this analysis does not completely clarify whether the operator (\ref{sop}) is invertible. Here is one case where it is known to be invertible.
Consider the space of cubic polynomials $\pi(x)$. We can norm this space using the standard $L_2$ norm on the vector of coefficients of $\pi(x)$ which we denote by $\|\pi(x)\|$. Then there is an induced norm on operators like (\ref{dop}), (\ref{sop}) and
\begin{eqnarray} \label{pop}
\pi^{[3]}(x) &\mapsto& {1\over 2} \sum_k x'(C'_k +K'D'_k) \frac{\partial^2 \pi^{[3]}}{\partial x^2}(x) (C_k+D_kK) x
\end{eqnarray}
Since the operator (\ref{dop}) is invertible its inverse has an operator norm $\rho<\infty$. If all the eigenvalues of $F+GK$ have real parts less than $-\tau$ then
${ 1 \over \rho} \ge 3\tau$.
Let $\sigma$ be the supremum of the operator norms of $C_k+D_kK$ for $k=1,\ldots, r$. Then from the discussion above we know that the operator norm of (\ref{pop}) is bounded above by ${3r\sigma^2\over 2}$.
\begin{lemma}
If $\tau > {r\sigma^2 \over 2}$ then
the operator (\ref{sop}) is invertible.
\end{lemma}
\begin{proof}
Suppose (\ref{sop}) is not invertible; then there exists a cubic polynomial $\pi(x)\ne 0$ such that
\begin{eqnarray*}
\frac{\partial \pi^{[3]}}{\partial x}(x) (F+GK)x &=&
-{1\over 2} \sum_k x'(C'_k +K'D'_k) \frac{\partial^2 \pi^{[3]}}{\partial x^2}(x) (C_k+D_kK) x \end{eqnarray*}
so
\begin{eqnarray*}
\left\| \frac{\partial \pi^{[3]}}{\partial x}(x) (F+GK)x \right\|=\left\| {1\over 2} \sum_k x'(C'_k +K'D'_k) \frac{\partial^2 \pi^{[3]}}{\partial x^2}(x) (C_k+D_kK) x \right\|
\end{eqnarray*}
But we know that
\begin{eqnarray*}
\left\| \frac{\partial \pi^{[3]}}{\partial x}(x) (F+GK)x \right\|\ge{1\over \rho}\|\pi(x)\|\ge 3\tau \|\pi(x)\|>{3r\sigma^2 \over 2}\|\pi(x)\|
\end{eqnarray*}
while
\begin{eqnarray*}
\left\| {1\over 2} \sum_k x'(C'_k +K'D'_k) \frac{\partial^2 \pi^{[3]}}{\partial x^2}(x) (C_k+D_kK) x \right\|\le{3r\sigma^2 \over 2}\|\pi(x)\|
\end{eqnarray*}
\end{proof}
The takeaway message from this lemma is that if the nonzero entries of $C_k, D_k$ are small relative to the nonzero entries of $F, G$ then we can expect that (\ref{sop}) will be invertible.
There are two ways to try to solve (\ref{shjb7}), the iterative approach or the direct approach.
We have written Matlab software to solve the deterministic version of these equations. This suggests an iteration scheme similar to the above for solving SARE. Let $\pi^{[3]}_{(0)}$
be the solution of the deterministic version of (\ref{shjb7}) where $C_k=0,\ D_k=0$.
Given $\pi^{[3]}_{(\tau-1)}(x)$ define
\begin{eqnarray*}
l^{[3]}_{(\tau)}(x,u)&=&l^{[3]}(x,u)+{1\over 2} \sum_k x'(C'_k +K'D'_k) \frac{\partial^2 \pi^{[3]}_{(\tau-1)}}{\partial x^2}(x) (C_k+D_kK) x \nonumber
\\&&+\sum_k x'(C'_k +K'D'_k)P\gamma_k^{[2]}(x,u)\nonumber
\end{eqnarray*}
and let $\pi^{[3]}_{(\tau)}$ be the solution of
\begin{eqnarray*}
0&=&\frac{\partial \pi^{[3]}_{(\tau)}}{\partial x}(x) (F+GK)x+x'Pf^{[2]}(x,Kx)+l^{[3]}_{(\tau)}(x,Kx)
\end{eqnarray*}
If this iteration converges then we have the solution to (\ref{shjb7}).
We have also written Matlab software to solve (\ref{shjb7}) directly assuming the operator (\ref{sop}) is invertible.
If (\ref{shjb7}) is solvable then solving (\ref{shjb8}) for $\kappa^{[2]}(x)$ is straightforward
as we have assumed that $R$ is invertible.
If these equations are solvable then we can move on to the equations for $\pi^{[4]}(x)$ and $\kappa^{[3]}(x)$ and higher degrees.
It should be noted that if the Lagrangian is an even function and the dynamics is an odd function then the optimal cost $\pi(x)$ is
an even function and the optimal feedback $\kappa(x)$ is an odd function.
| 4,003 | 11,580 |
en
|
train
|
0.160.2
|
\section{Nonlinear Example}
\setcounter{equation}{0}
Here is a simple example with $n=2,m=1,r=1$. Consider a pendulum of length $1\ m$ and mass $1\ kg$ orbiting approximately 400 kilometers
above Earth on the International Space Station (ISS). The ``gravity constant'' at this height is approximately $g=8.7\ m/sec^2$. The pendulum can be controlled
by a torque $u$ that can be applied at the pivot and there is damping at the pivot with linear damping constant $c_1=0.1\ kg/sec$ and cubic damping constant $c_3= 0.05\ kg\ sec/m^2$. Let $x_1$ denote the angle of pendulum measured counter clockwise from the outward pointing ray from the center of the Earth and let $x_2$ denote the angular velocity. The deterministic equations of motion are
\begin{eqnarray*}
\dot{x}_1&=& x_2
\\
\dot{x}_2&=& lg\sin x_1 -c_1 x_2-c_3 x_2^3 +u
\end{eqnarray*}
But the shape of the earth is not a perfect sphere and its density is not uniform so there are fluctuations in the ``gravity constant''. We set these fluctuations in the ``gravity constant'' at one percent although they are probably smaller. There might also be fluctuations in the damping constants of around one percent. Further assume that
the commanded torque is not always realized and the relative error in the actual torque fluctuates around one percent.
We model these stochastically by three white noises
\begin{eqnarray*}
dx_1&=& x_2\ dt\\
dx_2&=&\left(lg\sin x_1 -c_1x_2-c_3x_2^3+u\right)\ dt\\
&&+0.01 lg\sin x_1 \ dw_1- 0.01(c_1 x_2+c_3x_2^3)\ dw_2 +0.01u\ dw_3
\end{eqnarray*}
This is an example of how stochastic models with noise coefficients of order $O(x)$ can arise. If the noise is modeling an uncertain environment then its coefficients are likely to be $O(1)$. But if it is the model that is uncertain then the noise coefficients are likely to be $O(x)$.
The goal is to find a feedback $u=\kappa(x)$ that stabilizes the pendulum to straight up in spite of the noises so we take the criterion to be
\begin{eqnarray*}
\min_u {1\over 2}\int_0^\infty \|x\|^2+u^2\ dt
\end{eqnarray*}
with discount factor $\alpha=0$.
Then
\begin{eqnarray}n
F=\bmt 0&1\\8.7&0.1\end{array}\right],& G=\bmt 0\\1\end{array}\right],& \\
Q=\bmt 1&0\\0&1\end{array}\right], & R=1,& S=\bmt 0\\0\end{array}\right]\\
C_1=\bmt 0&0\\0.087&0\end{array}\right],&C_2=\bmt 0&0\\0&-0.001\end{array}\right],& C_3=\bmt 0&0\\0&0\end{array}\right]\\
D_1=\bmt 0\\0\end{array}\right],& D_2 =\bmt 0\\0\end{array}\right],& D_3 =\bmt 0\\0.01\end{array}\right]
\end{array}\end{equation}an
Because the Lagrangian is an even function and the dynamics is an odd function of $x,u$, we know that
$\pi(x)$ is an even function of $x$ and $\kappa(x)$ is an odd function of $x$.
We have computed the optimal cost $\pi(x)$ to degree $6$ and the optimal feedback $\kappa(x)$ to degree $5$,
\begin{eqnarray*}
\pi(x)&=&26.7042x_1^2+ 17.4701x_1x_2+ 2.9488x_2^2\\&&
-4.6153x_1^4 -2.9012x_1^3x_2 -0.5535x_1^2x_2^2 -0.0802 x_1x_2^3 -0.0157x_2^4\\
&&
+0.3361x_1^6+ 0.1468x_1^5x_2 -0.0015x_1^4x_2^2 -0.0077x_1^3x_2^3 \\
&& -0.0022x_1^2x_2^4 -0.0003x_1x_2^5 +0.0000x_2^6
\\
\kappa(x)&=&-17.4598x_1 -5.8941x_2 \\
&& +2.9012x_1^3+ 1.1071x_1^2x_2+ 0.2405x_1x_2^2+ 0.0628x_2^3\\
&&
-0.1468x_1^5+ 0.0031x_1^4x_2+ 0.0232x_1^3x_2^2\\&&+ 0.0089x_1^2x_2^3+ 0.0014x_1x_2^4 -0.0002x_2^5
\end{eqnarray*}
In making this computation we are approximating $\sin x_1$ by its Taylor polynomials
\begin{eqnarray*}
\sin x_1&=& x_1-{x_1^3\over 6} +{x_1^5 \over 120}+\ldots
\end{eqnarray*}
The alternating signs of the odd terms in these polynomials are reflected in the nearly alternating signs in the Taylor polynomials of the optimal cost $\pi(x)$
and optimal feedback $\kappa(x)$. If we take a first degree approximation to $\sin x_1$ we are overestimating the gravitational force
pulling the pendulum from its upright position, so $\pi^{[2]}(x)$ overestimates the optimal cost
and the feedback $u=\kappa^{[1]}(x)$ is stronger than it needs to be. The latter could be a problem if there is a bound on the magnitude of $u$ that we ignored in the analysis.
If we take a third degree approximation to $\sin x_1$ then $\pi^{[2]}(x)+\pi^{[4]}(x)$ underestimates the optimal cost
and the feedback $u=\kappa^{[1]}(x)+\kappa^{[3]}(x)$ is weaker than it needs to be.
If we take a fifth degree approximation to $\sin x_1$ then $\pi^{[2]}(x)+\pi^{[4]}(x)+\pi^{[6]}(x)$ overestimates the optimal cost but by a smaller margin than
$\pi^{[2]}(x)$. The feedback $u=\kappa^{[1]}(x)+\kappa^{[3]}(x)+\kappa^{[5]}(x)$ is stronger than it needs to be
but by a smaller margin than $u=\kappa^{[1]}(x)$.
| 1,891 | 11,580 |
en
|
train
|
0.160.3
|
\section{Finite Horizon Stochastic Nonlinear Optimal Control Problem} \label{FH}
\setcounter{equation}{0}
Consider the finite horizon stochastic nonlinear optimal control problem,
\begin{eqnarray*}
\min_{u(\cdot)} {\rm E}\left\{ \int_0^T l(t,x,u) \rm\ d t+\pi_T(x(T))\right\}
\end{eqnarray*}
subject to
\begin{eqnarray*}
d x&=& f(t,x,u)dt+\sum_{k=1}^r\gamma_k(t,x,u)d w_k\\
x(0)&=&x^0
\end{eqnarray*}
Again we assume that $ f, l,\gamma_k, \pi_T$ are sufficiently smooth.
If they exist and are smooth the optimal cost $\pi(t, x) $ of starting at $x$ at time $t$ and the optimal feedback
$u(t)=\kappa(t,x(t))$ satisfy the time dependent Hamilton-Jacobi-Bellman equations (HJB)
\begin{eqnarray*}
0&=& \mbox{min}_u \left\{ \frac{\partial \pi}{\partial t}(t,x) +\frac{\partial \pi}{\partial x}(t,x) f(t,x,u)
+l(t,x,u)\right. \\&&\left. +{1\over 2}\sum_{k=1}^r \gamma'_k(t,x,u)
\frac{\partial^2 \pi}{\partial x^2}(t,x) \gamma_k(t,x,u) \right\}
\\
0&=& \mbox{argmin}_u \left\{ \sum_i\frac{\partial \pi}{\partial x_i}(t,x) f_i(t,x,u) +l(t,x,u)\right.
\\&&\left. +{1\over 2}\sum_{k=1}^r \gamma'_k(t,x,u)
\frac{\partial^2 \pi}{\partial x^2}(t,x) \gamma_k(t,x,u) \right\}
\end{eqnarray*}
If the quantity to be minimized
is strictly convex in $u$ then HJB equations simplify to
\begin{eqnarray} \nonumber
0&=& \frac{\partial \pi}{\partial t}(t,x) + \sum_i\frac{\partial \pi}{\partial x_i}(t,x) f_i(t,x,\kappa(x)) +l(t,x,\kappa(x))
\\&&+{1\over 2} \sum_{k=1}^r
\gamma'_k(t,x,\kappa(x)) \frac{\partial^2 \pi}{\partial x^2}(t,x) \gamma_k(t,x,\kappa(x)) \label{hjb1t}\\
\nonumber
\\
\label{hjb2t}
0&=& \sum_{i,k} \frac{\partial \pi}{\partial x_i}(x) \frac{\partial f _i}{\partial u_k}(t,x,\kappa(x)) +\sum_k \frac{\partial l }{\partial u_k}(t,x,\kappa(x))
\\&& +\sum_{k=1}^r \gamma'_k(t,x,\kappa(x))
\frac{\partial^2 \pi}{\partial x^2}(x) \frac{\partial \gamma_k}{\partial u}(t,x,\kappa(x)) \nonumber
\end{eqnarray}
These equations are integrated backward in time from the
final condition
\begin{eqnarray} \label{hjbT}
\pi(T,x)&=& \pi_T(x)
\end{eqnarray}
Again we assume that we have the following Taylor expansions
\begin{eqnarray*}
f(t,x,u)&=& F(t)x+G(t)u+f^{[2]}(t,x,u)+f^{[3]}(t,x,u)+\ldots\\
l(t,x,u)&=& {1\over 2}\left( x'Q(t)x+u'R(t)u\right)+l^{[3]}(t,x,u)+l^{[4]}(t,x,u)+\ldots\\
\gamma_k(t,x,u)&=& C_k(t)x+D_k(t)u+\gamma_k^{[2]}(t,x,u)+\gamma_{k}^{[3]}(t,x,u)+\ldots\\
\pi_T(x)&=& {1\over 2} x'P_Tx+\pi_T^{[3]}(x)+\pi_T^{[4]}(x)+\ldots\\
\pi(t,x)&=& {1\over 2} x'P(t)x+\pi^{[3]}(t,x)+\pi^{[4]}(t,x)+\ldots\\
\kappa(t,x)&=& K(t)x+\kappa^{[2]}(t,x)+\kappa^{[3]}(t,x)+\ldots
\end{eqnarray*}
where $^{[r]}$ indicates terms of homogeneous degree $r$ in $x,u$ with coefficients that are continuous functions of $t$.
The key assumption is that $\gamma_k(t,0)=0$
for then (\ref{hjb1t}) has a regular singular point at $x=0$ and so is amenable to power series methods.
We plug these expansions into the simplified time dependent HJB equations and collect terms of lowest degree, that is, degree two in (\ref{hjb1t}), degree one in (\ref{hjb2t}) and degree two in (\ref{hjbT}).
\begin{eqnarray*}
0&=& \dot{P}(t)+P(t)F(t)+F'(t)P(t)+Q(t)-K'(t)R(t)K(t)\\
&&
+\sum_k \left(C'_k(t)+K'(t)D'_k(t)\right)P(t)\left(C_k(t)+ D_k(t)K(t)\right) \\
K(t)&=& -\left(R(t)+\sum_{k=1}^rD'_k(t)P(t)D_k(t)\right)^{-1} (G'(t) P(t)+S(t))\\
P(T)&=& P_T
\end{eqnarray*}
We call these equations the stochastic differential Riccati equation (SDRE). Similar equations in more generality can be found in \cite{YZ99} but since we are interested in nonlinear problems
we require that $\gamma_k(t,x)=O(x)$ so that the stochastic HJB equations have a regular singular at the origin.
If SDRE are solvable we may proceed to the next degrees, degree three in (\ref{hjb1t}), degree two in (\ref{hjb2t}), and degree three in (\ref{hjbT}).
\begin{eqnarray*}
0&=&\frac{\partial \pi^{[3]}}{\partial t}(t,x)+ \frac{\partial \pi^{[3]}}{\partial x}(t,x) (F(t)+G(t)K(t))x\\
&&+x'P(t)f^{[2]}(t,x,K(t)x)+l^{[3]}(t,x,K(t)x)\\&&
\\&&
+{1\over 2}\sum_k x'\left(C'_k(t)+K'(t)D'_k(t)\right) \frac{\partial^2 \pi^{[3]}}{\partial x^2}(t,x) \left(C_k(t)+D_k(t)K(t)\right) x\\
&&
+\sum_k x'\left(C'_k(t)+K'(t)D'_k(t)\right)P(t)\gamma_k^{[2]}(t,x,K(t)x)\\
\\
0&=& \frac{\partial \pi^{[3]}}{\partial x}(t,x) G(t) +x'P(t)\frac{\partial f^{[2]}}{\partial u}(t,x,K(t)x)+\frac{\partial l^{[3]}}{\partial u}(t,x,K(t)x)
\\&+&\sum_k x'(C_k(t)+D_k(t)K(t))'\left(P(t)\frac{\partial \gamma^{[2]}_k}{\partial u}(t,x,K(t)x)+ \frac{\partial^2 \pi^{[3]}}{\partial x^2}(t,x) D_k(t)\right)
\\&+& \sum_k \gamma^{[2]}_k(t,x,K(t)x)P(t)D_k(t)
+(\kappa^{[2]}(t,x))'\left(R(t)+\sum_k D'_k(t)P(t)D_k(t)\right)
\end{eqnarray*}
Notice again the unknown $\kappa^{[2]}(t,x)$ does not appear in the first equation, which is a linear ODE for
$ \pi^{[3]}(t,x)$ running backward in time from the terminal condition,
\begin{eqnarray*}
\pi^{[3]}(T,x)&=& \pi^{[3]}_T(x)
\end{eqnarray*}
After we have solved it then the second equation for $\kappa^{[2]}(t,x)$ is easily solved because of the standard assumption that $R(t)$ is invertible and hence $R(t)+\sum_k D'_k(t)P(t)D_k(t)$ is invertible.
The higher degree terms can be found in a similar fashion.
\end{document}
| 2,337 | 11,580 |
en
|
train
|
0.161.0
|
\begin{document}
\begin{abstract}
Variable selection for structured covariates lying on an underlying known graph is a problem motivated by practical applications, and has been a topic of increasing interest. However, most of the existing methods may not be scalable to high dimensional settings involving tens of thousands of variables lying on known pathways such as the case in genomics studies. We propose an adaptive Bayesian shrinkage approach which incorporates prior network information by smoothing the shrinkage parameters for connected variables in the graph, so that the corresponding coefficients have a similar degree of shrinkage. We fit our model via a computationally efficient expectation maximization algorithm which is scalable to high dimensional settings ($p {\sim} 100{,}000$). Theoretical properties for fixed as well as increasing dimensions are established, even when the number of variables increases faster than the sample size. We demonstrate the advantages of our approach in terms of variable selection, prediction, and computational scalability via a simulation study, and apply the method to a cancer genomics study.
\end{abstract}
\footnote{{\noindent \em Corresponding Author}: Suprateek Kundu, Department of Biostatistics \& Bioinformatics, Emory University, 1518 Clifton Road, Atlanta, Georgia 30322, U.S.A. \\ {\noindent \em Email}: [email protected] }
{\noindent Keywords:} adaptive Bayesian shrinkage; EM algorithm; oracle property; selection consistency; structured high-dimensional variable selection.\\
\section{Introduction}
With the advent of modern technology such as microarray analysis and next generation sequencing in genomics, recent studies rely on increasingly large amounts of data containing tens of thousands of variables. For example, in genomics studies, it is common to collect gene expressions from $p\sim 20{,}000$ genes, which is often considerably larger than the number of subjects in these studies, resulting in a classical small $n$, large $p$, problem. In addition, it is well-known that genes lie on a graph of pathways where nodes represent genes and edges represent functional interactions between genes and gene products. Currently, there exist several biological databases which store gene network information from previous studies \citep{Stingo2011}, and these databases are constantly updated and augmented with newly emerging knowledge.
In such cases when genes are known to lie on an underlying graph, usual variable selection approaches such as Lasso \citep{Tibshirani1996}, adaptive Lasso \citep{Zou2006}, or spike and slab methods \citep{Mitchell1988} may run into difficulties, since they do not exploit the association structure between variables which may give rise to correlated predictors. Moreover, there is increasing evidence that incorporating prior graph information, where applicable, can improve prediction and variable selection in analysis of high dimensional data. \citet{Li2008} and \citet{Pan2010} proposed network-based penalties in linear regression, which induce sparsity of estimated effects while encouraging similar effects for connected variables. In a Bayesian framework, \citet{Li2010}, \citet{Stingo2011a}, and \citet{Stingo2011}, used spike and slab type priors for variable selection and Markov random field (MRF) type priors on variable inclusion indicators to incorporate graph information. More recently, \citet{Rockova2014} proposed an expectation maximization (EM) algorithm for variable selection using spike and slab priors which is known as EMVS and extended EMVS to incorporate graph information via MRF priors where a variational approximation was used in computation.
\citet{Rockova2014a} proposed a normal-exponential-gamma shrinkage approach with incorporation of the pathway membership information and developed an EM algorithm for computation.
To our knowledge, there is a scarcity of scalable Bayesian approaches for structured variable selection that possess desirable theoretical and numerical properties in high dimensions. The Bayesian approaches involving MRF type priors are implemented using Markov chain Monte Carlo and hence are not scalable to high dimensions involving tens of thousands of variables, such as in our cancer genomics application. While the EM approach by \citet{Rockova2014a} can incorporate pathway membership information, it is not equipped to incorporate edge information which is the focus of this article. Moreover, the theoretical properties and scalability of their method to the higher dimensions considered in this work ($p\sim 100{,}000$) are unclear.
The variational approximation proposed by \citet{Rockova2014} may suffer from the loss of convexity properties and inferior estimates close to the transition points for tuning parameters, as indicated by the authors.
The frequentist network-based regularization approaches are expected to be more scalable, but make a strong assumption of smoothness of covariate effects for connected variables in the graph, which may be restrictive in real-life applications.
We propose a Bayesian shrinkage approach and an associated EM algorithm for structured covariates, which is scalable to high dimensional settings and possesses a desirable oracle property in variable selection and estimation for both fixed and increasing dimensions. The proposed approach assigns Laplace priors to the regression coefficients and incorporates the underlying graph information via a hyper-prior for the shrinkage parameters in the Laplace priors. Specifically, the shrinkage parameters are assigned a log-normal prior specifying the inverse covariance matrix as a graph Laplacian \citep{Chung1997,Ng2002}, which has a zero or positive partial correlation depending on whether the corresponding edge is absent or present. This enables smoothing of shrinkage parameters for connected variables in the graph and conditional independence between shrinkage parameters for disconnected variables. Thus, the resulting approach encourages connected variables to have a similar degree of shrinkage in the model without forcing their regression coefficients to be similar in magnitude. The operating characteristics of the approach can be controlled via tuning parameters with clearly defined roles.
Although the proposed model can be implemented using Markov chain Monte Carlo, it is not scalable to high dimensional settings of our interest. As such, we implement an EM algorithm which treats the inverse covariance matrix for the shrinkage parameters as missing variables, and marginalizes over them to obtain the ``observed data'' posterior which has a closed form.
We incorporate recent computational developments such as the dynamic weighted lasso \citep{Chang2010} to obtain a computationally efficient approach which is scalable to high dimensional settings.
We present the proposed methodology and the EM algorithm in Section 2, the theoretical results in Section 3, and the simulation results comparing our approach with several competitors in Section 4. We apply our method to a cancer genomics study in Section 5.
| 1,546 | 38,227 |
en
|
train
|
0.161.1
|
\section{Methodology}
\subsection{Model Specification}
Let $\mathbf 0_m$ and $\mathbf 1_m$ denote the length-$m$ vectors with 0 entries and 1 entries, respectively, and $I_m$ the $m \times m$ identity matrix. The subscript $m$ may be omitted in the absence of ambiguity. For any length-$m$ vector $\mathbf v$, we define $e^\mathbf v = \left(e^{v_1},\dots,e^{v_m}\right)'$, $\log \mathbf v = \left(\log v_1,\dots,\log v_m \right)'$, $|\mathbf v| = \left(|v_1|,\dots,|v_m|\right)'$, and $D_{\mathbf v} = \mathrm{diag}(\mathbf v)$.
Suppose we have a random sample of $n$ observations $\{y_i, \mathbf x_i; i=1,\ldots,n\}$ where $y_i$ is the outcome variable and $\mathbf x_i$ is a vector of $p$ predictors. Let $\mathcal{G} = \langle V,E \rangle$ denote the known underlying graph for the $p$ predictors, where $V=\{1,\dots,p\}$ is the set of nodes and $E \subset \{(j,k): 1\le j < k \le p\}$ is the set of undirected edges. Let $G$ be the $p \times p$ adjacency matrix in which the $(j,k)$-th element $G_{jk}=1$ if there is an edge between predictors $j$ and $k$, and $G_{jk}=0$ if otherwise.
Consider the linear model
\begin{eqnarray}
\mathbf y = X \boldsymbol \beta + \boldsymbol \epsilon, \mbox{ } \boldsymbol \epsilon \sim \mathcal{N}(\mathbf 0,\sigma^2I_n), \label{eq:model}
\end{eqnarray}
where $\mathbf y=(y_1,\dots,y_n)'$, $X=(\mathbf x_1,\dots,\mathbf x_n)'$, $\boldsymbol \beta=(\beta_1,\dots,\beta_p)'$, $\boldsymbol \epsilon=(\epsilon_1,\dots,\epsilon_n)'$, and $\mathcal{N}(\cdot)$ denotes the Gaussian distribution. We assign the following priors to $\boldsymbol \beta$ and $\sigma^2$
\begin{align}
\beta_j \sim& \mathcal{DE}(\lambda_j/\sigma), \mbox{ } \sigma^2 \sim \mathcal{IG}(a_\sigma,b_\sigma), \mbox{ } j=1,\dots,p, \label{eq:base}
\end{align}
where $\lambda_j$ is the shrinkage parameter for $\beta_j$, and $\mathcal{DE}(\cdot)$ and $\mathcal{IG}(\cdot)$ denote the double exponential (Laplace) and inverse gamma distributions, respectively. Prior specification \eqref{eq:base} differs from Bayesian Lasso \citep{Park2008} in that the degree of shrinkage for the $j$-th coefficient is controlled by $\lambda_j$ ($j=1,\ldots,p$) not a common $\lambda$, allowing for adaptive shrinkage guided by underlying graph knowledge.
We encode the graph information $\mathcal{G}$ in the model via an informative prior on the shrinkage parameters as follows.
\begin{align}
\boldsymbol \alpha = (\log(\lambda_1), \ldots, \log(\lambda_p))' \sim \mathcal{N} \left( \boldsymbol \mu,\nu \mathit{\mathit \Omega}^{-1} \right), \label{eq:shrinkage}
\end{align}
where
\begin{align*}
\mathit{\mathit \Omega} = \left[ \begin{array}{cccc} 1+\sum_{j\neq1} \omega_{1j} & -\omega_{12} & \cdots & -\omega_{1p}\\
-\omega_{21} & 1+\sum_{j\neq2} \omega_{2j} & \ddots & -\omega_{2p}\\
\vdots & \ddots & \ddots & \vdots\\
-\omega_{p1} & -\omega_{p2} & \cdots & 1+\sum_{j\neq p} \omega_{pj} \end{array}
\right],
\end{align*}
and assign the following prior to $\boldsymbol \omega = \{ \omega_{jk}: j<k\}$
\begin{align} \label{prior_omega}
\pi(\boldsymbol \omega) \propto |\mathit{\mathit \Omega}|^{-1/2} \prod_{G_{jk}=1} \omega_{jk}^{a_\omega-1} \exp ( -b_\omega\omega_{jk} ) 1 (\omega_{jk}>0) \prod_{G_{jk}=0} \delta_0(\omega_{jk}),
\end{align}
where $\delta_0$ is the Dirac delta function concentrated at 0 and $1(\cdot)$ is the indicator function. Since $\mathit{\mathit \Omega}$ is symmetric and diagonally dominant, it is guaranteed to be positive definite. It follows from prior \eqref{prior_omega} that $\omega_{jk}=0$ if $G_{jk}=0$ and $\omega_{jk}>0$ if $G_{jk}=1$. In other words, under our model formulation the shrinkage parameters $\lambda_j$ and $\lambda_k$ have a positive partial correlation if predictors $j$ and $k$ are connected and have a zero partial correlation otherwise. The magnitudes of the positive partial correlations are learned from the data, with a higher partial correlation leading to the smoothing of corresponding shrinkage parameters. Our model formulation has several appealing features. First, a higher positive partial correlation between two connected predictors results in an increased probability of having both predictors selected or excluded simultaneously under an EM algorithm. This makes intuitive sense when both variables are important or unimportant. Second, in the scenario where one of the connected predictors is important and the other one is not, the method can learn from the data and impose a weak partial correlation, thereby enabling the corresponding shrinkage parameters to act in a largely uncorrelated manner. Third, the selection of unconnected variables is guided by shrinkage parameters which are partially uncorrelated. Finally, our approach does not constrain the effect sizes for connected variables to be similar in magnitude.
\begin{figure}[h!]
\includegraphics[width=\textwidth]{prior_plot_density.pdf}
\caption{Top two panels plot the marginal prior densities of $\beta$ for (a) different $\mu$ while $\nu$ and $\sigma$ are fixed and (b) different $\nu$ while $\mu$ and $\sigma$ are fixed. Bottom two panels (c) and (d) plot the corresponding negative log density functions. The standard normal prior and the horseshoe prior with $\tau=1$ are shown for contrast. The Laplacian prior with $\lambda=e^{0.3}$ is plotted as a comparison to the case with $\mu=0.3$ and $\nu=0.1$.}
\label{fig1}
\end{figure}
The mean vector $\boldsymbol \mu$ in \eqref{eq:shrinkage} determines the locations of $\boldsymbol \alpha$, and can be interpreted as controlling the average sparsity of the model. In particular, one can choose $\boldsymbol \mu = \mu \mathbf 1$ for some $\mu \in \mathbb{R}$, where a greater value of $\mu$ implies a sparser model. Figure \ref{fig1}(a) plots the marginal density for the regression coefficients for different values of $\mu$ with $\lambda$ marginalized out (via Monte Carlo averaging), while $\nu$ and $\sigma$ are kept fixed. It is clear that larger $\mu$ values lead to sharper peaks at zero with lighter tails, thus encouraging greater shrinkage. On the other hand, $\nu$ specifies the prior confidence on the choice of $\boldsymbol \mu$ as the average sparsity parameter. If $\nu = 0$, we have $\boldsymbol \alpha = \boldsymbol \mu$ so that the shrinkage parameters are fixed, resulting in a Lasso type shrinkage. This is evident from Figure \ref{fig1}(d), which plots the negative logarithm of the density for the marginal regression coefficients for different values of $\nu$ while $\mu$ and $\sigma$ are fixed. Figures 1(b) and 1(d) also show that larger values of $\nu$ result in higher-peaked and heavier-tailed densities and the corresponding penalty becomes similar to non-convex penalties in the frequentist literature, e.g. SCAD in \citet{Fan2001}. Overall, changing the value of $\nu$ results in different types of penalty functions which can be convex or non-convex.
We note that \eqref{prior_omega} looks similar to a product of the gamma densities. However, it involves an additional term $|\mathit{\mathit \Omega}|^{-1/2}$ which is required to obtain a closed form full posterior, since the term cancels out between $\pi(\boldsymbol \alpha)$ and $\pi(\boldsymbol \omega)$. A similar trick was used for specifying the inverse covariance matrix for the regression coefficients in \citet{Liu2014}, which they denote as a graph Laplacian structure. However our approach is distinct in that it specifies a graph Laplacian type structure for the inverse covariance matrix for the log-shrinkage parameters and incorporates prior graph knowledge. Moreover, their approach results in an OSCAR type penalty \citep{Bondell2008}, while $-\log(\pi(\beta))$ under our approach can lead to both convex and non-convex penalties depending on the value of $\nu$.
Proposition \ref{pro} shows that the prior in \eqref{prior_omega} is proper. The proof is presented in the Appendix.
\begin{pro} \label{pro}
The prior $\pi(\boldsymbol \omega)$ of $\boldsymbol \omega$ in \eqref{prior_omega} is proper.
\end{pro}
\begin{figure}[h!]
\includegraphics[width=0.5\textwidth]{prior_plot_alpha1.pdf}
\includegraphics[width=0.5\textwidth]{prior_plot_alpha2.pdf}
\includegraphics[width=0.5\textwidth]{prior_plot_alpha3.pdf}
\includegraphics[width=0.5\textwidth]{prior_plot_alpha4.pdf}
\caption{Contour plots of the marginal prior density of $\alpha_1$ and $\alpha_2$ for 4 different combinations of $a_\omega$ and $b_\omega$.}
\label{fig2}
\end{figure}
The prior in \eqref{prior_omega} involves a shape parameter $a_\omega$ and the rate parameter $b_\omega$, which serve the similar roles as those of the gamma distribution. In fact, they are directly involved in regulating the correlations between the elements of $\boldsymbol \alpha$.
To see how they affect these correlations, consider $p=2$ and $G_{12}=1$. It follows that the joint prior density of $\alpha_1$ and $\alpha_2$ after marginalizing out $\omega_{12}$ is given (up to a constant) by
\begin{align*}
\pi(\alpha_1,\alpha_2) \propto f(\alpha_1,\alpha_2) = \exp \left( -\frac{(\alpha_1-\mu_1)^2+(\alpha_2-\mu_2)^2}{2\nu} \right) \left( b_\omega + \frac{(\alpha_1-\alpha_2)^2}{2\nu} \right)^{-a_\omega}.
\end{align*}
Figure \ref{fig2} draws the contour plots of $f(\alpha_1,\alpha_2)$ for 4 different combination of $a_\omega$ and $b_\omega$; $(a_\omega,b_\omega) = (1,1),(1,4),(4,1),(4,4)$ with $\mu_1=\mu_2=1$ and $\nu=1$. As $a_\omega$ increases and/or $b_\omega$ decreases, $\alpha_1$ and $\alpha_2$ tend to have a stronger correlation, translating to a higher probability of having similar values. This is also evident in the E-step in the EM algorithm (see equation \eqref{eq:Estep}), where high values of $a_\omega/b_\omega$ tend to result in a high mean value for $\omega_{jk}$ which in turn tends to result in similar values for $\alpha_j-\mu_j$ and $\alpha_k - \mu_k$.
| 2,990 | 38,227 |
en
|
train
|
0.161.2
|
\subsection{EM Algorithm}
The Maximum-A-Posteriori (MAP) estimator for the proposed model is obtained by maximizing the posterior density over $\boldsymbol \theta = (\boldsymbol \beta', \sigma^2, \boldsymbol \alpha')'$ with $\boldsymbol \omega$ marginalized out. Specifically,
\begin{align}
\widehat{\boldsymbol \theta} = \left(\widehat{\boldsymbol \beta},\widehat{\sigma}^2,\widehat{\boldsymbol \alpha}\right) = \operatorname*{argmax}_{\boldsymbol \theta} \int \pi(\boldsymbol \theta,\boldsymbol \omega|\mathbf y,X) d\boldsymbol \omega, \label{eq:argmax}
\end{align}
where the full posterior density is given by
\begin{align*}
\pi(\boldsymbol \theta,\boldsymbol \omega|\mathbf y,X) &\propto \pi(\mathbf y|\boldsymbol \beta,\sigma^2,X) \pi(\boldsymbol \beta|\sigma^2,\boldsymbol \alpha) \pi(\sigma^2) \times |\mathit{\mathit \Omega}|^{1/2} \exp \left( -\frac{(\boldsymbol \alpha-\boldsymbol \mu)' \mathit{\mathit \Omega} (\boldsymbol \alpha-\boldsymbol \mu)}{2\nu} \right)\\
& \qquad \times |\mathit{\mathit \Omega}|^{-1/2} \prod_{j<k,G_{jk}=1} \omega_{jk}^{a_\omega-1} \exp ( -b_\omega\omega_{jk} ) \prod_{j<k,G_{jk}=0} \delta_0(\omega_{jk} ).
\end{align*}
In the case of $\mathit{\mathit \Omega} = I_p$, where no graph information is used, we call the resulting estimator the \emph{EM} estimator for Bayesian \emph{SH}rinkage approach, or EMSH in short. In the general case where prior graph information is used, we call the resulting estimator the EMSH with the \emph{S}tructural information incorporated, or EMSHS in short.
We use $\boldsymbol \mu = \mu\mathbf 1$ where $\mu>0$ for simplicity. Note that the algorithm can be easily modified to accommodate heterogeneous sparsity parameters.
Since
\begin{align*}
(\boldsymbol \alpha-\boldsymbol \mu)' \mathit{\mathit \Omega} (\boldsymbol \alpha-\boldsymbol \mu) = \sum_{j=1}^p (\alpha_j-\mu)^2 + \sum_{j<k} \omega_{jk} (\alpha_j-\alpha_k)^2,
\end{align*}
we have
\begin{align}
\pi(\boldsymbol \theta,\boldsymbol \omega|\mathbf y,X) &\propto \pi(\mathbf y|\boldsymbol \beta,\sigma^2,X) \pi(\boldsymbol \beta|\sigma^2,\boldsymbol \alpha) \pi(\sigma^2) \times \exp \left( -\frac{(\boldsymbol \alpha-\boldsymbol \mu)' (\boldsymbol \alpha-\boldsymbol \mu)}{2\nu} \right) \nonumber \\
& \qquad \times \prod_{j<k,G_{jk}=1} \omega_{jk}^{a_\omega-1} \exp \left( -b_\omega \omega_{jk} - \frac{\omega_{jk}}{2\nu} (\alpha_j-\alpha_k)^2 \right) \prod_{j<k,G_{jk}=0} \delta_0(\omega_{jk} ). \label{eq:joint}
\end{align}
Therefore, the marginal posterior density for $\boldsymbol \theta$ is given by
\begin{align}
\pi(\boldsymbol \theta|\mathbf y,X) & \propto \pi(\mathbf y|\boldsymbol \beta,\sigma^2,X) \pi(\boldsymbol \beta|\sigma^2,\boldsymbol \alpha) \pi(\sigma^2) \times \exp \left( -\frac{(\boldsymbol \alpha-\boldsymbol \mu)' (\boldsymbol \alpha-\boldsymbol \mu)}{2\nu} \right) \nonumber\\
& \qquad \times \prod_{j<k,G_{jk}=1} \left( b_\omega + \frac{1}{2\nu} (\alpha_j-\alpha_k)^2 \right)^{-a_\omega}. \label{eq:marginal}
\end{align}
Since the marginal posterior density in \eqref{eq:marginal} is differentiable with respect to $\boldsymbol \theta$ and the set $\{\boldsymbol \theta: \pi(\boldsymbol \theta|\mathbf y,X) \ge \eta\}$ is bounded and closed for any $\eta>0$, its maximum is attainable and the MAP estimator always exists; see Theorem 2.28 in \citet{rudin1976principles}. Since the logarithm of the marginal posterior density may not be convex, the MAP estimator may have multiple (local) solutions. However, our numerical experiments suggest a stable performance under our method, and we show in Section 3 that the algorithm admits a unique solution asymptotically.
Although one can directly optimize \eqref{eq:marginal} to compute $\widehat{\boldsymbol \theta}$ in \eqref{eq:argmax}, we choose to use the EM algorithm to obtain the MAP estimate. This is because the solution surface for $\boldsymbol \alpha$ given $\boldsymbol \beta$ after marginalizing out $\boldsymbol \omega$ in \eqref{eq:marginal} is non-convex, leading to potential computational difficulties. We elaborate more on this when describing the M-step for $\boldsymbol \alpha$. In summary, we optimize $\pi(\boldsymbol \theta|\mathbf y,X)$ by proceeding iteratively with the ``complete data'' log-posterior $\pi(\boldsymbol \theta, \boldsymbol \omega | \mathbf y, X)$ in \eqref{eq:joint}, where $\mathit \Omega(\boldsymbol \omega)$ is considered ``missing data.'' At each EM iteration, we replace $\mathit \Omega$ by its conditional expectation in the E-step and then maximize the expected ``complete data'' log posterior with respect to $\boldsymbol \theta$ in the M-step.
The objective function to be optimized at the $t$-th EM iteration is given by
\begin{align}
Q_t(\boldsymbol \theta) = & -\frac{n+p+2a_\sigma+2}{2} \log (\sigma^2) \nonumber \\
& -\frac{(\mathbf y-X\boldsymbol \beta)'(\mathbf y-X\boldsymbol \beta) + 2\sigma \sum_{j=1}^p e^{\alpha_j} |\beta_j| + 2b_\sigma}{2\sigma^2} \nonumber \\
& + \sum_{i=1}^p \alpha_i - \frac{(\boldsymbol \alpha-\boldsymbol \mu)' \mathit{\mathit \Omega}^{(t)} (\boldsymbol \alpha-\boldsymbol \mu)}{2\nu}, \label{eq:opt}
\end{align}
where $\mathit{\mathit \Omega}^{(t)} = \mathbb{E}\left( \mathit{\mathit \Omega} | \mathbf y, X, \boldsymbol \theta^{(t-1)} \right)$.
\subsubsection{E-step} It follows from \eqref{eq:joint} that the posterior density of $\boldsymbol \omega$ given $\boldsymbol \theta$ is the product of the gamma densities where $\omega_{jk}$ follows the gamma distribution with parameters $a_\omega$ and $b_\omega+\frac{\left(\alpha_j-\alpha_k\right)^2}{2\nu}$ for $j < k, G_{jk}=1$. Therefore, we have
\begin{align}
\omega_{jk}^{(t)} = \mathbb{E}(\omega_{jk}|\mathbf y,X,\boldsymbol \theta^{(t-1)}) &= \frac{2\nu a_\omega G_{jk}}{ 2\nu b_\omega + \left( \alpha_j^{(t-1)} -\alpha_k^{(t-1)} \right)^2}, \qquad j<k. \label{eq:Estep}
\end{align}
Since we only need to update as many $\omega_{jk}$ as the number of edges in $\mathcal{G}$, this step can be completed in $O(|E|)$ operations, which is computationally very inexpensive for sparse graphs.
\subsubsection{M-step} For this step, we sequentially optimize the objective function with respect to $\boldsymbol \beta$, $\sigma^2$, and $\boldsymbol \alpha$.
\begin{itemize}
\item M-step for $\boldsymbol \beta$: With $\sigma = \sigma^{(t-1)}$ and $\boldsymbol \alpha = \boldsymbol \alpha^{(t-1)}$ fixed, $\boldsymbol \beta^{(t)}$ can be obtained as
\begin{align*}
\boldsymbol \beta^{(t)} = \operatorname*{argmin}_{\boldsymbol \beta} \, \frac{1}{2} (\mathbf y-X\boldsymbol \beta)'(\mathbf y-X\boldsymbol \beta)+ \sum_{j=1}^p \xi_j |\beta_j|,
\end{align*}
where $\xi_j = \sigma e^{\alpha_j}$. This is a weighted lasso problem, which can be solved by many algorithms such as \citet{Efron2004}, \citet{wu2008coordinate}, and \citet{Chang2010}. We use the dynamic weighted lasso (DWL) algorithm developed in \citet{Chang2010}, which is capable of rapidly computing the solution by borrowing information from previous iterations when the regularization parameters change across the EM iterations. Our experience suggests that these regularization parameters differ negligibly over EM iterations under our approach, especially as the solution approaches its limit. As such, the DWL results in substantial savings in computation, compared to alternate algorithms such as LARS which needs to completely recompute the solution for each EM iteration.
Finding a lasso solution using the DWL algorithm requires $O(pq^2)$ operations where $q$ is the number of nonzero coefficients in the solution, provided that the sample correlations between the selected variables and all remaining variables are available. The latter requires an additional $O(npq)$ operations. Therefore, while the initial M-step for $\boldsymbol \beta$ takes $O(npq)$ operations, the DWL algorithm updates the solution in $O(pq)$ operations as the EM iterations continue and the solution stabilizes. Readers are referred to \citet{Chang2010} for further details regarding the DWL algorithm.
We note that \citet{Park2008}, \citet{Armagan2013}, and several others used the normal mixture representation of the Laplace prior below to compute MAP estimates under an EM algorithm
\begin{align*}
\frac{\lambda}{2\sigma} e^{-\lambda|\beta|/\sigma} = \int_0^\infty \frac{1}{\sqrt{2\pi\tau\sigma^2}} e^{-\beta^2/(2\tau\sigma^2)} \frac{\lambda^2}{2} e^{-\lambda^2\tau/2} d\tau,
\end{align*}
where $\tau$ is the latent scale parameter that is imputed in the E-step. We choose to use the form of the Laplace prior instead of the above mixture representation due to several considerations. First, an M-step for $\boldsymbol \beta$ of the EM algorithm under the normal mixture representation takes $O(n^2p)$ operations, which is slower than the proposed approach. Second, as pointed out by \citet{Armagan2013}, the Laplace representation leads to faster convergence than the normal mixture representation. Third, the regression coefficients cannot attain exact zeros in the normal mixture representation, and additional post-processing steps are required for variable selection, which can be sensitive to cut-off values. Lastly, numerical difficulties may arise when $\beta$ approaches zero under the normal mixture representation because the conditional mean of $\tau^{-1}$ may explode to infinity.
\item M-step for $\sigma$: With $\boldsymbol \beta = \boldsymbol \beta^{(t)}$ and $\boldsymbol \alpha = \boldsymbol \alpha^{(t-1)}$ fixed, we have
\begin{align*}
\sigma^{(t)} = \operatorname*{argmin}_\sigma \frac{c_1}{\sigma^2} + \frac{c_2}{\sigma} + c_3 \log \sigma,
\end{align*}
where $c_1 = \frac{1}{2}(\mathbf y-X\boldsymbol \beta)'(\mathbf y-X\boldsymbol \beta) + b_\sigma$, $c_2 = \sum_{j=1}^p e^{\alpha_j} |\beta_j|$, and $c_3 = n+p+2a_\sigma+2$.
The solution is then given by $\sigma^{(t)} = \frac{c_2 + \sqrt{c_2^2+8c_1c_3}}{2c_3}.$
\item M-step for $\boldsymbol \alpha$: Since there is no closed-form solution for $\boldsymbol \alpha$, we use the Newton method. With $\boldsymbol \beta = \boldsymbol \beta^{(t)}$, $\sigma = \sigma^{(t)}$, and $\mathit{\mathit \Omega} = \mathit{\mathit \Omega}^{(t)}$ fixed, the Newton search direction at $\boldsymbol \alpha$ is given by $\mathbf d_N(\boldsymbol \alpha) = -H^{-1} \mathbf g,$ where $H = \sigma \mathit{\mathit \Omega} + \nu D_{|\boldsymbol \beta|} D_{e^{\boldsymbol \alpha}}$ \mbox{ and } $\mathbf g = \sigma \mathit{\mathit \Omega} \left( \boldsymbol \alpha-\boldsymbol \mu \right) - \nu \sigma \mathbf 1 + \nu D_{|\boldsymbol \beta|} e^{\boldsymbol \alpha}$. As the Hessian matrix $H$ is always positive definite, $\mathbf d_N(\boldsymbol \alpha)$ becomes a valid Newton direction. Therefore, we can update $\boldsymbol \alpha$ as follows
\begin{align}\label{eq:update_alpha}
\boldsymbol \alpha^{(t)} &= \boldsymbol \alpha^{(t-1)} + s_t \mathbf d_N \left( \boldsymbol \alpha^{(t-1)} \right),
\end{align}
where $s_t$ is the step size.
Since the usual Newton method involves the inversion of the $p \times p$ Hessian matrix $H$, it is only feasible when $p$ is moderate. When $p$ is large, we suggest replacing the Hessian matrix by its diagonal matrix \citep{becker1988improving} and $\mathbf d_N(\boldsymbol \alpha)$ by $\mathbf d(\boldsymbol \alpha) = - D_H^{-1} \mathbf g$, where $D_H = \mathrm{diag}(H) = \sigma \mathrm{diag}(\mathit{\mathit \Omega}) + \nu D_{|\boldsymbol \beta|} D_{e^{\boldsymbol \alpha}}$. Since $D_H$ is positive definite, $\mathbf d(\boldsymbol \alpha)$ is a valid descent direction, and the step size $s_t$ can be determined by the backtracking line search
\citep{nocedal2006numerical}.
Note that there are only $p+|E|$ unique nonzero elements in $\mathit{\mathit \Omega}$. Therefore, obtaining the $p$-dimensional direction vector takes $O(p+|E|)$ operations only. Since edges in network graphs are usually sparse, its overall computation is much faster than the Newton method even if approximating the Hessian matrix may slightly increase the number of EM iterations.
In addition, it is not necessary to repeat the Newton steps until convergence to obtain the optimal solution for $\boldsymbol \alpha$ within each M-step for $\boldsymbol \alpha$. It suffices that each iteration of M-step for $\boldsymbol \alpha$ ensures an increase in the value of the objective function, in order to guarantee the convergence of the EM algorithm. However, our experience indicates that repeating the Newton steps three to five times within each M-step for $\boldsymbol \alpha$ helps reduce the number of total EM iterations.
As alluded to earlier, the advantage of the EM algorithm over directly optimizing the marginal posterior density $\pi(\mathbf theta|\mathbf y,X)$ with respect to $\mathbf theta$ lies in the fact that the Hessian matrix with respect to $\boldsymbol \alpha$ is guaranteed to be positive definite in the former case, while it is not in the latter. Since the EM algorithm exploits part of the curvature information in optimizing with respect to $\boldsymbol \alpha$ at nearly no extra computational cost, it is expected to lead to a reduced number of total iterations and hence savings in computation \citep{nocedal2006numerical}.
\end{itemize}
The EM algorithm can be started from the E-step for $\boldsymbol \omega$ with suggested initial values $\boldsymbol \beta^{(0)} = \mathbf 0$, $\sigma^{(0)} = \sqrt{(\mathbf y'\mathbf y+2b_\sigma)/c_3}$, and $\alpha_j^{(0)} = \mu$ for all $j$. The number of operations in each EM iteration is $O(npq+|E|)$ initially and reduces to $O(pq+|E|)$ after a few iterations. We repeat the EM procedures until the relative improvement of the optimum value of the objective function goes below a certain threshold, say $\epsilon=e^{-5}$.
| 4,052 | 38,227 |
en
|
train
|
0.161.3
|
\subsection{Role of Shrinkage Parameters $\boldsymbol \alpha=\log(\boldsymbol \lambda)$ }
\label{interpretation}
It is straightforward to show that the estimators satisfy
\begin{align}
\widehat{\boldsymbol \beta} = \operatorname*{argmin}_{\boldsymbol \beta} \, \frac{1}{2}(\mathbf y-X\boldsymbol \beta)'(\mathbf y-X\boldsymbol \beta) + \sum_{j=1}^p \widehat{\xi}_j |\beta_j|, \label{eq:betahat}
\end{align}
and
\begin{align} \label{adaptive_penalty}
\widehat{\boldsymbol \alpha} = \operatorname*{argmin}_{\boldsymbol \alpha} \, \frac{1}{2\nu} (\boldsymbol \alpha-\boldsymbol \mu)' \mathit{\mathit \Omega}^{(\infty)} (\boldsymbol \alpha-\boldsymbol \mu) - \mathbf 1' \boldsymbol \alpha + \frac{1}{\widehat{\sigma}} | \widehat{\boldsymbol \beta} |' e^{\boldsymbol \alpha},
\end{align}
where $\widehat{\xi}_j = \widehat{\sigma} e^{\widehat{\alpha}_j}$ and $\mathit{\mathit \Omega}^{(\infty)}$ is the final value of $\mathit{\mathit \Omega}$ from the EM algorithm. When $\widehat{\sigma}$ and $\widehat{\boldsymbol \alpha}$ are fixed, the solution $\widehat{\boldsymbol \beta}$ in \eqref{eq:betahat} resembles an adaptive lasso solution with the regularization parameter $\widehat{\boldsymbol \xi}$. Instead of assuming fixed weights as in the adaptive lasso, the EMSHS uses the data and the underlying graph knowledge to learn the weights. Specifically, the estimate of $\alpha_j$ depends on the shrinkage parameters corresponding to variables connected to $x_j,j=1,\ldots,p$ and the corresponding partial correlations, as follows
\begin{align}
\big|\widehat{\beta}_j\big| = \frac{\widehat{\sigma}}{\nu} \left( \mu + \nu - \widehat{\alpha}_j + \sum_{k \sim j} \omega_{jk}^{(\infty)} (\widehat{\alpha}_k - \widehat{\alpha}_j) \right) e^{-\widehat{\alpha}_j}. \label{eq:adaptive_weights}
\end{align}
By estimating the weights in an adaptive manner guided by the prior graph knowledge, the proposed approach avoids having to specify an initial consistent estimator for the weights as in the adaptive lasso, which is expected to be of significant practical advantage in high dimensional settings. This is in fact our experience in numerical studies; see Section 4. Finally, we note that larger values of $\widehat{\alpha}_j$ translate to smaller values for $\big|\widehat{\beta}_j\big|,j=1,\ldots,p$, and vice-versa, clearly demonstrating the role of the shrinkage parameters $\boldsymbol \alpha$.
| 695 | 38,227 |
en
|
train
|
0.161.4
|
\section{Theoretical Properties} \label{sec:oracle}
To fix ideas, let $p_n$ denote the number of candidate predictors, of which $q_n$ are the true important variables. Model~\eqref{eq:model} is reformulated as
\begin{align*}
\mathbf y_n = X_n \boldsymbol \beta_0 + \boldsymbol \epsilon_n,
\end{align*}
where $\mathbf y_n$ is the $n \times 1$ response vector, $X_n$ is the $n\times p_n$ design matrix, $\boldsymbol \beta_0$ is the $p_n \times 1$ true coefficient vector, and $\boldsymbol \epsilon_n$ is the $n \times 1$ error vector. The errors are independent Gaussian with mean 0 and variance $\sigma_0^2$; $\boldsymbol \epsilon_n \sim \mathcal{N}(\mathbf 0,\sigma_0^2I_n)$, and the errors are also independent of the covariates.
The covariates are stochastic and are dictated by an inverse covariance matrix depending on a true graph $\mathcal{G}_{0n}$. They are standardized such that
\begin{align*}
\mathbf 1' \mathbf x_{nj} = 0, \qquad \mathbf x_{nj}'\mathbf x_{nj} = n, \qquad j=1,\dots,p_n,
\end{align*}
where $\mathbf x_{nj}$ is the $j$-th column (variable) of $X_n$, and let $\mathit \Sigma_n = \frac{1}{n} X_n' X_n$ be the sample covariance matrix.
Let $\widehat{\boldsymbol \theta}_n = (\widehat{\boldsymbol \beta}_n',\widehat{\sigma}_n^2,\widehat{\boldsymbol \alpha}_n')'$ be the EMSHS solution.
Let $\mathcal{A}_n = \{j:\widehat{\beta}_{nj} \neq 0\}$ be the index set of the selected variables in $\widehat{\boldsymbol \beta}_n$,
and $\mathcal{A}_0 = \{j:\beta_{0j} \neq 0\}$ be the index set of the true important variables where $|\mathcal{A}_0|=q_n$.
We assume $\|\boldsymbol \beta_0\|$ is bounded so that the variance of the response and the signal-to-noise ratio stay bounded. Without loss of generality, we assume $\|\boldsymbol \beta_0\| = 1$.
For any index set $\mathcal{A}$, $\mathbf v_\mathcal{A}$ represents the subvector of a vector $\mathbf v$ with entries corresponding to $\mathcal{A}$. $E_{\mathcal{A}\mathcal{B}}$ is the submatrix of a matrix $E$ with rows and columns corresponding to $\mathcal{A}$ and $\mathcal{B}$, respectively. When a sequential index set $\mathcal{A}_n$ is used for a sequence of vectors or matrices indexed by $n$, the subscript $n$ may be omitted for conciseness if it does not cause a confusion. For example, $\mathbf v_{n\mathcal{A}_n}$ can be written as $\mathbf v_{\mathcal{A}_n}$ or $\mathbf v_{n\mathcal{A}}$, and $E_{n\mathcal{A}_n\mathcal{B}_n}$ can be written as $E_{\mathcal{A}_n\mathcal{B}_n}$ or $E_{n\mathcal{A}\mathcal{B}}$.
Let $O( \cdot )$, $o( \cdot )$, $O_p( \cdot )$, and $o_p( \cdot )$ denote the standard big $O$, little $o$, big $O$ in probability, and little $o$ in probability, respectively.
Further $f(n) = \Theta(g(n))$ indicates that $f(n)$ and $g(n)$ satisfy $f(n) = O(g(n))$ and $g(n) = O(f(n))$; $f(n) = \Theta_p(g(n))$ is similarly defined.
When these notations are used for vectors and matrices, they bound the $L_2$-norm $\| \cdot \|$ of the entities. For example, $\mathbf v = O(n)$ means that $\|\mathbf v\| = O(n)$. Every norm $\| \cdot \|$ in this article denotes the $L_2$ norm. Finally, $\rightarrow_p$ and $\rightarrow_d$ denote convergence in probability and in distribution, respectively.
\subsection{Oracle Property for Fixed $p$}
Consider the case with a fixed number of candidate predictors (i.e., $p_n=p$). Suppose the following conditions hold as $n \rightarrow \infty$.
\begin{enumerate}[label=(A\arabic{enumi}),ref=(A\arabic{enumi})]
\item \label{ass:fix:beta} $\|\boldsymbol \beta_0\| = 1$ and $\min_{j \in \mathcal{A}_0} |\beta_{0j}| \ge C_\beta$ for some constant $C_\beta>0$.
\item \label{ass:fix:XX} $\mathit \Sigma_n \rightarrow_p \mathit \Sigma_0$ where $\mathit \Sigma_0$ is positive definite and depends on $\mathcal{G}_{0n}=\mathcal{G}_0$.
\item \label{ass:fix:mu} $\mu_n = R\log n + o(\log n)$ where $1/2<R<1$.
\item \label{ass:fix:nu} $\nu_n = \Theta( n^{-r} \log n )$ where $0 < r < R-1/2$.
\item \label{ass:fix:omega} $a_{\omega n} b_{\omega n}^{-1} = o(1)$.
\item \label{ass:fix:sigma} $a_{\sigma n} = a_{\sigma 1} n^z$ and $b_{\sigma n} = b_{\sigma 1} n^z$ for $a_{\sigma 1}>0$, $b_{\sigma 1} > 0$, and $0 \le z < 1$.
\end{enumerate}
Assumption \ref{ass:fix:beta} states that the nonzero coefficients stay away from zero, although their magnitudes are allowed to vary with $n$. Assumption \ref{ass:fix:XX} is a fairly general regularity condition for the
design matrix which rules out collinearity between covariates, and ensures that the important variables are not replaced by any other remaining variables in the model. Readers are referred to Remark \ref{rem:fix:principle} for the comments on \ref{ass:fix:mu} and \ref{ass:fix:nu}. Assumption \ref{ass:fix:omega} forces the precision matrix to assume a diagonal form as $n \to \infty$.
Thus as $n \to \infty$, we essentially do not need to utilize prior graph knowledge $\mathcal{G}_0$ to establish the theoretical results for the fixed $p$ case. Hence our asymptotic results for fixed $p$ are agnostic to the structure of the prior graph, and hence robust to mis-specification. However, we note that for finite samples, incorporation of true prior graph knowledge is of paramount importance in achieving improved numerical performance. According to \ref{ass:fix:sigma}, the prior on $\sigma^2$ is well-tightened, with $\sigma^2$ converging to a constant when $z>0$.
\begin{thm} \label{oracle:fix}
Assume the conditions \ref{ass:fix:beta}-\ref{ass:fix:sigma}.
The following statements hold for the EMSHS estimator $\widehat{\boldsymbol \theta}_n = (\widehat{\boldsymbol \beta}_n',\widehat{\sigma}_n^2,\widehat{\boldsymbol \alpha}_n')'$ as $n \rightarrow \infty$.
\begin{enumerate}[label=(\alph*)]
\item $P ( \mathcal{A}_n = \mathcal{A}_0 ) \rightarrow 1$.
\item $n^{1/2} \left( \widehat{\boldsymbol \beta}_{n\mathcal{A}_0} - \boldsymbol \beta_{0\mathcal{A}_0} \right) \rightarrow_d \mathcal{N}\left(\mathbf 0, \sigma_0^2 \mathit \Sigma_{0\mathcal{A}_0\mathcal{A}_0}^{-1} \right)$.
\item The solution is unique in probability.
\end{enumerate}
\end{thm}
The proof for Theorem 1 is provided in Appendix.
\begin{rem}
Although we only consider Gaussian errors, the results also hold for error distributions with finite variance.
\end{rem}
\begin{rem} \label{rem:fix:principle}
With a slight lack of rigor, \ref{ass:fix:mu} ensures $\widehat{\xi}_{nj} = \widehat{\sigma}_n e^{\widehat{\alpha}_{nj}} = \Theta_p(n^R)$ for $j \in \mathcal{A}_n^c$, and \ref{ass:fix:nu} ensures $|\widehat{\beta}_{nj}| \widehat{\xi}_{nj} = \Theta_p(n^r)$ for $j \in \mathcal{A}_n$.
Therefore, if $\widehat{\boldsymbol \beta}_n$ is $\sqrt{n}$-consistent, which is indeed the case, the important variables receive shrinkage of order $\widehat{\xi}_{nj} = \Theta_p(n^r)$ and the unimportant variables receive shrinkage of order \emph{at least} $\widehat{\xi}_{nj} = \Theta_p(n^{r+1/2})$. This is the key that leads to the oracle property.
\end{rem}
\begin{rem} \label{rem:fix:sigma}
The true residual variance $\sigma_0^2$ is consistently estimated by $\widehat{\sigma}_n^2$. That is, $\widehat{\sigma}_n^2 \rightarrow_p \sigma_0^2$.
\end{rem}
| 2,395 | 38,227 |
en
|
train
|
0.161.5
|
\subsection{Oracle Property for Fixed $p$}
Consider the case with a fixed number of candidate predictors (i.e., $p_n=p$). Suppose the following conditions hold as $n \rightarrow \infty$.
\begin{enumerate}[label=(A\arabic{enumi}),ref=(A\arabic{enumi})]
\item \label{ass:fix:beta} $\|\boldsymbol \beta_0\| = 1$ and $\min_{j \in \mathcal{A}_0} |\beta_{0j}| \ge C_\beta$ for some constant $C_\beta>0$.
\item \label{ass:fix:XX} $\mathit \Sigma_n \rightarrow_p \mathit \Sigma_0$ where $\mathit \Sigma_0$ is positive definite and depends on $\mathcal{G}_{0n}=\mathcal{G}_0$.
\item \label{ass:fix:mu} $\mu_n = R\log n + o(\log n)$ where $1/2<R<1$.
\item \label{ass:fix:nu} $\nu_n = \Theta( n^{-r} \log n )$ where $0 < r < R-1/2$.
\item \label{ass:fix:omega} $a_{\omega n} b_{\omega n}^{-1} = o(1)$.
\item \label{ass:fix:sigma} $a_{\sigma n} = a_{\sigma 1} n^z$ and $b_{\sigma n} = b_{\sigma 1} n^z$ for $a_{\sigma 1}>0$, $b_{\sigma 1} > 0$, and $0 \le z < 1$.
\end{enumerate}
Assumption \ref{ass:fix:beta} states that the nonzero coefficients stay away from zero, although their magnitudes are allowed to vary with $n$. Assumption \ref{ass:fix:XX} is a fairly general regularity condition for the
design matrix which rules out collinearity between covariates, and ensures that the important variables are not replaced by any other remaining variables in the model. Readers are referred to Remark \ref{rem:fix:principle} for the comments on \ref{ass:fix:mu} and \ref{ass:fix:nu}. Assumption \ref{ass:fix:omega} forces the precision matrix to assume a diagonal form as $n \to \infty$.
Thus as $n \to \infty$, we essentially do not need to utilize prior graph knowledge $\mathcal{G}_0$ to establish the theoretical results for the fixed $p$ case. Hence our asymptotic results for fixed $p$ are agnostic to the structure of the prior graph, and hence robust to mis-specification. However, we note that for finite samples, incorporation of true prior graph knowledge is of paramount importance in achieving improved numerical performance. According to \ref{ass:fix:sigma}, the prior on $\sigma^2$ is well-tightened, with $\sigma^2$ converging to a constant when $z>0$.
\begin{thm} \label{oracle:fix}
Assume the conditions \ref{ass:fix:beta}-\ref{ass:fix:sigma}.
The following statements hold for the EMSHS estimator $\widehat{\boldsymbol \theta}_n = (\widehat{\boldsymbol \beta}_n',\widehat{\sigma}_n^2,\widehat{\boldsymbol \alpha}_n')'$ as $n \rightarrow \infty$.
\begin{enumerate}[label=(\alph*)]
\item $P ( \mathcal{A}_n = \mathcal{A}_0 ) \rightarrow 1$.
\item $n^{1/2} \left( \widehat{\boldsymbol \beta}_{n\mathcal{A}_0} - \boldsymbol \beta_{0\mathcal{A}_0} \right) \rightarrow_d \mathcal{N}\left(\mathbf 0, \sigma_0^2 \mathit \Sigma_{0\mathcal{A}_0\mathcal{A}_0}^{-1} \right)$.
\item The solution is unique in probability.
\end{enumerate}
\end{thm}
The proof for Theorem 1 is provided in Appendix.
\begin{rem}
Although we only consider Gaussian errors, the results also hold for error distributions with finite variance.
\end{rem}
\begin{rem} \label{rem:fix:principle}
With a slight lack of rigor, \ref{ass:fix:mu} ensures $\widehat{\xi}_{nj} = \widehat{\sigma}_n e^{\widehat{\alpha}_{nj}} = \Theta_p(n^R)$ for $j \in \mathcal{A}_n^c$, and \ref{ass:fix:nu} ensures $|\widehat{\beta}_{nj}| \widehat{\xi}_{nj} = \Theta_p(n^r)$ for $j \in \mathcal{A}_n$.
Therefore, if $\widehat{\boldsymbol \beta}_n$ is $\sqrt{n}$-consistent, which is indeed the case, the important variables receive shrinkage of order $\widehat{\xi}_{nj} = \Theta_p(n^r)$ and the unimportant variables receive shrinkage of order \emph{at least} $\widehat{\xi}_{nj} = \Theta_p(n^{r+1/2})$. This is the key that leads to the oracle property.
\end{rem}
\begin{rem} \label{rem:fix:sigma}
The true residual variance $\sigma_0^2$ is consistently estimated by $\widehat{\sigma}_n^2$. That is, $\widehat{\sigma}_n^2 \rightarrow_p \sigma_0^2$.
\end{rem}
\subsection{Oracle Property for Diverging $p$}
When the number of candidate predictors is diverging, let $\mathcal{G}_n = \langle V_n, E_n \rangle$ be the working graph which is used to fit the model where $V_n = \{1,\dots,p_n\}$ and $E_n$ is the set of edges. Let $G_n$ be the adjacency matrix for $\mathcal{G}_n$, $l_{nj} = \sum_{k=1}^{p_n} G_{n,jk}$ be the degree of the vertex $j$ in $\mathcal{G}_n$, and $L_n = \max_{1 \le j \le p_n} l_{nj}$ be the maximum degree among all vertices.
Suppose the following conditions hold as $n \rightarrow \infty$.
\begin{enumerate}[label=(B\arabic{enumi}),ref=(B\arabic{enumi})]
\item \label{ass:p} $p_n = O(\exp(n^U))$ where $0 \le U < 1$.
\item \label{ass:q} $q_n = O(n^u)$ where $0 \le u < (1-U)/2$ and $q_n \le p_n$.
\item \label{ass:beta} $\|\boldsymbol \beta_0\| = 1$ and $\min_{j\in \mathcal{A}_0} |\beta_{0j}| \ge C_\beta q_n^{-1/2}$ for some constant $C_\beta>0$.
\item \label{ass:XX} Assume that $\mathcal{G}_{0n}$ is such that the smallest eigenvalue of $\mathit \Sigma_{n\mathcal{A}\mathcal{A}}$ is greater than $\tau_1$ for any index set $\mathcal{A}$ with $|\mathcal{A}| \le n$, and that the largest eigenvalue of $\mathit \Sigma_n$ is less than $\tau_2$ almost surely, where $0<\tau_1<\tau_2<\infty$.
\item \label{ass:rho} Assume that $\mathcal{G}_{0n}$ is such that the following partial orthogonality condition holds.
\begin{align*}
\|\mathit \Sigma_{n\mathcal{B}\mathcal{C}}\|^2 \le \rho_n^2 \|\mathit \Sigma_{n\mathcal{B}\mathcal{B}}\| \|\mathit \Sigma_{n\mathcal{C}\mathcal{C}}\|, \qquad \forall \mathcal{B} \subset \mathcal{A}_0, \forall \mathcal{C} \subset \mathcal{A}_0^c,
\end{align*}
almost surely where $\rho_n = O(n^{-1/2})$.
\begin{comment}
\begin{align*}
|C_{jk}| = \frac{1}{n} |\mathbf x_j'\mathbf x_k| \le \rho_n = o(n^{-1/2}q_n^{-1/2}), \qquad j \in \mathcal{A}_0, k \notin \mathcal{A}_0.
\end{align*}
This condition leads to
\begin{align*}
\|C_{\mathcal{B}\mathcal{C}}\| \le n^{1/2} q_n^{1/2} \rho_n, \qquad \mathcal{B} \subset \mathcal{A}_0, \mathcal{C} \subset \mathcal{A}_0^c, |\mathcal{C}|\le n.
\end{align*}
\end{comment}
\item \label{ass:mu} $\mu_n = R\log n + \frac{1}{2} \log (1+p_n/n) + o(\log n)$ where $(U+1)/2<R<1-u$.
\item \label{ass:nu} $\nu_n = \Theta( (1+p_n/n)^{-1} n^{-r} \log n )$ where $0 < r < R-1/2< 1/2-u$.
\item \label{ass:omega} $L_n a_{\omega n} b_{\omega n}^{-1} = o(1)$.
\item \label{ass:sigma} $a_{\sigma n} = a_{\sigma 1} n^z$ and $b_{\sigma n} = b_{\sigma 1} n^z$ for $a_{\sigma 1}>0$, $b_{\sigma 1} > 0$, and $1-r<z<1$.
\end{enumerate}
Assumption~\ref{ass:p} allows the number of candidate predictors to increase at an exponential rate and \ref{ass:q} allows the number of important variables to diverge as well. Assumption \ref{ass:beta} states that the $L_2$ norm of the true regression coefficients is bounded, which in conjunction with diverging $q_n$ implies that some of the true nonzero coefficients may get sufficiently small. However, \ref{ass:beta} ensures that they remain away from zero sufficiently. In order to accommodate increasing $p_n$ and $q_n$, the shrinkage parameters need to be carefully calibrated, which is ensured under conditions \ref{ass:mu} and \ref{ass:nu} on $\mu_n$ and $\nu_n$ and by the fact that $q_n$ increases at a moderate rate in \ref{ass:q}. The roles of $\mu_n$ and $\nu_n$ are further explained in Remark \ref{rem:rate}.
Assumption \ref{ass:XX} is analogous to \ref{ass:fix:XX} for the fixed $p$ case. The partial orthogonality condition in \ref{ass:rho} assumes that the unimportant variables are asymptotically weakly correlated with the important variables; similar assumptions are widely used for the case of diverging $p$ in the literature \citep{Huang2008}. Since $p_n\to\infty$, the degree of a vertex in the graph $\mathcal{G}_n$ can diverge. In order to precisely regulate the smoothing effect between neighboring shrinkage parameters, the condition \ref{ass:fix:omega} needs to be extended to \ref{ass:omega} which incorporates information about the degrees of vertices in the working graph $\mathcal{G}_n$. The condition \ref{ass:sigma} is stronger than \ref{ass:fix:sigma} in order to prevent $\widehat{\sigma}^2_n$ from converging to zero much faster than desired: see Remark \ref{rem:sigma} for further comments on $\widehat{\sigma}^2_n$.
\begin{thm} \label{thm:oracle}
Assume the conditions \ref{ass:p}-\ref{ass:sigma}.
The following statements hold for the EMSHS estimator $\widehat{\boldsymbol \theta}_n = (\widehat{\boldsymbol \beta}_n',\widehat{\sigma}_n^2,\widehat{\boldsymbol \alpha}_n')'$ as $n \rightarrow \infty$.
\begin{enumerate}
\item $P( \mathcal{A}_n = \mathcal{A}_0 ) \rightarrow 1$.
\item Letting $s_n^2 = \boldsymbol \gamma_n' \mathit \Sigma_{n\mathcal{A}_0\mathcal{A}_0}^{-1} \boldsymbol \gamma_n$ for any sequence of $q_n \times 1$ nonzero vectors $\boldsymbol \gamma_n$, we have
\begin{align*}
n^{1/2} s_n^{-1} \boldsymbol \gamma_n' ( \widehat{\boldsymbol \beta}_{n\mathcal{A}_0} - \boldsymbol \beta_{0\mathcal{A}_0} ) \rightarrow_d \mathcal{N} (0,\sigma_0^2).
\end{align*}
\item The solution is unique in probability.
\end{enumerate}
\end{thm}
We note that, in contrast to the fixed $p$ case, the oracle property result for the diverging $p$ case requires assumptions on the true graph, as in conditions \ref{ass:XX} and \ref{ass:rho}, as well as knowledge about the working graph $\mathcal{G}_n$ in \ref{ass:omega}. The proof is provided in the Appendix.
\begin{rem}
Although we only consider Gaussian errors, the results can be readily generalized to moderately heavier tailed errors.
\end{rem}
\begin{rem} \label{rem:rate}
In parallel with Remark \ref{rem:fix:principle}, speaking somewhat informally, \ref{ass:mu} ensures $\widehat{\xi}_{nj} = \Theta_p(n^R)$ for $j \in \mathcal{A}_n^c$, and \ref{ass:nu} ensures $|\widehat{\beta}_{nj}| \widehat{\xi}_{nj} = \Theta_p(n^r)$ for $j \in \mathcal{A}_n$. If $\widehat{\boldsymbol \beta}_n$ is $\sqrt{n}$-consistent, which is indeed the case, the important variables receive shrinkage of order \emph{at most} $\widehat{\xi}_{nj} = \Theta_p(n^r q_n^{1/2})$ due to \ref{ass:beta} and the unimportant variables receive shrinkage of order \emph{at least} $\widehat{\xi}_{nj} = \Theta_p(n^{r+1/2})$.
\end{rem}
\begin{rem} \label{rem:sigma}
Unlike Remark \ref{rem:fix:sigma}, $\widehat{\sigma}_n^2$ may converge to 0. However, once rescaled, $\widehat{\sigma}_n^2$ consistently estimates the true residual variance $\sigma_0^2$; $(n+p_n)\widehat{\sigma}_n^2/n \rightarrow_p \sigma_0^2$.
\end{rem}
\section{Simulation Study} \label{simulation}
We conduct simulations to evaluate the performance of the proposed approach in comparison with several existing methods. The competing methods include the lasso (Lasso), the adaptive Lasso (ALasso) \citep{Zou2006}, the Bayesian variable selection approach using spike and slab priors and MRF priors by \citet{Stingo2011} which we denote as BVS-MRF, and finally the EM approach for Bayesian variable selection (denoted as EMVS) proposed by \citet{Rockova2014} and its extension to incorporate structural information (denoted as EMVSS). Of note, EMSHS, EMVSS and BVS-MRF incorporate the graph information, whereas the other methods do not. For Lasso and ALasso, we use the glmnet R package where the initial consistent estimator for ALasso is given by the ridge regression. The Matlab code for the MCMC approach is provided with the original article by \citet{Stingo2011}. \citet{Rockova2014} provided us their unpublished R codes for EMVS and EMVSS.
\subsection{Simulation Set-up}
The simulated data are generated from the following model
\begin{align*}
y_i = \mathbf x_i' \boldsymbol \beta + \epsilon_i, \qquad 1 \le i \le n,
\end{align*}
where $\mathbf x_i \sim \mathcal{N}(\mathbf 0,\mathit{\Sigma}_X)$, $\epsilon_i \sim \mathcal{N}(0,\sigma_\epsilon^2)$, and $\boldsymbol \beta = (\underbrace{1,\dots,1}_{q},\underbrace{0,\dots,0}_{p-q})$.
The first $q=5$ variables are the important variables and the last $p-q$ variables are unimportant variables. The sample size is fixed at $n=50$; the residual standard deviation is fixed at $\sigma_\epsilon=1$; and we consider $p=1{,}000$, $10{,}000$, and $100{,}000$.
Let $G_0$ be the adjacency matrix for the true covariate graph, which determines ${\mathit \Sigma}_X$. That is, $G_{0,jk} = 1$ if there is an edge between predictors $j$ and $k$, and $G_{0,jk} = 0$ otherwise. $G_0$ is generated as follows.
\begin{enumerate}[label=(\arabic*)]
\item We generate $g$ virtual pathways, depending on the total number of predictors; $g=50$ for $p=1{,}000$, $g=300$ for $p=10{,}000$ and $p=100{,}000$.
\item The first pathway is composed of the $q$ important variables only.
\item The number of genes in other pathways are negative binomial random variables with mean $\mu_{path} = 30$.
\item The genes which belong to a pathway are chosen randomly and independently of other pathways. Hence the pathways can be overlapping.
\item Edges are randomly generated ensuring all genes in a pathway have at least one path to all the other genes in the pathway. This can be done by conducting the following procedure for each pathway.
\begin{enumerate}[label=(\alph*)]
\item Randomly choose two genes and insert an edge between the two. Mark the two genes as connected. Mark the others as unconnected.
\item \label{stepX} Randomly choose a connected gene and an unconnected gene, and add an edge between them. Mark the unconnected gene as connected.
\item Repeat step \ref{stepX} until all genes are connected. This will form a tree, where all genes have at least one path to all the other genes in the pathway.
\item In order to add some extra edges, for each pair of genes that do not share an edge, add an edge between them with probability $p_1 = 0.05$. $p_1$ determines the overall density of edges.
\end{enumerate}
\end{enumerate}
Given $G_0$, the covariance matrix $\mathit{\Sigma}_X$ is designed as follows.
\begin{enumerate}[label=(\roman*)]
\item Set $A = I_p$.
\item Calculate the vertex degrees $D_j = \sum_{k=1}^p G_{0,jk}$.
\item \label{step3} For each pair $j<k$ with $G_{0,jk} = 1$, set $A_{jk} = A_{kj} = -S_{jk}/(\max(D_j,D_k) \times 1.1 + 0.1)$ where
\begin{align*}
S_{jk} = \begin{cases}
1, & \textrm{if } 1 \le j,k \le q,\\
\mathrm{Ber}(1/2), & \textrm{otherwise}.
\end{cases}
\end{align*}
\item Set $\mathit{\Sigma}_X = A^{-1}$ and then rescale $\mathit{\Sigma}_X$ so that its diagonal elements become 1.
\end{enumerate}
Note that the resulting covariance matrix $\mathit{\Sigma}_X$ is diagonally dominant and positive definite, and $X_j$ and $X_k$ are partially correlated only if $G_{0,jk} = 1$.
Also note that since this procedure involves inverting a $p \times p$ matrix, we used this method for the $p=1{,}000$ and $p=10{,}000$ cases only. For the $p=100{,}000$ case, the network information of the first $10{,}000$ variables was generated by this procedure, and the remaining $90{,}000$ variables were added independently of the first set of variables.
Let $G$ be the adjacency matrix of the pathway graph that is used to fit the model. We consider several scenarios where the graph used to fit the model may be specified correctly or mis-specified, as follows
\begin{enumerate}[label=\arabic*)]
\item $G_0$ is as described above and $G=G_0$.
\item $G_0$ is as described above but allows no edge between important variables and unimportant variables, and $G=G_0$.
\item $G_0$ is the same as in scenario (1), but $G$ is randomly generated with the same number of edges as $G_0$.
\item $G_0$ is the same as in scenario (2), but $G$ is randomly generated with the same number of edges as $G_0$.
\item $G_0$ is the same as in scenario (1), but $G$ includes only a subset of the edges in $G_0$ for which the corresponding partial correlations are greater than $0.5$.
\end{enumerate}
Scenarios (1) and (2) are cases where the true graph is completely known; scenario (2) allows no correlation between important and unimportant variables and hence is an ideal setting for our approach. In scenarios (3) and (4), considered the worst-case scenarios, $G$ is completely mis-specified. Scenario (5) mimics the situation where only strong signals from $G_0$ are known to data analysts, which is between the ideal and the worst-case scenarios.
For the proposed approach, we choose an uninformative prior $\sigma^2 \sim \mathcal{IG}(1,1)$. Based on our numerical studies which show that the performance of EMSH and EMSHS is not highly sensitive to $a_\omega$, $b_\omega$, and $\nu$, we recommend using $a_\omega=4$, $b_\omega=1$, and $\nu=1.2$.
We generate 500 simulated datasets in total, each of which contains a training set, validation set, and test set of size $n=50$ each. We fit the model using the training data for a grid of values of the tuning parameter $\mu$ lying between $(3.5,7.5)$, and then we choose the value that minimizes the prediction error for the validation data. Variable selection performance is assessed in terms of the number of false positives (FP) and the number of false negatives (FN) and prediction performance is assessed in terms of mean squared prediction error (MSPE) calculated using the test data. We also report the average computation time per tuning parameter value in seconds.
\subsection{Results}
The simulation results are summarized in Tables \ref{tbl1}, \ref{tbl2}, and \ref{tbl3}. BVS-MRF is omitted in the case of $p=10{,}000$ and both BVS-MRF and EMVSS are omitted in the $p=100{,}000$ case, because they are not scalable or errors are reported when applied to these settings.
All methods achieve better performance in scenario 2 (or 4) compared to scenario 1 (or 3), indicating that the problem is more challenging in the presence of nonzero correlation between the important variables and the unimportant variables. Within each of scenarios 1, 2, and 5, where true or partially true graph knowledge is available, the structured variable selection methods EMVSS and EMSHS are superior to their counterparts that do not use graph information (i.e., EMVS and EMSH). Moreover, the performance of each structured variable selection method (namely, EMSHS, EMVSS and BVS-MRF) improves from Scenario 3 (or 4) to Scenario 1 (or 2), further demonstrating the benefits of the correctly specified graph information. Similarly, the partially correctly specified graph information in Scenario 5 also improves the performance of these methods compared to Scenario 3. In Scenarios 3 and 4, where the graph information is completely mis-specified, no such benefit is available.
For prediction, the EMSHS yields the best performance in all settings when the graph information is correctly or partially correctly specified (i.e., scenarios 1, 2 and 5). When the graph information is completely mis-specified (scenarios 3 and 4), EMSHS still yields the best or close to the best performance in all settings, demonstrating its robustness to mis-specified graph information. In addition, EMSH performs the best among the unstructured variable selection methods for all cases under $p=1{,}000$ and $p= 10{,}000$, lending support to the advantage of using the data to learn adaptive shrinkage in EMSH as discussed in section \ref{interpretation}.
For variable selection, the EMSHS yields the best or close to the best performance in all settings when the graph information is correctly or partially correctly specified. Of note, while EMVS tends to have close to 0 false positives, it has high false negatives. In addition, the false positives under the proposed methods are significantly lower compared to Lasso and adaptive lasso. Finally, EMSHS consistently yields lower false positives and false negatives than EMSH in scenarios 1, 2 and 5, which again demonstrates the advantage of incorporating true graph information.
While the difference in performance between EMSHS and EMVSS in terms of prediction and variable selection is subtle for $p=1{,}000$ in scenarios 1 and 5, they behave somewhat differently for $p=10{,}000$. EMSHS tends to have relatively lower false negatives admitting slightly higher false positives. Including more important variables seems to have led to smaller prediction errors than EMVSS.
Although somewhat slower than the Lasso and adaptive Lasso, the proposed structured variable selection approach is still computationally efficient and is scalable to $p=100{,}000$ and higher dimensions, which is substantially better than the BVS-MRF and EMVSS.
\begin{small}
\begin{table}
\caption{\label{tbl1} The mean squared prediction error (MSPE) for the test data, the number of false positives (FP), the number of false negatives (FN), and the average computation time per tuning parameter value in seconds are recorded for $p=1{,}000$ case. In the parentheses are the corresponding standard errors.}
\begin{tabular}{l|cccr}
Method & MSPE & FP & FN & \multicolumn{1}{c}{Time}\\
\hline
& \multicolumn{4}{l}{Scenario \#1: Reference case}\\
Lasso & 2.29 (0.04) & 24.11 (0.48) & 0.04 (0.01) & 0.00\\
ALasso & 2.12 (0.04) & 12.20 (0.35) & 0.14 (0.02) & 0.00\\
BVS-MRF & 3.41 (0.07) & 10.56 (0.72) & 1.23 (0.05) & 540.78\\
EMVS & 5.58 (0.11) & 0.00 (0.00) & 3.55 (0.06) & 0.50\\
EMVSS & 1.36 (0.02) & 1.39 (0.07) & 0.05 (0.01) & 1.30\\
EMSH & 1.76 (0.04) & 2.62 (0.14) & 0.27 (0.03) & 0.07\\
EMSHS & 1.31 (0.03) & 1.13 (0.09) & 0.06 (0.02) & 0.75\\
\hline
& \multicolumn{4}{l}{Scenario \#2: Ideal case}\\
Lasso & 1.73 (0.02) & 18.11 (0.48) & 0.00 (0.00) & 0.00\\
ALasso & 1.48 (0.02) & 6.17 (0.22) & 0.01 (0.00) & 0.00\\
BVS-MRF & 2.02 (0.04) & 7.18 (0.58) & 0.28 (0.03) & 539.82\\
EMVS & 3.11 (0.10) & 0.00 (0.00) & 1.93 (0.06) & 0.47\\
EMVSS & 1.22 (0.01) & 0.76 (0.05) & 0.01 (0.00) & 1.25\\
EMSH & 1.28 (0.02) & 0.71 (0.06) & 0.09 (0.02) & 0.06\\
EMSHS & 1.14 (0.01) & 0.24 (0.05) & 0.00 (0.00) & 0.74\\
\hline
& \multicolumn{4}{l}{Scenario \#3: Worst case (Reference)}\\
Lasso & 2.21 (0.04) & 23.61 (0.47) & 0.03 (0.01) & 0.00\\
ALasso & 2.04 (0.04) & 11.50 (0.34) & 0.12 (0.02) & 0.00\\
BVS-MRF & 3.39 (0.07) & 10.39 (0.73) & 1.23 (0.06) & 516.85\\
EMVS & 5.41 (0.11) & 0.01 (0.00) & 3.49 (0.06) & 0.51\\
EMVSS & 1.83 (0.05) & 2.62 (0.11) & 0.30 (0.03) & 1.23\\
EMSH & 1.66 (0.04) & 2.74 (0.14) & 0.19 (0.02) & 0.06\\
EMSHS & 1.73 (0.04) & 5.41 (0.31) & 0.22 (0.03) & 0.68\\
\hline
& \multicolumn{4}{l}{Scenario \#4: Worst case (Ideal)}\\
Lasso & 1.72 (0.03) & 18.42 (0.48) & 0.01 (0.00) & 0.00\\
ALasso & 1.48 (0.02) & 6.04 (0.22) & 0.03 (0.01) & 0.00\\
BVS-MRF & 1.99 (0.04) & 7.35 (0.59) & 0.28 (0.03) & 408.52\\
EMVS & 3.13 (0.10) & 0.00 (0.00) & 1.91 (0.06) & 0.48\\
EMVSS & 1.35 (0.02) & 1.17 (0.07) & 0.09 (0.01) & 1.20\\
EMSH & 1.29 (0.02) & 0.76 (0.06) & 0.10 (0.02) & 0.06\\
EMSHS & 1.51 (0.03) & 4.32 (0.28) & 0.19 (0.02) & 0.69\\
\hline
& \multicolumn{4}{l}{Scenario \#5: Intermediate case}\\
Lasso & 2.25 (0.04) & 22.91 (0.47) & 0.05 (0.01) & 0.00\\
ALasso & 2.06 (0.04) & 11.42 (0.31) & 0.13 (0.02) & 0.00\\
BVS-MRF & 3.36 (0.07) & 12.28 (0.79) & 1.21 (0.06) & 449.23\\
EMVS & 5.41 (0.11) & 0.01 (0.00) & 3.45 (0.06) & 0.47\\
EMVSS & 1.34 (0.03) & 1.27 (0.07) & 0.04 (0.01) & 1.96\\
EMSH & 1.66 (0.04) & 2.55 (0.15) & 0.21 (0.02) & 0.06\\
EMSHS & 1.31 (0.03) & 1.33 (0.12) & 0.05 (0.01) & 0.81\\
\hline
\end{tabular}
\end{table}
\end{small}
\begin{small}
\begin{table}
\caption{\label{tbl2} The mean squared prediction error (MSPE) for the test data, the number of false positives (FP), the number of false negatives (FN), and the average computation time per tuning parameter value in seconds are recorded for $p=10{,}000$ case. In the parentheses are the corresponding standard errors.}
\begin{tabular}{l|cccr}
Method & MSPE & FP & FN & \multicolumn{1}{c}{Time}\\
\hline
& \multicolumn{4}{l}{Scenario \#1: Reference case}\\
Lasso & 3.34 (0.07) & 29.00 (0.53) & 0.34 (0.03) & 0.03\\
ALasso & 3.21 (0.08) & 15.87 (0.46) & 0.54 (0.04) & 0.02\\
EMVS & 6.88 (0.12) & 0.00 (0.00) & 4.17 (0.04) & 33.70\\
EMVSS & 2.01 (0.07) & 2.23 (0.13) & 0.56 (0.04) & 83.52\\
EMSH & 2.98 (0.09) & 4.74 (0.26) & 1.04 (0.05) & 0.96\\
EMSHS & 1.94 (0.08) & 2.71 (0.25) & 0.39 (0.04) & 5.56\\
\hline
& \multicolumn{4}{l}{Scenario \#2: Ideal case}\\
Lasso & 2.30 (0.04) & 23.68 (0.55) & 0.04 (0.01) & 0.03\\
ALasso & 2.05 (0.05) & 9.16 (0.30) & 0.16 (0.02) & 0.02\\
EMVS & 5.36 (0.16) & 0.00 (0.00) & 3.15 (0.06) & 36.64\\
EMVSS & 1.43 (0.03) & 1.02 (0.07) & 0.21 (0.02) & 80.68\\
EMSH & 1.79 (0.05) & 2.10 (0.15) & 0.43 (0.04) & 0.80\\
EMSHS & 1.16 (0.02) & 0.64 (0.11) & 0.01 (0.01) & 4.67\\
\hline
& \multicolumn{4}{l}{Scenario \#3: Worst case (Reference)}\\
Lasso & 3.31 (0.07) & 28.10 (0.54) & 0.34 (0.03) & 0.03\\
ALasso & 3.17 (0.07) & 15.65 (0.47) & 0.57 (0.04) & 0.03\\
EMVS & 7.13 (0.13) & 0.00 (0.00) & 4.23 (0.04) & 28.85\\
EMVSS & 3.27 (0.08) & 3.97 (0.15) & 1.37 (0.05) & 56.38\\
EMSH & 2.91 (0.08) & 4.91 (0.26) & 1.03 (0.05) & 0.87\\
EMSHS & 3.04 (0.08) & 9.34 (0.45) & 1.04 (0.05) & 4.34\\
\hline
& \multicolumn{4}{l}{Scenario \#4: Worst case (Ideal)}\\
Lasso & 2.24 (0.04) & 24.25 (0.55) & 0.04 (0.01) & 0.03\\
ALasso & 1.98 (0.04) & 8.45 (0.32) & 0.14 (0.02) & 0.03\\
EMVS & 5.14 (0.15) & 0.00 (0.00) & 3.01 (0.06) & 31.66\\
EMVSS & 1.84 (0.05) & 2.22 (0.10) & 0.48 (0.03) & 54.44\\
EMSH & 1.72 (0.04) & 2.13 (0.15) & 0.35 (0.03) & 0.72\\
EMSHS & 1.94 (0.05) & 7.55 (0.39) & 0.40 (0.03) & 4.59\\
\hline
& \multicolumn{4}{l}{Scenario \#5: Intermediate case}\\
Lasso & 3.26 (0.07) & 27.68 (0.54) & 0.32 (0.03) & 0.03\\
ALasso & 3.11 (0.07) & 14.75 (0.46) & 0.54 (0.04) & 0.03\\
EMVS & 6.95 (0.12) & 0.00 (0.00) & 4.17 (0.04) & 24.49\\
EMVSS & 1.94 (0.06) & 2.27 (0.11) & 0.46 (0.03) & 63.24\\
EMSH & 2.81 (0.07) & 4.66 (0.25) & 0.95 (0.05) & 0.74\\
EMSHS & 1.72 (0.06) & 2.47 (0.22) & 0.26 (0.03) & 5.37\\
\hline
\end{tabular}
\end{table}
\end{small}
\begin{small}
\begin{table}
\caption{\label{tbl3} The mean squared prediction error (MSPE) for the test data, the number of false positives (FP), the number of false negatives (FN), and the average computation time per tuning parameter value in seconds are recorded for $p=100{,}000$ case. In the parentheses are the corresponding standard errors.}
\begin{tabular}{l|cccr}
Method & MSPE & FP & FN & \multicolumn{1}{c}{Time}\\
\hline
& \multicolumn{4}{l}{Scenario \#1: Reference case}\\
Lasso & 4.87 (0.09) & 30.31 (0.65) & 1.21 (0.05) & 0.12\\
ALasso & 4.77 (0.09) & 16.91 (0.57) & 1.56 (0.06) & 0.18\\
EMSH & 4.66 (0.11) & 4.83 (0.29) & 2.35 (0.06) & 8.66\\
EMSHS & 3.28 (0.12) & 3.67 (0.28) & 1.26 (0.07) & 18.57\\
\hline
& \multicolumn{4}{l}{Scenario \#2: Ideal case}\\
Lasso & 3.23 (0.07) & 28.67 (0.61) & 0.29 (0.03) & 0.12\\
ALasso & 3.07 (0.08) & 11.68 (0.37) & 0.53 (0.04) & 0.13\\
EMSH & 2.93 (0.10) & 2.85 (0.19) & 1.26 (0.05) & 7.38\\
EMSHS & 1.43 (0.07) & 1.02 (0.12) & 0.10 (0.03) & 16.55\\
\hline
& \multicolumn{4}{l}{Scenario \#3: Worst case (Reference)}\\
Lasso & 5.08 (0.09) & 29.82 (0.65) & 1.31 (0.06) & 0.11\\
ALasso & 4.99 (0.10) & 17.20 (0.61) & 1.69 (0.06) & 0.16\\
EMSH & 4.89 (0.10) & 5.14 (0.32) & 2.47 (0.06) & 8.26\\
EMSHS & 4.94 (0.10) & 6.12 (0.37) & 2.49 (0.06) & 13.82\\
\hline
& \multicolumn{4}{l}{Scenario \#4: Worst case (Ideal)}\\
Lasso & 3.34 (0.07) & 28.09 (0.60) & 0.27 (0.03) & 0.11\\
ALasso & 3.18 (0.08) & 11.71 (0.39) & 0.57 (0.04) & 0.15\\
EMSH & 3.01 (0.09) & 2.92 (0.19) & 1.23 (0.06) & 7.01\\
EMSHS & 3.07 (0.09) & 4.99 (0.30) & 1.23 (0.06) & 12.46\\
\hline
& \multicolumn{4}{l}{Scenario \#5: Intermediate case}\\
Lasso & 5.07 (0.09) & 29.67 (0.63) & 1.29 (0.06) & 0.11\\
ALasso & 4.99 (0.10) & 16.36 (0.57) & 1.66 (0.06) & 0.16\\
EMSH & 4.83 (0.11) & 4.87 (0.30) & 2.41 (0.07) & 8.11\\
EMSHS & 3.55 (0.13) & 3.85 (0.30) & 1.44 (0.08) & 15.62\\
\hline
\end{tabular}
\end{table}
\end{small}
\section{Data Application}
We applied the proposed method to the analysis of a glioblastoma data set obtained from The Cancer Genome Atlas Network \citep{verhaak2010integrated}. The data set includes survival times ($T$) and gene expression data for $p=12{,}999$ genes ($X$) for 303 glioblastoma patients. As glioblastoma is known as one of the most aggressive cancers, only $12\%$ of the samples were censored. We removed the censored observations, resulting in a sample size of $n=267$ for analysis. We fit an accelerated failure time (AFT) model as follows
\begin{align*}
\log T_i = \beta_1 X_{i1} + \cdots + \beta_p X_{ip} + \epsilon_i, \qquad i=1,\dots,n,
\end{align*}
where $\epsilon_i$'s are independent Gaussian random variables and all variables were standardized to have mean 0 and variance 1. The network information ($\mathcal{G}$) for $X$ was retrieved from the Kyoto Encyclopedia of Genes and Genomes (KEGG) database including a total of 332 KEGG pathways and $31{,}700$ edges in these pathways.
In addition to EMSHS and EMSH, we included several competing methods that are computationally feasible, namely, lasso, adaptive lasso, EMVS, and EMVSS. The optimal tuning parameters were chosen by minimizing the 5-fold cross-validated mean-squared prediction error. The tuning parameter $\mu$ had 20 candidate values ranging from 5.5 to 6.5 ensuring solutions with various sparsity to be considered. We used $a_\sigma=1$ and $b_\sigma=1$ for prior of $\sigma^2$, which is uninformative. As shown in Table \ref{tbl4}, EMSHS achieves the best prediction performance followed by EMSH and both are substantially less expensive than EMVS and EMVSS in terms of computation. Similar to our simulation results, EMSH again yields better prediction performance than adaptive lasso, demonstrating the advantage of using the data to learn adaptive shrinkage in EMSH.
\begin{small}
\begin{table}
\caption{\label{tbl4} Cross-validated mean squared prediction error (CV MSPE) and computation time in seconds per tuning parameter (Time) from the analysis of TCGA genomic data and KEGG pathway information.}
\begin{tabular}{l|cr}
Method & CV MSPE & \multicolumn{1}{c}{Time}\\
\hline
Lasso & 0.986 & 0.2\\
ALasso & 0.996 & 0.4\\
EMVS & 0.996 & 1346.6\\
EMVSS & 0.982 & 8284.1\\
EMSH & 0.979 & 14.3\\
EMSHS & 0.975 & 17.0\\
\hline
\end{tabular}
\end{table}
\end{small}
To assess variable selection performance of EMSHS and EMSH, we conducted a second analysis. We randomly divided the entire sample into two subsets; the first subset with $187$ subjects (70\% of the whole sample) was used as the training data to fit the model and the second subset with 30\% of the subjects was used as the validation data to select optimal tuning parameter values. We repeated this procedure 100 times, resulting in 100 EMSHS and 100 EMSH solutions. Of the 100 random splits, 28 genes were selected at least once by EMSH and 21 genes were selected at least once by EMSHS. Further examination reveals that the set of genes that were selected by EMSH but not by EMSHS belong to pathways in which most of the genes were not selected. This lends support to the notion that incorporating graph information may reduce false positives, which is consistent with the findings in our simulations where EMSHS tends to yield lower false positives than
EMSH in simulation scenarios 1, 2 and 5.
The 3 genes most frequently selected by EMSHS are TOM1L1, RANBP17, and BRD7. In this set of genes the Wnt signaling pathway \citep{Kandasamy2010} was identified as an enriched pathway by the ToppGene Suite \citep{Chen2009}. Abnormalities in the Wnt signaling pathway have been associated with human malignancies in the literature. For example, BRD7 has been shown to be correlated with enlarged lateral ventricles in mice and highly expressed in gliomas \citep{Tang2014}. TOM1L1 depletion has been shown to decrease tumor growth in xenografted nude mice \citep{Sirvent2012}. EMSHS reported average estimated coefficients of $-0.146$, $0.181$, and $0.193$ for TOM1L1, RANBP17, and BRD7, respectively. The signs of the coefficients are consistent with the known knowledge about the genes in promoting/suppressing the development of cancer. Our data analyses demonstrate that EMSHS yields biologically meaningful results.
\section{Discussion}
This article introduces a scalable Bayesian regularized regression approach and an associated EM algorithm which can incorporate the structural information between covariates in high dimensional settings. The approach relies on specifying informative priors on the log-shrinkage parameters of the Laplace priors on the regression coefficients, which results in adaptive regularization. The method does not rely on initial estimates for weights as in adaptive lasso approaches, which provides computational advantages in higher dimensions as demonstrated in simulations. Appealing theoretical properties for both fixed and diverging dimensions are established, under very general assumptions, even when the true graph is mis-specified. The method demonstrates encouraging numerical performance in terms of scalability, prediction and variable selection, with significant gains when the prior graph is correctly specified, and a robust performance under prior graph mis-specification.
Extending the current approach to more general types of outcomes such as binary or categorical should be possible \citep{mccullagh1989generalized}, although the complexity of the optimization problem may increase. These issues can potentially be addressed using a variety of recent advances in literature involving EM approaches via latent variables \citep{polson2013bayesian}, coordinate descent method \citep{wu2008coordinate}, and other optimization algorithms \citep{nocedal2006numerical} which are readily available to facilitate computation. We leave this task as a future research question of interest.
{}
\section*{Appendix}
\subsection*{Proof of Proposition \ref{pro}}
\begin{proof}[Proof of Proposition \ref{pro}]
Note that $\mathit{\Omega} = I + W D_{\boldsymbol \omega} W'$ where $W$ is a $p \times p(p-1)/2$ matrix, whose column $\mathbf w_{jk}$ corresponding to the edge $(j,k)$ is $\mathbf e_j-\mathbf e_k$.
Here $\mathbf e_j$ is the $p \times 1$ coordinate vector whose $j$-th element is 1 and all others are zero.
Therefore, we have $|\mathit{\Omega}| \ge 1$ and
\begin{align*}
\int |\mathit{\Omega}|^{-1/2} \prod_{G_{jk}=1} \omega_{jk}^{a_\omega-1} \exp ( -b_\omega \omega_{jk} ) \mathbf 1 (\omega_{jk}>0) \prod_{G_{jk}=0} \delta_0(\omega_{jk}) d\boldsymbol \omega \le \Gamma(a_\omega)^{|E|} b_\omega^{-a_\omega|E|} < \infty.
\end{align*}
\end{proof}
\subsection*{Proof of Theorem \ref{thm:oracle}}
We first prove Theorem \ref{thm:oracle} as it is more general than Theorem \ref{oracle:fix}. We then prove Theorem \ref{oracle:fix} as a special case. Note from the M-step for $\boldsymbol \beta$ that
\begin{align} \label{sol:lasso}
\widehat{\boldsymbol \beta}_n = \operatorname*{argmin}_{\boldsymbol \beta} \, \frac{1}{2} (\mathbf y_n - X_n \boldsymbol \beta)'(\mathbf y_n-X_n\boldsymbol \beta) + \sum_{j=1}^{p_n} \widehat{\xi}_{nj} |\beta_j|,
\end{align}
where $\widehat{\boldsymbol \xi}_n = \widehat{\sigma}_n \widehat{\boldsymbol \lambda}_n = \widehat{\sigma}_n e^{\widehat{\boldsymbol \alpha}_n}$. Then, by the Karush--Kuhn--Tucker (KKT) conditions (see, for example, \citet{Chang2010}), the solution is given by
\begin{align}
\label{sol:beta1} \widehat{\boldsymbol \beta}_{\mathcal{A}_n} &= (X_{\mathcal{A}_n} ' X_{\mathcal{A}_n})^{-1} ( X_{\mathcal{A}_n}' \mathbf y - S_{n\mathcal{A}\mathcal{A}} \widehat{\boldsymbol \xi}_{\mathcal{A}_n} ),\\
\label{sol:beta0} \widehat{\boldsymbol \beta}_{\mathcal{A}_n^c} &= \mathbf 0,
\end{align}
and satisfies
\begin{align}
\label{sol:inactive} | \mathbf x_{nj}' ( \mathbf y_n - X_n \widehat{\boldsymbol \beta}_n ) | \le \widehat{\xi}_{nj}, \qquad j \notin \mathcal{A}_n,
\end{align}
where
\begin{align} \label{sol:sign}
S_n =\mathrm{diag}(\mathrm{sign}(\widehat{\beta}_{n1}),\dots,\mathrm{sign}(\widehat{\beta}_{np_n}))
\end{align}
is the sign matrix of $\widehat{\boldsymbol \beta}_n$. The M-step for $\sigma$ yields
\begin{align} \label{sol:sigma}
\widehat{\sigma}_n = \frac{\widehat{c}_{2n} + \sqrt{\widehat{c}_{2n}^2+8\widehat{c}_{1n}c_{3n}}}{2c_{3n}},
\end{align}
where $\widehat{c}_{1n} = \frac{1}{2}(\mathbf y_n - X_n \widehat{\boldsymbol \beta}_n)'(\mathbf y_n - X_n \widehat{\boldsymbol \beta}_n) + b_{\sigma n}$, $\widehat{c}_{2n} = \sum_{j=1}^{p_n} e^{\widehat{\alpha}_{nj}} |\widehat{\beta}_{nj}|$, and $c_{3n} = n+p_n+2a_{\sigma n}+2$.
In addition, from the M-step for $\boldsymbol \alpha$, the solution satisfies
\begin{align} \label{sol:alpha}
|\widehat{\beta}_{nj}| e^{\widehat{\alpha}_{nj}} = \frac{\widehat{\sigma}_n}{\nu_n} \left( \mu_n+\nu_n - \widehat{\alpha}_{nj} + \sum_{k \sim j} \omega_{jk}^{(\infty)} (\widehat{\alpha}_{nk} - \widehat{\alpha}_{nj}) \right), \qquad j=1,\dots,p_n.
\end{align}
Let $\mathcal{B}_n = \mathcal{A}_n \cap \mathcal{A}_0$, $\mathcal{C}_n = \mathcal{A}_n - \mathcal{A}_0$, and $\mathcal{D}_n = \mathcal{A}_0 - \mathcal{A}_n$.
The residual vector and the SSE are given by
\begin{align} \label{solution:residual:full}
\mathbf y_n - X_n \widehat{\boldsymbol \beta}_n = ( I - H_{\mathcal{A}_n} ) ( X_{\mathcal{D}_n} \boldsymbol \beta_{0\mathcal{D}_n} + \boldsymbol \epsilon_n ) + X_{\mathcal{A}_n} \left( X_{\mathcal{A}_n}' X_{\mathcal{A}_n} \right)^{-1} S_{n\mathcal{A}\mathcal{A}} \widehat{\boldsymbol \xi}_{\mathcal{A}_n},
\end{align}
and
\begin{align}
\nonumber \| \mathbf y_n - X_n \widehat{\boldsymbol \beta}_n \|^2 &= ( X_{\mathcal{D}_n} \boldsymbol \beta_{0\mathcal{D}_n} + \boldsymbol \epsilon_n ) '( I - H_{\mathcal{A}_n} ) ( X_{\mathcal{D}_n} \boldsymbol \beta_{0\mathcal{D}_n} + \boldsymbol \epsilon_n )\\
\label{solution:SSE:full} & \qquad + \widehat{\boldsymbol \xi}_{\mathcal{A}_n}' S_{n\mathcal{A}\mathcal{A}} \left( X_{\mathcal{A}_n}' X_{\mathcal{A}_n} \right)^{-1} S_{n\mathcal{A}\mathcal{A}} \widehat{\boldsymbol \xi}_{\mathcal{A}_n},
\end{align}
where $H_{\mathcal{A}_n} = X_{\mathcal{A}_n} \left( X_{\mathcal{A}_n}' X_{\mathcal{A}_n} \right)^{-1} X_{\mathcal{A}_n}'$.
Due to the partial orthogonality \ref{ass:rho}, the solution $\widehat{\boldsymbol \beta}_n$ can be rewritten as
\begin{align} \label{solution:beta}
\left[ \begin{array}{c} \widehat{\boldsymbol \beta}_{\mathcal{B}_n}\\ \widehat{\boldsymbol \beta}_{\mathcal{C}_n} \end{array} \right] = \left[ \begin{array}{c} \boldsymbol \beta_{0\mathcal{B}_n} + (X_{\mathcal{B}_n}'X_{\mathcal{B}_n})^{-1} (X_{\mathcal{B}_n}' (X_{\mathcal{D}_n} \boldsymbol \beta_{0\mathcal{D}_n} + \boldsymbol \epsilon_n) - S_{n\mathcal{B}\mathcal{B}} \widehat{\boldsymbol \xi}_{\mathcal{B}_n})\\
(X_{\mathcal{C}_n}'X_{\mathcal{C}_n})^{-1} (X_{\mathcal{C}_n}'\boldsymbol \epsilon_n - S_{n\mathcal{C}\mathcal{C}} \widehat{\boldsymbol \xi}_{\mathcal{C}_n}) \end{array} \right] + O_p(\rho_n)
\end{align}
where the last term $O_p(\rho_n)$ exists only when $\mathcal{C}_n \neq \emptyset$.
The residual vector if $\mathcal{C}_n = \emptyset$ is given by
\begin{align} \label{solution:residual}
\mathbf y_n - X_n \widehat{\boldsymbol \beta}_n = ( I - H_{\mathcal{B}_n} ) ( X_{\mathcal{D}_n} \boldsymbol \beta_{0\mathcal{D}_n} + \boldsymbol \epsilon_n ) + X_{\mathcal{B}_n} \left( X_{\mathcal{B}_n}' X_{\mathcal{B}_n} \right)^{-1} S_{n\mathcal{B}\mathcal{B}} \widehat{\boldsymbol \xi}_{\mathcal{B}_n}.
\end{align}
\begin{lem} \label{lemma:sigma}
The following statements are true.
\begin{enumerate}
\item $\widehat{\sigma}_n^2 = O_p((1+p_n/n)^{-1})$.
\item $\widehat{\sigma}_n^{-2} = O_p((1+p_n/n)n^{1-z})$.
\end{enumerate}
\end{lem}
\begin{proof}
\begin{enumerate}
\item Since $\widehat{\boldsymbol \beta}_n$ is the solution of \eqref{sol:lasso} and since $\boldsymbol \beta = \mathbf 0$ is a possible solution, we have
\begin{align*}
\widehat{c}_{1n} + \widehat{\sigma}_n \widehat{c}_{2n} = \frac{1}{2} \|\mathbf y_n - X_n \widehat{\boldsymbol \beta}_n\|^2 + b_{\sigma n} + \widehat{\sigma}_n \sum_{j=1}^{p_n} e^{\widehat{\alpha}_{nj}} |\widehat{\beta}_{nj}| \le \frac{1}{2} \|\mathbf y_n\|^2 + b_{\sigma n}.
\end{align*}
Since
\begin{align*}
\|\mathbf y_n\|^2 = \|X_n \boldsymbol \beta_0\|^2 + 2\boldsymbol \beta_0'X_n'\boldsymbol \epsilon_n + \|\boldsymbol \epsilon_n\|^2 = \tau_2 n + \sigma_0^2n + O_p(n^{1/2}) = O_p(n),
\end{align*}
we have $\widehat{c}_{1n} = O_p(n)$ and $\widehat{\sigma}_n \widehat{c}_{2n} = O_p(n)$.
Note that
\begin{align} \label{sigma_bound}
\sqrt{\frac{2\widehat{c}_{1n}}{c_{3n}}} \le \widehat{\sigma}_n = \frac{\widehat{c}_{2n} + \sqrt{\widehat{c}_{2n}^2 + 8\widehat{c}_{1n}c_{3n}} }{2c_{3n}} \le \frac{\widehat{c}_{2n}}{c_{3n}} + \sqrt{\frac{2\widehat{c}_{1n}}{c_{3n}}}.
\end{align}
Since, if $0 \le b \le x \le a+b$, it follows that
\begin{align*}
x^2 \le ax+bx \le ax+b(a+b) \le 2ax + b^2,
\end{align*}
we have
\begin{align*}
\widehat{\sigma}_n^2 \le \frac{2 \widehat{\sigma}_n \widehat{c}_{2n}}{c_{3n}} + \frac{2\widehat{c}_{1n}}{c_{3n}} = O_p((1+p_n/n)^{-1}).
\end{align*}
\item Since $\widehat{c}_{1n} \ge b_{\sigma n}$, the result follows by the lower bound in \eqref{sigma_bound} and \ref{ass:sigma}.
\end{enumerate}
\end{proof}
| 3,264 | 38,227 |
en
|
train
|
0.161.11
|
\begin{lem} \label{lemma:alpha}
The following statements are true.
\begin{enumerate}
\item $\|\widehat{\boldsymbol \beta}_n\| = O_p(1)$.
\item $\max_{1 \le j \le p_n} \widehat{\alpha}_{nj} \le \mu_n+\nu_n$.
\item $\min_{1 \le j \le p_n} \widehat{\alpha}_{nj} \ge \frac{1}{2} \log (1+p_n/n) + (r-1/2) \log n + o_p(\log n)$.
\end{enumerate}
\end{lem}
\begin{proof}
\begin{enumerate}
\item As $\widehat{c}_{1n} = O_p(n)$ by Lemma \ref{lemma:sigma}, note that
\begin{align*}
\|\mathbf y_n-X_n\widehat{\boldsymbol \beta}_n\|^2 = \|\boldsymbol \epsilon_n\|^2 - 2(\widehat{\boldsymbol \beta}_n-\boldsymbol \beta_0)'X_n'\boldsymbol \epsilon_n + \|X_n(\widehat{\boldsymbol \beta}_n-\boldsymbol \beta_0)\|^2 = O_p(n).
\end{align*}
This implies $\|X_n(\widehat{\boldsymbol \beta}_n-\boldsymbol \beta_0)\|^2 = O_p(n)$. Since $\|\boldsymbol \beta_0\|=1$ and
\begin{align*}
\|X_{\mathcal{A}_n} \widehat{\boldsymbol \beta}_{\mathcal{A}_n}\| \le \|X_n(\widehat{\boldsymbol \beta}_n-\boldsymbol \beta_0)\| + \|X_n \boldsymbol \beta_0\|,
\end{align*}
the result follows by \ref{ass:XX}.
\item Let $j_1 = \operatorname*{argmax}_j \widehat{\alpha}_{nj}$. Due to \eqref{sol:alpha}, note that
\begin{align*}
\widehat{\alpha}_{nj_1}-\mu_n-\nu_n \le \sum_{k\sim j_1} \omega_{j_1k}^{(\infty)} ( \widehat{\alpha}_{nk} - \widehat{\alpha}_{nj_1} ) \le 0.
\end{align*}
Therefore, we have $\widehat{\alpha}_{nj_1} \le \mu_n + \nu_n$.
\item Let $j_0 = \operatorname*{argmin}_j \widehat{\alpha}_{nj}$. Due to \eqref{sol:alpha}, note that
\begin{align*}
|\widehat{\beta}_{nj_0}| e^{\widehat{\alpha}_{nj_0}} \ge \widehat{\sigma}_n \frac{\mu_n + \nu_n - \widehat{\alpha}_{nj_0}}{\nu_n}.
\end{align*}
By Lemma \ref{lemma:sigma}(b), Lemma \ref{lemma:alpha}(a), and \ref{ass:nu}, note that
\begin{align*}
\widehat{\alpha}_{nj_0} &\ge -\log |\widehat{\beta}_{nj_0}| + \log \widehat{\sigma}_n + \log (\mu_n+\nu_n-\widehat{\alpha}_{nj_0}) - \log \nu_n\\
&\ge (r-1/2) \log n + \frac{1}{2} \log (1+p_n/n) + \log (\mu_n+\nu_n-\widehat{\alpha}_{nj_0}) + o_p(\log n).
\end{align*}
By \ref{ass:mu}, we have
\begin{align*}
\mu_n - \widehat{\alpha}_{nj_0} + \log (\mu_n+\nu_n-\widehat{\alpha}_{nj_0}) \le (R-r+1/2)\log n + o(\log n).
\end{align*}
Note that $\nu_n = o(\log n)$ by \ref{ass:nu}. Since $R-r+1/2>0$, we have
\begin{align*}
\mu_n - \widehat{\alpha}_{nj_0} \le (R-r+1/2)\log n + o(\log n).
\end{align*}
Hence, the result follows.
\end{enumerate}
\end{proof}
\begin{lem} \label{lemma:xi}
The following statements are true.
\begin{enumerate}
\item $\max_{1 \le j \le p_n} \widehat{\xi}_{nj} = o_p(\widehat{\sigma}_n (1+p_n/n)^{1/2} n^{1-u})$.
\item $M_n = \max_{1 \le j \le p_n} |m_{nj}| = o(\log n)$, where $m_{nj} = \sum_{k\sim j} \omega_{jk}^{(\infty)} ( \widehat{\alpha}_{nk} - \widehat{\alpha}_{nj} )$.
\item If $|\widehat{\beta}_{nj}| = 0$ for large $n$, then we have
\begin{align*}
\widehat{\xi}_{nj} > C_2 \widehat{\sigma}_n (1+p_n/n)^{1/2} n^{R-\zeta},
\end{align*}
for all $C_2>0$, all $\zeta>0$, and large $n$.
\item If $ |\widehat{\beta}_{nj}| \le C_1 n^{-c}$ for some $C_1>0$, all $c<R-r$, and large $n$, we have
\begin{align*}
\widehat{\xi}_{nj} > C_2 \widehat{\sigma}_n^2 (1+p_n/n) n^{r+c-\zeta},
\end{align*}
for all $C_2>0$, all $\zeta>0$, and large $n$, and we have
\begin{align*}
|\widehat{\beta}_{nj}| \widehat{\xi}_{nj} \le C_3 \widehat{\sigma}_n^2 (1+p_n/n) n^r,
\end{align*}
for some $C_3>0$ and large $n$.
\item If $ |\widehat{\beta}_{nj}| \ge C_1 n^{-c}$ for some $C_1>0$, all $c<R-r$, and large $n$, we have
\begin{align*}
\widehat{\xi}_{nj} < C_2 \widehat{\sigma}_n^2 (1+p_n/n) n^{r+c+\zeta},
\end{align*}
for all $C_2>0$, all $\zeta>0$, and large $n$, and we have
\begin{align*}
|\widehat{\beta}_{nj}| \widehat{\xi}_{nj} \ge C_3 \widehat{\sigma}_n^2 (1+p_n/n) n^r,
\end{align*}
for some $C_3>0$ and large $n$.
\end{enumerate}
\end{lem}
\begin{proof}
\begin{enumerate}
\item By Lemma \ref{lemma:alpha}(b), the claim follows due to \ref{ass:mu} and \ref{ass:nu}.
\item Note that, by Lemma \ref{lemma:alpha}, we have
\begin{align*}
|\widehat{\alpha}_{nk} - \widehat{\alpha}_{nj}| \le \max_j \widehat{\alpha}_{nj} - \min_j \widehat{\alpha}_{nj} = O_p(\log n).
\end{align*}
On the other hand, we have $\omega_{jk}^{(\infty)} \le a_{\omega n} b_{\omega n}^{-1}$ by \eqref{eq:Estep}. Then, by \ref{ass:omega}, we have
\begin{align*}
M_n \le \max_{1 \le j \le p_n} \sum_{k \sim j} \omega_{jk}^{(\infty)} | \widehat{\alpha}_{nk} - \widehat{\alpha}_{nj} | \le L_n a_{\omega n} b_{\omega n}^{-1} O(\log n) = o(\log n).
\end{align*}
\item If $|\widehat{\beta}_{nj}| = 0$, then we have $\widehat{\alpha}_{nj} = \mu_n + \nu_n + m_{nj}$. The claim follows by \ref{ass:mu}, \ref{ass:nu}, and part (b).
\item By \eqref{sol:alpha} and \ref{ass:nu}, note that
\begin{align*}
\widehat{\alpha}_{nj} &= -\log |\widehat{\beta}_{nj}| + \log \widehat{\sigma}_n + \log (\mu_n+\nu_n-\widehat{\alpha}_{nj} + m_{nj}) - \log \nu_n\\
&\ge (r+c)\log n + \log \widehat{\sigma}_n + \log (1+p_n/n) + \log (\mu_n+\nu_n-\widehat{\alpha}_{nj} + m_{nj}) + o(\log n).
\end{align*}
By \ref{ass:mu}, we have
\begin{align*}
\mu_n - \widehat{\alpha}_{nj} + & \log (\mu_n+\nu_n-\widehat{\alpha}_{nj} + m_{nj})\\
&\le (R-r-c)\log n - \log \widehat{\sigma}_n - \frac{1}{2} \log (1+p_n/n) + o(\log n).
\end{align*}
Note that $\nu_n + m_{nj} = o(\log n)$ by \ref{ass:nu} and part (b). Since $c<R-r$ and by Lemma \ref{lemma:sigma}, we have
\begin{align} \label{core}
\mu_n - \widehat{\alpha}_{nj} \le (R-r-c)\log n - \log \widehat{\sigma}_n - \frac{1}{2} \log (1+p_n/n) + o(\log n),
\end{align}
and therefore
\begin{align*}
\widehat{\alpha}_{nj} \ge (r+c)\log n + \log \widehat{\sigma}_n + \log (1+p_n/n) + o(\log n).
\end{align*}
This implies
\begin{align*}
\widehat{\xi}_{nj} > C_2 \widehat{\sigma}_n^2 (1+p_n/n) n^{r+c-\zeta}.
\end{align*}
On the other hand, by \eqref{sol:alpha}, \eqref{core}, and \ref{ass:nu}, we have
\begin{align*}
|\widehat{\beta}_{nj}| \widehat{\xi}_{nj} = \widehat{\sigma}_n^2 \frac{\mu_n+\nu_n-\widehat{\alpha}_{nj} + m_{nj}}{\nu_n} \le C_3\widehat{\sigma}_n^2 (1+p_n/n) n^r.
\end{align*}
\item The arguments are parallel to those in part (d).
\end{enumerate}
\end{proof}
\begin{lem} \label{lemma:xibound}
Suppose the $p_n \times 1$ vectors $\underline{\boldsymbol \alpha}_n$ and $\overline{\boldsymbol \alpha}_n$ satisfy, given $\widehat{\boldsymbol \beta}_n$ and $\widehat{\sigma}_n$,
\begin{align}
\label{def:loweralpha} |\widehat{\beta}_{nj}| e^{\underline{\alpha}_{nj}} &= \widehat{\sigma}_n \frac{\mu_n+\nu_n-\underline{\alpha}_{nj}-M_n}{\nu_n}, \qquad 1 \le j \le p_n,\\
\label{def:upperalpha} |\widehat{\beta}_{nj}| e^{\overline{\alpha}_{nj}} &= \widehat{\sigma}_n \frac{\mu_n+\nu_n-\overline{\alpha}_{nj}+M_n}{\nu_n}, \qquad 1 \le j \le p_n.
\end{align}
Let $\underline{\xi}_{nj} = \widehat{\sigma}_n e^{\underline{\alpha}_{nj}}$ and $\overline{\xi}_{nj} = \widehat{\sigma}_n e^{\overline{\alpha}_{nj}}$. Then, the following statements are true.
\begin{enumerate}
\item $\underline{\alpha}_{nj} \le \widehat{\alpha}_{nj} \le \overline{\alpha}_{nj}$ for all $1 \le j \le p_n$.
\item $\underline{\alpha}_{nj}$ is a decreasing function of $|\widehat{\beta}_{nj}|$ and $|\widehat{\beta}_{nj}| \underline{\xi}_{nj}$ is a decreasing function of $\underline{\alpha}_{nj}$. These hold for $\overline{\alpha}_{nj}$ and $\overline{\xi}_{nj}$ analogously.
\item Lemma \ref{lemma:xi}(c), \ref{lemma:xi}(d), and \ref{lemma:xi}(e) hold with $\widehat{\xi}_{nj}$ replaced by $\underline{\xi}_{nj}$ (or $\overline{\xi}_{nj}$) as well.
\end{enumerate}
\end{lem}
\begin{proof}
\begin{enumerate}
\item Obvious from \eqref{def:loweralpha}, \eqref{def:upperalpha}, and the definition of $M_n$.
\item Obvious from the definitions \eqref{def:loweralpha} and \eqref{def:upperalpha}.
\item The same arguments as in the proof of Lemma \ref{lemma:xi}(c), \ref{lemma:xi}(d), and \ref{lemma:xi}(e) are valid with $m_{nj}$ and $\widehat{\alpha}_{nj}$ replaced by $M_n$ and $\underline{\alpha}_{nj}$ (or $\overline{\alpha}_{nj}$), respectively.
\end{enumerate}
\end{proof}
| 3,445 | 38,227 |
en
|
train
|
0.161.12
|
\begin{lem} \label{lemma:xibound}
Suppose the $p_n \times 1$ vectors $\underline{\boldsymbol \alpha}_n$ and $\overline{\boldsymbol \alpha}_n$ satisfy, given $\widehat{\boldsymbol \beta}_n$ and $\widehat{\sigma}_n$,
\begin{align}
\label{def:loweralpha} |\widehat{\beta}_{nj}| e^{\underline{\alpha}_{nj}} &= \widehat{\sigma}_n \frac{\mu_n+\nu_n-\underline{\alpha}_{nj}-M_n}{\nu_n}, \qquad 1 \le j \le p_n,\\
\label{def:upperalpha} |\widehat{\beta}_{nj}| e^{\overline{\alpha}_{nj}} &= \widehat{\sigma}_n \frac{\mu_n+\nu_n-\overline{\alpha}_{nj}+M_n}{\nu_n}, \qquad 1 \le j \le p_n.
\end{align}
Let $\underline{\xi}_{nj} = \widehat{\sigma}_n e^{\underline{\alpha}_{nj}}$ and $\overline{\xi}_{nj} = \widehat{\sigma}_n e^{\overline{\alpha}_{nj}}$. Then, the following statements are true.
\begin{enumerate}
\item $\underline{\alpha}_{nj} \le \widehat{\alpha}_{nj} \le \overline{\alpha}_{nj}$ for all $1 \le j \le p_n$.
\item $\underline{\alpha}_{nj}$ is a decreasing function of $|\widehat{\beta}_{nj}|$ and $|\widehat{\beta}_{nj}| \underline{\xi}_{nj}$ is a decreasing function of $\underline{\alpha}_{nj}$. These hold for $\overline{\alpha}_{nj}$ and $\overline{\xi}_{nj}$ analogously.
\item Lemma \ref{lemma:xi}(c), \ref{lemma:xi}(d), and \ref{lemma:xi}(e) hold with $\widehat{\xi}_{nj}$ replaced by $\underline{\xi}_{nj}$ (or $\overline{\xi}_{nj}$) as well.
\end{enumerate}
\end{lem}
\begin{proof}
\begin{enumerate}
\item Obvious from \eqref{def:loweralpha}, \eqref{def:upperalpha}, and the definition of $M_n$.
\item Obvious from the definitions \eqref{def:loweralpha} and \eqref{def:upperalpha}.
\item The same arguments as in the proof of Lemma \ref{lemma:xi}(c), \ref{lemma:xi}(d), and \ref{lemma:xi}(e) are valid with $m_{nj}$ and $\widehat{\alpha}_{nj}$ replaced by $M_n$ and $\underline{\alpha}_{nj}$ (or $\overline{\alpha}_{nj}$), respectively.
\end{enumerate}
\end{proof}
\begin{lem} \label{lemma:main1}
$P(\mathcal{A}_n \nsubseteq \mathcal{A}_0) = P(\mathcal{C}_n \neq \emptyset) \rightarrow 0$.
\end{lem}
\begin{proof}
Suppose $\mathcal{C}_n \neq \emptyset$. By \eqref{solution:beta} and \ref{ass:rho}, note that
\begin{align*}
\widehat{\sigma}_n \sum_{j \in \mathcal{C}_n} e^{\widehat{\alpha}_{nj}} |\widehat{\beta}_{nj}| = {\widehat{\boldsymbol \xi}_{\mathcal{C}_n}}' S_{n\mathcal{C}\mathcal{C}} \widehat{\boldsymbol \beta}_{\mathcal{C}_n} = O_p(h_n^{1/2}) - h_n,
\end{align*}
where $h_n = {\widehat{\boldsymbol \xi}_{\mathcal{C}_n}}' S_{n\mathcal{C}\mathcal{C}} (X_{\mathcal{C}_n}'X_{\mathcal{C}_n})^{-1} S_{n\mathcal{C}\mathcal{C}} {\widehat{\boldsymbol \xi}_{\mathcal{C}_n}}$.
We claim that $h_n \rightarrow_p \infty$, so that the right-hand side tends to $-\infty$ in probability while the left-hand side stays positive; this yields $P(\mathcal{C}_n \neq \emptyset) \rightarrow 0$.
Suppose $\max_{j \in \mathcal{C}_n} |\widehat{\beta}_{nj} | \le C_2 n^{-1/2+(r+z-1)/2}$ for some $C_2>0$ and large $n$.
By Lemma \ref{lemma:sigma}, \ref{ass:sigma}, and Lemma \ref{lemma:xibound}(b) and \ref{lemma:xibound}(c) with $\zeta = (r+z-1)/4$, we have
\begin{align*}
\min_{j \in \mathcal{C}_n} \underline{\xi}_{nj} > C_1 n^{1/2},
\end{align*}
for all $C_1>0$ and large $n$. By Lemma \ref{lemma:xibound}(a), we have
\begin{align*}
P( \| \widehat{\boldsymbol \xi}_{\mathcal{C}_n} \| \le C_1 n^{1/2} \; \& \; \max_{j \in \mathcal{C}_n} |\widehat{\beta}_{nj} | \le C_2 n^{-1/2+(r+z-1)/2} ) \rightarrow 0,
\end{align*}
for all $C_1>0$ and all $C_2>0$.
On the other hand, suppose $\| \widehat{\boldsymbol \xi}_{\mathcal{C}_n} \| \le C_1 n^{1/2}$ for some $C_1>0$ and large $n$.
Since the errors are Gaussian, \eqref{solution:beta} gives $\max_{j \in \mathcal{C}_n} |\widehat{\beta}_{nj} | = o_p(n^{-1/2+\zeta})$ for all $\zeta>0$. Therefore, we have
\begin{align*}
P( \| \widehat{\boldsymbol \xi}_{\mathcal{C}_n} \| \le C_1 n^{1/2} \; \& \; \max_{j \in \mathcal{C}_n} |\widehat{\beta}_{nj} | > C_2 n^{-1/2+(r+z-1)/2} ) \rightarrow 0.
\end{align*}
We have reached
\begin{align*}
P( \| \widehat{\boldsymbol \xi}_{\mathcal{C}_n} \| \le C_1 n^{1/2}) \rightarrow 0,
\end{align*}
for all $C_1>0$. Since $h_n \ge (\tau_2n)^{-1} \| \widehat{\boldsymbol \xi}_{\mathcal{C}_n} \|^2$, we have $h_n \rightarrow_p \infty$, as claimed.
\end{proof}
\begin{lem} \label{lemma:main2}
$P(\mathcal{A}_0 \subsetneq \mathcal{A}_n) = P(\mathcal{C}_n = \emptyset \, \& \, \mathcal{D}_n \neq \emptyset) \rightarrow 0$.
\end{lem}
\begin{proof}
Suppose $\mathcal{C}_n = \emptyset$ and $\mathcal{D}_n \neq \emptyset$. This implies that
\begin{align*}
|X_{\mathcal{D}_n}' (\mathbf y_n - X_n \widehat{\boldsymbol \beta}_n)| < \widehat{\boldsymbol \xi}_{\mathcal{D}_n} = \widehat{\sigma}_n e^{\widehat{\boldsymbol \alpha}_{\mathcal{D}_n}}.
\end{align*}
We claim that this inequality is satisfied with probability tending to 0.
Note that $\|\widehat{\boldsymbol \xi}_{\mathcal{D}_n}\| = o_p(n^{1-u/2})$ by Lemma \ref{lemma:sigma}, Lemma \ref{lemma:xi}(a), and \ref{ass:q}.
On the other hand, note from \eqref{solution:residual} that
\begin{align*}
X_{\mathcal{D}_n}' (\mathbf y_n - X_n \widehat{\boldsymbol \beta}_n) &= X_{\mathcal{D}_n}' ( I - H_{\mathcal{B}_n} ) X_{\mathcal{D}_n} \boldsymbol \beta_{0\mathcal{D}_n} + X_{\mathcal{D}_n}'( I - H_{\mathcal{B}_n} ) \boldsymbol \epsilon_n\\
& \qquad + X_{\mathcal{D}_n}' X_{\mathcal{B}_n} \left( X_{\mathcal{B}_n}' X_{\mathcal{B}_n} \right)^{-1} S_{n\mathcal{B}\mathcal{B}} \widehat{\boldsymbol \xi}_{\mathcal{B}_n}\\
&= X_{\mathcal{D}_n}' ( I - H_{\mathcal{B}_n} ) X_{\mathcal{D}_n} \boldsymbol \beta_{0\mathcal{D}_n} + O_p(n^{1/2}q_n^{1/2}) + o_p(n^R q_n^{1/2})\\
&= X_{\mathcal{D}_n}' ( I - H_{\mathcal{B}_n} ) X_{\mathcal{D}_n} \boldsymbol \beta_{0\mathcal{D}_n} + o_p(n^{1-u/2}).
\end{align*}
Since $\|X_{\mathcal{D}_n}' ( I - H_{\mathcal{B}_n} ) X_{\mathcal{D}_n} \boldsymbol \beta_{0\mathcal{D}_n}\| \ge C nq_n^{-1/2}$ for some constant $C$ by \ref{ass:beta} and \ref{ass:XX}, the claim follows.
\end{proof}
\begin{lem} \label{lemma:sigma2}
$\widehat{\sigma}_n^2 = \Theta_p((1+p_n/n)^{-1})$.
\end{lem}
\begin{proof}
We already have $\widehat{\sigma}_n^2 = O_p((1+p_n/n)^{-1})$ by Lemma \ref{lemma:sigma}(a).
Lemmas \ref{lemma:main1} and \ref{lemma:main2} show that $P(\mathcal{A}_n \neq \mathcal{A}_0) \rightarrow 0$. Assume $\mathcal{A}_n = \mathcal{A}_0$; then, by \eqref{solution:SSE:full}, we have
\begin{align*}
\widehat{c}_{1n} \ge \frac{1}{2} \boldsymbol \epsilon_n' (I - H_{\mathcal{A}_0}) \boldsymbol \epsilon_n = \frac{1}{2} (n-q_n) \sigma_0^2 + O_p(n^{1/2}).
\end{align*}
By the lower bound in \eqref{sigma_bound}, we have $\widehat{\sigma}_n^{-2} = O_p(1+p_n/n)$.
\end{proof}
| 2,655 | 38,227 |
en
|
train
|
0.161.13
|
\begin{lem} \label{lemma:sigma2}
$\widehat{\sigma}_n^2 = \Theta_p((1+p_n/n)^{-1})$.
\end{lem}
\begin{proof}
We already have $\widehat{\sigma}_n^2 = O_p((1+p_n/n)^{-1})$ by Lemma \ref{lemma:sigma}(a).
Lemmas \ref{lemma:main1} and \ref{lemma:main2} show that $P(\mathcal{A}_n \neq \mathcal{A}_0) \rightarrow 0$. Assume $\mathcal{A}_n = \mathcal{A}_0$; then, by \eqref{solution:SSE:full}, we have
\begin{align*}
\widehat{c}_{1n} \ge \frac{1}{2} \boldsymbol \epsilon_n' (I - H_{\mathcal{A}_0}) \boldsymbol \epsilon_n = \frac{1}{2} (n-q_n) \sigma_0^2 + O_p(n^{1/2}).
\end{align*}
By the lower bound in \eqref{sigma_bound}, we have $\widehat{\sigma}_n^{-2} = O_p(1+p_n/n)$.
\end{proof}
\begin{proof}[Proof of Theorem \ref{thm:oracle}]
By virtue of Lemmas \ref{lemma:main1} and \ref{lemma:main2}, we assume $\mathcal{A}_n = \mathcal{A}_0$, and note that, by \eqref{solution:beta}, we have
\begin{align}
\nonumber \widehat{\boldsymbol \beta}_{n\mathcal{A}_0} - \boldsymbol \beta_{0\mathcal{A}_0} &= \left( X_{\mathcal{A}_0}' X_{\mathcal{A}_0} \right)^{-1} X_{\mathcal{A}_0}' \boldsymbol \epsilon_n - \left( X_{\mathcal{A}_0}' X_{\mathcal{A}_0} \right)^{-1} S_{n\mathcal{A}\mathcal{A}} \widehat{\boldsymbol \xi}_{\mathcal{A}_0}\\
\label{diffbeta0} &= O_p(n^{(u-1)/2}) + o(n^{-u/2}) = o(n^{-u/2}),
\end{align}
by Lemma \ref{lemma:xi}(a) and \ref{ass:q}.
By \ref{ass:beta}, this implies the sign consistency $P(\mathrm{sign}(\widehat{\beta}_{nj}) = \mathrm{sign}(\beta_{0j}), \forall j) \rightarrow 1$.
\eqref{diffbeta0} also implies that $\max_{j \in \mathcal{A}_0} |\widehat{\beta}_{nj}|^{-1} = O_p(q_n^{1/2}) = O_p(n^{u/2})$ by \ref{ass:beta}, and that $\max_{j \in \mathcal{A}_0} \overline{\xi}_{nj} = O_p(n^{r+u/2})$ by Lemmas \ref{lemma:sigma}, \ref{lemma:xibound}(b), and \ref{lemma:xibound}(c).
Then, by Lemma \ref{lemma:xibound}(a), we have $\max_{j \in \mathcal{A}_0} \widehat{\xi}_{nj} = O_p(n^{r+u/2})$.
We have reached
\begin{align} \label{diffbeta1}
\widehat{\boldsymbol \beta}_{n\mathcal{A}_0} - \boldsymbol \beta_{0\mathcal{A}_0} &= \left( X_{n\mathcal{A}_0}' X_{n\mathcal{A}_0} \right)^{-1} X_{n\mathcal{A}_0}' \boldsymbol \epsilon_n - o_p(n^{-1/2}).
\end{align}
This proves the asymptotic normality in part (b).
For the existence of the solution, we verify the KKT condition \eqref{sol:inactive}:
\begin{align*}
|\mathbf x_{nj}' (\mathbf y_n - X_n \widehat{\boldsymbol \beta}_n)| < \widehat{\xi}_{nj}, \qquad j \notin \mathcal{A}_0.
\end{align*}
Note that, by \eqref{sol:beta1} and \eqref{diffbeta1},
\begin{align*}
X_{n\mathcal{A}_0^c}' (\mathbf y_n - X_n \widehat{\boldsymbol \beta}_n) &= X_{n\mathcal{A}_0^c}' ( I - H_{n\mathcal{A}_0} ) \boldsymbol \epsilon_n + o(n^{1/2} \rho_n).
\end{align*}
Since $\mathbf x_{nj}'\mathbf x_{nj} = n$ for all $j$ and the $e_{ni}$ are Gaussian, we have
\begin{align*}
\max_{j \notin \mathcal{A}_0} |\mathbf x_{nj}' ( I - H_{n\mathcal{A}_0} ) \boldsymbol \epsilon_n| = O_p(n^{1/2} (\log p_n)^{1/2}) = O_p(n^{(U+1)/2}).
\end{align*}
Therefore, we have
\begin{align*}
\max_{j \notin \mathcal{A}_0} |\mathbf x_{nj}' (\mathbf y_n - X_n \widehat{\boldsymbol \beta}_n)| = O_p(n^{(U+1)/2}).
\end{align*}
On the other hand, note that $\max_{j \notin \mathcal{A}_0} |\widehat{\beta}_{nj}| = 0$. By Lemmas \ref{lemma:xibound}(b), \ref{lemma:xibound}(c), and \ref{lemma:sigma2}, we have $\max_{j \notin \mathcal{A}_0} \underline{\xi}_{nj}^{-1} = O_p(n^{-R+\zeta})$ for any $\zeta > 0$. By Lemma \ref{lemma:xibound}(a), we have $\max_{j \notin \mathcal{A}_0} \widehat{\xi}_{nj}^{-1} = O_p(n^{-R+\zeta})$ for any $\zeta > 0$.
By \ref{ass:mu}, choosing $\zeta = (2R-U-1)/4$, we have
\begin{align*}
\max_{j \notin \mathcal{A}_0} |\mathbf x_{nj}' (\mathbf y_n - X_n \widehat{\boldsymbol \beta}_n)| < \min_{j \notin \mathcal{A}_0} |\widehat{\xi}_{nj}|
\end{align*}
with probability tending to 1.
For uniqueness, note from \eqref{eq:marginal} that
\begin{align*}
-\frac{\partial^2 \log \pi(\boldsymbol \theta|\mathbf y_n,X_n)}{\partial \boldsymbol \alpha \partial \boldsymbol \alpha'} = \frac{1}{\nu_n} I + W D_{\boldsymbol \kappa} W' + \frac{1}{\sigma} D_{e^{\boldsymbol \alpha}} D_{|\boldsymbol \beta|},
\end{align*}
where $W$ is a $p$ by $p(p-1)/2$ matrix, whose column $\mathbf w_{jk}$ corresponding to the edge $(j,k)$ is $\mathbf e_j-\mathbf e_k$, and
\begin{align*}
\kappa_{jk} = G_{n,jk} a_{\omega n} \frac{4\nu_n b_{\omega n} - 2(\alpha_{nj}-\alpha_{nk})^2}{(2\nu_n b_{\omega n} + (\alpha_{nj} -\alpha_{nk})^2)^2}.
\end{align*}
Since $|\kappa_{jk}| \le \frac{a_{\omega n}}{\nu_n b_{\omega n}}$, we have $\left| \sum_{k \sim j} \kappa_{jk} \right| < \frac{L_n a_{\omega n}}{\nu_n b_{\omega n}}$.
By \ref{ass:omega}, we have
\begin{align*}
-\frac{\partial^2 \log \pi(\boldsymbol \theta|\mathbf y_n,X_n)}{\partial \boldsymbol \alpha \partial \boldsymbol \alpha'} = \frac{1}{\nu_n} I + o(\frac{1}{\nu_n}) + \frac{1}{\sigma} D_{e^{\boldsymbol \alpha}} D_{|\boldsymbol \beta|}.
\end{align*}
The following table shows the Hessian matrix of $-\log \pi(\boldsymbol \theta|\mathbf y_n,X_n)$ with respect to $\sigma$, $\boldsymbol \beta_{\mathcal{A}_0}$, $\boldsymbol \alpha_{\mathcal{A}_0}$, and $\boldsymbol \alpha_{\mathcal{A}_0^c}$.
\begin{center}
\begin{tabular}{c|cccc}
w.r.t. & $\sigma$ & $\boldsymbol \beta_{\mathcal{A}_0}$ & $\boldsymbol \alpha_{\mathcal{A}_0}$ & $\boldsymbol \alpha_{\mathcal{A}_0^c}$\\
\hline
$\sigma$ & $ \frac{6c_{1n}}{\sigma_n^4} + \frac{2c_{2n}}{\sigma_n^3} - \frac{c_{3n}}{\sigma_n^2}$ & \\
$\boldsymbol \beta_{\mathcal{A}_0}$ & $\frac{2X_{n\mathcal{A}_0}'(\mathbf y_n-X_n\boldsymbol \beta_n)}{\sigma_n^3} - \frac{ S_{\mathcal{A}_0} e^{\boldsymbol \alpha_{\mathcal{A}_0}} }{\sigma_n^2}$ & $\frac{X_{n\mathcal{A}_0}'X_{n\mathcal{A}_0}}{\sigma_n^2}$ \\
$\boldsymbol \alpha_{\mathcal{A}_0}$ & $-\frac{ D_{|\boldsymbol \beta_{\mathcal{A}_0}|} e^{\boldsymbol \alpha_{\mathcal{A}_0}} }{\sigma_n^2}$ & $\frac{D_{e^{\boldsymbol \alpha_{\mathcal{A}_0}}} S_{\mathcal{A}_0}}{\sigma_n}$ & $\frac{I}{\nu_n} + o(\frac{1}{\nu_n}) + \frac{D_{e^{\boldsymbol \alpha_{\mathcal{A}_0}}}D_{|\boldsymbol \beta_{\mathcal{A}_0}|}}{\sigma_n}$ \\
$\boldsymbol \alpha_{\mathcal{A}_0^c}$ & 0 & 0 & 0 & $\frac{I}{\nu_n} + o(\frac{1}{\nu_n})$\\
\end{tabular}
\end{center}
The Hessian evaluated at the solution is given by
\begin{center}
\begin{tabular}{c|cccc}
w.r.t. & $\sigma$ & $\boldsymbol \beta_{\mathcal{A}_0}$ & $\boldsymbol \alpha_{\mathcal{A}_0}$ & $\boldsymbol \alpha_{\mathcal{A}_0^c}$\\
\hline
$\sigma$ & {\color{red} $ \frac{4\widehat{c}_{1n}}{\widehat{\sigma}_n^4}$} $+$ {\color{blue} $\frac{\widehat{c}_{2n}}{\widehat{\sigma}_n^3}$} & \\
$\boldsymbol \beta_{\mathcal{A}_0}$ & {\color{red} $\frac{X_{n\mathcal{A}_0}'(\mathbf y_n-X_n\widehat{\boldsymbol \beta}_n)}{\widehat{\sigma}_n^3}$} & {\color{red} $\frac{X_{n\mathcal{A}_0}'X_{n\mathcal{A}_0}}{2\widehat{\sigma}_n^2}$} $+$ {\color{green} $\frac{X_{n\mathcal{A}_0}'X_{n\mathcal{A}_0}}{2\widehat{\sigma}_n^2}$} \\
$\boldsymbol \alpha_{\mathcal{A}_0}$ & {\color{blue} $-\frac{ D_{|\widehat{\boldsymbol \beta}_{\mathcal{A}_0}|} e^{\widehat{\boldsymbol \alpha}_{\mathcal{A}_0}} }{\widehat{\sigma}_n^2}$} & {\color{green} $\frac{D_{e^{\widehat{\boldsymbol \alpha}_{\mathcal{A}_0}}} S_{n\mathcal{A}_0\mathcal{A}_0}}{\widehat{\sigma}_n}$} & {\color{green} $\frac{I}{\nu_n} + o(\frac{1}{\nu_n})$} $+$ {\color{blue} $\frac{D_{e^{\widehat{\boldsymbol \alpha}_{\mathcal{A}_0}}}D_{|\widehat{\boldsymbol \beta}_{\mathcal{A}_0}|}}{\widehat{\sigma}_n}$} \\
$\boldsymbol \alpha_{\mathcal{A}_0^c}$ & 0 & 0 & 0 & $\frac{I}{\nu_n} + o(\frac{1}{\nu_n})$
\end{tabular}
\end{center}
The red-colored submatrix is strictly positive definite. The blue-colored submatrix is positive semi-definite. We claim that the green-colored submatrix is asymptotically strictly positive definite. Note that the smallest eigenvalue of $X_{n\mathcal{A}_0}'X_{n\mathcal{A}_0}$ is greater than or equal to $n\tau_1$ by \ref{ass:XX}. The smallest eigenvalue of $I/\nu_n$ is greater than $(1+p_n/n) n^{r-\zeta}$ for any $\zeta>0$ and large $n$ by \ref{ass:nu}. On the other hand, the largest eigenvalue of $D_{e^{\boldsymbol \alpha_{\mathcal{A}_0}}} S_{\mathcal{A}_0}$ is of $O_p((1+p_n/n)^{1/2} n^{r+u/2})$ as discussed above. The claim follows by \ref{ass:nu}.
We have proved that the objective function is strictly convex in the region where the solutions can reside. Suppose there were two distinct solutions. Then the segment between them would have to contain at least one point at which the objective function is not convex, which is impossible since the objective function is strictly convex in that region. Therefore, the solution is unique, and this completes the proof.
\end{proof}
| 3,409 | 38,227 |
en
|
train
|
0.161.14
|
\subsection*{Proof of Theorem \ref{oracle:fix}}
\begin{proof}[Proof of Theorem \ref{oracle:fix}]
Lemmas \ref{lemma:sigma}--\ref{lemma:xibound} hold with $U=0$, $u=0$, $1/2<R<1$, $0<r<R-1/2$, and $0 \le z < 1$. Since the partial orthogonality is not assumed in the fixed-$p$ case, we take a different strategy to prove the rest. We first prove $P(\mathcal{D}_n \neq \emptyset) \rightarrow 0$, then $\widehat{\sigma}_n^{-2} = O_p(1)$, and then show $P(\mathcal{C}_n \neq \emptyset \, \& \, \mathcal{D}_n = \emptyset ) \rightarrow 0$.
Suppose $\mathcal{D}_n \neq \emptyset$. Note from \eqref{solution:residual:full} that
\begin{align*}
X_{\mathcal{D}_n}' (\mathbf y_n - X_n \widehat{\boldsymbol \beta}_n) &= X_{\mathcal{D}_n}' ( I - H_{\mathcal{A}_n} ) X_{\mathcal{D}_n} \boldsymbol \beta_{0\mathcal{D}_n} + X_{\mathcal{D}_n}'( I - H_{\mathcal{A}_n} ) \boldsymbol \epsilon_n\\
& \qquad + X_{\mathcal{D}_n}' X_{\mathcal{A}_n} \left( X_{\mathcal{A}_n}' X_{\mathcal{A}_n} \right)^{-1} S_{n\mathcal{A}\mathcal{A}} \widehat{\boldsymbol \xi}_{\mathcal{A}_n}\\
&= X_{\mathcal{D}_n}' ( I - H_{\mathcal{A}_n} ) X_{\mathcal{D}_n} \boldsymbol \beta_{0\mathcal{D}_n} + O_p(n^{1/2}) + o_p(n^R)\\
&= X_{\mathcal{D}_n}' ( I - H_{\mathcal{A}_n} ) X_{\mathcal{D}_n} \boldsymbol \beta_{0\mathcal{D}_n} + o_p(n).
\end{align*}
Since $X_{\mathcal{D}_n}' ( I - H_{\mathcal{A}_n} ) X_{\mathcal{D}_n} \boldsymbol \beta_{0\mathcal{D}_n} \ge C n$ in probability for some $C>0$ by \ref{ass:fix:beta} and \ref{ass:fix:XX}, the KKT condition \eqref{sol:inactive} cannot be satisfied due to Lemma \ref{lemma:xi}(a). This implies $P(\mathcal{D}_n = \emptyset) \rightarrow 1$.
Now assume $\mathcal{D}_n = \emptyset$; then, by \eqref{solution:SSE:full}, we have
\begin{align*}
\widehat{c}_{1n} \ge \frac{1}{2} \boldsymbol \epsilon_n' (I - H_{\mathcal{A}_n}) \boldsymbol \epsilon_n \ge \frac{1}{2} (n-p) \sigma_0^2 + O_p(n^{1/2}).
\end{align*}
By the lower bound in \eqref{sigma_bound}, we have $\widehat{\sigma}_n^{-2} = O_p(1)$.
Suppose $\mathcal{C}_n \neq \emptyset$ and $\mathcal{D}_n = \emptyset$. By \eqref{sol:beta1}, we have
\begin{align*}
\widehat{\sigma}_n \sum_{j =1}^p e^{\widehat{\alpha}_{nj}} |\widehat{\beta}_{nj}| = {\widehat{\boldsymbol \xi}_{\mathcal{A}_n}}' S_{n\mathcal{A}\mathcal{A}} \widehat{\boldsymbol \beta}_{\mathcal{A}_n} = O_p(h_n^{1/2}) - h_n,
\end{align*}
where $h_n = {\widehat{\boldsymbol \xi}_{\mathcal{A}_n}}' S_{n\mathcal{A}\mathcal{A}} (X_{\mathcal{A}_n}'X_{\mathcal{A}_n})^{-1} S_{n\mathcal{A}\mathcal{A}} {\widehat{\boldsymbol \xi}_{\mathcal{A}_n}}$. By an argument similar to that in Lemma \ref{lemma:main1}, using the fact that $\widehat{\sigma}_n^{-2} = O_p(1)$, we can show that $h_n \rightarrow_p \infty$ and hence $P(\mathcal{C}_n \neq \emptyset \, \& \, \mathcal{D}_n = \emptyset ) \rightarrow 0$.
Now we have all the results that are analogous to Lemmas \ref{lemma:sigma}--\ref{lemma:sigma2}. The rest of the proof is analogous to the proof of Theorem \ref{thm:oracle}.
\end{proof}
\end{document}
| 1,175 | 38,227 |
en
|
train
|
0.162.0
|
\begin{document}
\title[Smooth covers on symplectic manifolds]
{Smooth covers on symplectic manifolds}
\author[Fran\c{c}ois Lalonde]{Fran\c{c}ois Lalonde}
\author[Jordan Payette]{Jordan Payette}
\address{D\'epartement de math\'ematiques et de statistique, Universit\'e de Montr\'eal;
D\'{e}partement de
math\'{e}matiques et de statistique, Universit\'{e} de Montr\'{e}al.}
\email{[email protected]; [email protected]}
\thanks{The first author is supported by a Canada Research Chair, an NSERC grant OGP 0092913
(Canada) and an FQRNT grant ER-1199 (Qu\'ebec); the second author is supported by a Graham Bell fellowship from NSERC (Canada).}
\date{}
\begin{abstract}
In this article, we first introduce the notion of a {\it continuous cover} of a manifold parametrised by any compact manifold $T$ endowed with a mass 1 volume-form. We prove that any such cover admits a partition of unity where the sum is replaced by integrals. When the cover is smooth, we then generalize Polterovich's notion of Poisson non-commutativity to such a context in order to get a more natural definition of non-commutativity and to be in a position where one can compare various invariants of symplectic manifolds. The main theorem of this article states that the discrete Poisson bracket invariant of Polterovich is equal to our smooth version of it, as it does not depend on the nature or dimension of the parameter space $T$. As a consequence, the Poisson-bracket invariant of a symplectic manifold can be computed either in the discrete category or in the smooth one, that is to say either by summing or integrating. The latter is in general more amenable to calculations, so that, in some sense, our result is in the spirit of the De Rham theorem equating simplicial cohomology and De Rham cohomology. We finally study the Poisson-bracket invariant associated to coverings by symplectic balls of capacity $c$, exhibiting some of its properties as the capacity $c$ varies. We end with some positive and negative speculations on the relation between uncertainty phase transitions and critical values of the Poisson bracket, which was the motivation behind this article.
\end{abstract}
\maketitle
\noindent
Subject classification: 53D35, 57R17, 55R20, 57S05.
| 661 | 26,551 |
en
|
train
|
0.162.1
|
\section{Introduction}
In mathematics, the notion of partition of unity is fundamental since it is the concept that distinguishes $C^{\infty}$ geometry from analytic geometry. In the first case, where partitions of unity apply, most objects can be decomposed in local parts, while in the second case where partitions of unity do not apply, most objects are intrinsically global and indecomposable.
It is therefore of some importance to push that notion as far as we can in order to make it more natural and applicable. Our first observation is that the right context in which one should consider partitions of unity is in the continuous category (or possibly in the measurable category if one were able to make sense of that concept for families of open sets). So here continuous covers by open subsets of a given smooth manifold $M$ will be parametrised by any smooth compact manifold (possibly with boundary) $T$ endowed with a volume-form $dt$ of total mass $1$; for simplicity we shall refer to those pairs $(T, dt)$ as ``probability spaces''. We will first prove that for any compact manifold $M$, any such continuously parametrised cover of $M$ admits a smooth partition of unity made of smooth functions.
Concentrating on an arbitrary symplectic manifold $(M, \omega)$, the covers that we will consider will be made of smooth families of symplectically embedded balls of a given capacity $c = \pi r^2$ indexed by a measure space $(T, d\mu = \mathrm{d} t)$. Here is our first theorem: the level of Poisson non-commutativity, as defined by Polterovich in the discrete case of partitions of unity, can be generalised to the case of our families of covers and associated partitions of unity; moreover the number that we get in this general case, which depends {\it a priori} on the probability space, actually does not, being equal to the number associated to the corresponding discrete setting.
Our second theorem is that if one considers the function $f: [0, c_{max}] \to [0, \infty]$ that assigns to $c$ Polterovich's level of non-commutativity of the covers made of symplectically embedded balls of capacity $c$, as generalised by us in the smooth setting, then this function enjoys the following two properties:
\noindent
1) $f$ is non-increasing, and
\noindent
2) $f$ is upper semi-continuous and left-continuous.
We end this paper with a question concerning the relation, for a given symplectic manifold $(M, \omega)$, between critical values of the Poisson-bracket invariant as the capacity $c$ of the ball varies, and the critical values (or ``phase transition'') depending on $c$ of the topology of the infinite dimensional space of symplectic embeddings of the standard ball of capacity $c$
into $(M, \omega)$.
\section{Continuous and smooth covers}
Throughout this article, ``smooth" means ``of class $C^r$" for some arbitrary fixed $r {\mathcal g}e 1$ and $T$ is a compact smooth manifold of finite dimension endowed with a measure $\mu$ of total volume $1$ coming from a volume-form $\mathrm{d} t$. The following definition is far more restrictive than the one that we have in mind, but it will be enough for the purpose of this article.
{\beta}gin{Definition}
Let $M$ be a closed smooth manifold of dimension $n$. Let $U$ be a bounded open subset of Euclidean space $\mathbb{R}^{n}$ whose boundary is smooth, so that the closure of $U$ admits an open neighbourhood smoothly diffeomorphic to $U$.
A {\it continuous cover} of $M$ of type $(T,U)$ is a continuous map
$$
G: T \times U \to M $$
such that
{\beta}gin{enumerate}
\item for each $t \in T$, the map $G_t$ is a smooth embedding of $U$ to $M$ that can be extended to a smooth embedding of some ({\it a priori} $t$-dependent) collar neighbourhood of $U$ (and therefore to the closed set $\bar{U}$), and
\item the images of $U$ as $t$ runs over the parameter space $T$, cover $M$.
\end{enumerate}
\end{Definition}
Note that, in general, the topology of $U$ could change within the $T$-family. However, to simplify the presentation, we restrict ourselves to a fixed $U$ -- this is what we had in mind in the sentence preceding this definition.
A {\it smooth cover} is defined in the same way, but now requiring that $G$ be a smooth map.
{\beta}gin{Definition} A {\it partition of unity} $F$ subordinated to a continuous cover $G$ is a smooth function
$$
\tilde{F}: T \times U \to [0, \infty)
$$
such that
{\beta}gin{enumerate}
\item each $\tilde{F}_t : U \to {\bf R}$ is a smooth function with (compact) support in $U$,
\item the closure of the union $\bigcup_{t \in T} \, \mathrm{supp} (\tilde{F}_t)$ is contained in $U$, and
\item for every $x \in M$,
$$
\int_T F_t(x) dt = 1,
$$
\end{enumerate}
\noindent
where the smooth function $F_t : M \to {\bf R}$ is the pushforward of $\tilde{F}_t$ to $M$ using $G_t$, extended by zero outside the image of $G_t$; in other words, it is $F_t(x) = \tilde{F}(t, G^{-1}_t(x))$.
\noindent The notation $F < G$ expresses that $F$ is a partition of unity subordinated to the cover $G$.
\end{Definition}
{\beta}gin{Remark} Condition (2) plays a role in the proofs of a few results of this paper by allowing us to deform $U$ a little while keeping a given $F$ fixed; we were not able to come up with arguments working without this condition. Note that we recover the usual notion of partition of unity by taking $T$ to be a finite set of points with the counting measure.
\end{Remark}
\begin{Theorem} Each continuous cover admits a partition of unity.
\end{Theorem}
\proof
Let $G$ be a continuous cover of $M$ of type $(T,U)$. The general idea of the proof is to replace $G$ by a finite open cover $G'$ of $M$, to consider a partition of unity subordinated to the latter and to use it to construct a partition of unity subordinated to $G$.
Cover $U$ by open balls such that their closure is always included inside $U$. Now, push forward this cover to $M$ using each $G_{t}$; the collection of all of these images as $t$ varies in $T$ forms an open cover of $M$ by sets diffeomorphic to the ball. Since $M$ is compact, there exists a finite subcover. Each open set in this subcover comes from some $G_t$, where $t$ is an element of a finite set $T' {\frak {su}}bset T$.
For each $t \in T'$, consider the (finite) collection $C_t$ of open balls inside $U$ whose image under $G_t$ belongs to the aforementioned subcover, so that the latter can be expressed as $G' := \{G_t(V) \, : \, t \in T', V \in C_t\}$. Since the closure of each ball $V \in C_t$ is contained in the open set $U$, by continuity of $G$ there is an open set $B_V {\frak {su}}bset T$ centred at $t$ such that $G_t(\bar{V}) {\frak {su}}bset G_{\tau}(U)$ for all $\tau \in B_V$, so that each $G_{\tau}^{-1} \circ G_t : \bar{V} \to U$ is defined and is a diffeomorphism onto its image. The intersection $B_t = \cap_{V \in C_t} B_V$ contains $t$ and is open since $C_t$ is finite. For each $t \in T'$, consider a smooth nonnegative bump function $\rho_t$ supported in $B_t$ whose integral over $T$ equals $1$.
There exists a smooth partition of unity $\Phi = \{ {\partial}hi_V : M \to [0,1] \, | \, V \in \cup_{t \in T'} C_t \}$ on $M$ subordinated to the finite open cover $G'$. For $t \in T'$ and $V \in C_t$, the real-valued function $\tilde{F}_V(\tau,u) := \rho_t(\tau) {\partial}hi_V(G(\tau,u))$ defined on $T \times U$ is supported in $B_t \times U$, is smooth in both $u$ and $\tau$ and satisfies $\int_T \tilde{F}_V(\tau, G_{\tau}^{-1}(x))d\tau = {\partial}hi_V(x)$. It easily follows that the function
\[ \tilde{F} : T \times U \to \mathbb{R} : (\tau, u) \mapsto {\frak {su}}m_{t \in T'} {\frak {su}}m_{V \in C_t} \tilde{F}_V(\tau, u) \]
is a partition of unity subordinated to $G$.
$\square$
\section{The $pb$ invariant and Poisson non-commutativity}
Leonid Polterovich \cite{P1,P2,PR} recently introduced the notion of the level of Poisson non-com\-mutativity of a given classical (i.e.\ finite) covering of a symplectic manifold. Here is the definition:
{\beta}gin{Definition} Let $(M, {\omega})$ be a closed symplectic manifold and $\mathcal U$ a finite cover of $M$ by open subsets $U_1, \ldots, U_N$. For each partition of unity $F = (f_1, \ldots, f_N)$ subordinated to $\mathcal U$, take the supremum of $\| \{{\Sigma}gma_i a_i f_i, {\Sigma}gma_j b_j f_j\} \|$ when the $N$-tuples of coefficients $(a_i)$ and $(b_i)$ run through the $N$-cube $[-1,1]^N$, where the bracket is the Poisson bracket and the norm is the $C^0$-supremum norm. Then take the infimum over all partitions of unity subordinated to $\mathcal U$. This is by definition the $pb$ \textit{invariant} of $\mathcal U$. To summarize:
\[ {\partial}b(\mathcal U) := \underset{F < \mathcal U}{\mathrm{inf}} \; \underset{(a_i), (b_i) \in [-1,1]^N \, }{\mathrm{sup}} \, \left\| \left\{ {\frak {su}}m_i a_i f_i \, , \, {\frak {su}}m_j b_j f_j \right\} \right\| \; . \]
\end{Definition}
Roughly speaking, this number is a measure of the least amount of ``symplectic interaction" that sets in a cover $\mathcal U$ can have. It is very plausible that such a number depends on the combinatorics of the cover, but also on the symplectic properties of the (intersections of the) open sets in the cover. To illustrate this point, observe that if $\mathcal U$ is an open cover made of only two open sets, then ${\partial}b(\mathcal U) = 0$. A somewhat opposite result holds for covers constituted of displaceable open sets; let's recall that a subset $U$ of $M$ is {\it displaceable} if there is a Hamiltonian diffeomorphism ${\partial}hi$ such that ${\partial}hi(U) \cap U = \emptyset$. The main result of Polterovich in \cite{P1,PR} is that for such a cover, the number ${\partial}b(\mathcal U)$ (multiplied by some finite number which measures the ``symplectic size" of the sets in the cover) is bounded from below by $(2N^2)^{-1}$. In particular, ${\partial}b(\mathcal U) > 0$ in such a case. This result heavily relies on techniques in quantum and Floer homologies and in the theory of quasi-morphisms and quasi-states. Unfortunately, this lower bound depends on the cardinality $N$ of the open cover; as such, it does not show if one could use the ${\partial}b$-invariant in order to assign to a given symplectic manifold a (strictly positive) number that might be interpreted as its level of Poisson non-commutativity. Nevertheless, Polterovich conjectured in \cite{P2} and \cite{PR} that for covers made of displaceable open sets, there should be a strictly positive lower bound for $pb$ independent of the cardinality of the cover, an extremely hard conjecture.
One way of solving this problem might come from the extension of the ${\partial}b$-invariant from finite covers to continuous or smooth covers. Indeed, such covers are morally limits of finite covers as the cardinality $N$ goes to infinity, so we can expect some relation between the minimal value of ${\partial}b$ on such covers and the level of Poisson non-commutativity of the symplectic manifold. This extension has the advantage that one may then compare the ${\partial}b$ invariant for continuous/smooth covers to other quantities that also depend on continuous/smooth covers, such as the critical values at which families of symplectic balls undergo a ``phase transition''. We first need the following definition:
\begin{Definition} Let $(M, {\omega})$ be a closed symplectic manifold and $G$ a continuous cover of $M$ of type $(T,U)$ by open subsets $G_t(U)$. For each partition of unity $F$ subordinated to $G$, take the supremum of $\| \{\int_T a(t) F_{t} \, dt, \int_T b(t) F_{t} \, dt\} \|$ over all coefficients (or \textit{weights}) $a$ and $b$ that are measurable functions defined on $T$ with $dt$-almost everywhere values in $[-1,1]$. Then take the infimum over all partitions of unity subordinated to $G$. This is by definition the $pb$ \textit{invariant} of $G$. To summarize:
\[ {\partial}b(G) := \underset{F < G}{\mathrm{inf}} \; \underset{a, b : \, T \to [-1,1] \, \mbox{ \tiny{measurable}}}{\mathrm{sup}} \, \left\| \left\{ \int_T a(t) F_{t} dt \, , \, \int_T b(t) F_{t} dt \right\} \right\| \; . \]
\end{Definition}
Note that we recover Polterovich's definition by replacing $T$ by a finite set of points. The following result shows that this pb-invariant is finite.
{\beta}gin{lemma}
Given a continuous cover $G$ of type $(T,U)$, there exists a partition of unity $F$ subordinated to $G$ whose pb-invariant ${\partial}b \, F$ is finite\footnote{The pb-invariant ${\partial}b \, F$ of a partition of unity $F$ is defined as above without the infimum over $F < G$. That is, ${\partial}b \, G = \mathrm{inf}_{F < G} \, {\partial}b \, F$.}.
\end{lemma}
| 3,995 | 26,551 |
% [extraction artifact: dataset chunk metadata ("en | train | 0.162.2 |") — not part of the manuscript]
$\square$
\section{The $pb$ invariant and Poisson non-commutativity}
Leonid Polterovich \cite{P1,P2,PR} introduced recently the notion of the level of Poisson non-com\-mutativity of a given classical (i.e finite) covering of a symplectic manifold. Here is the definition:
{\beta}gin{Definition} Let $(M, {\omega})$ be a closed symplectic manifold and $\mathcal U$ a finite cover of $M$ by open subsets $U_1, \ldots, U_N$. For each partition of unity $F = (f_1, \ldots, f_N)$ subordinated to $\mathcal U$, take the supremum of $\| \{{\Sigma}gma_i a_i f_i, {\Sigma}gma_j b_j f_j\} \|$ when the $N$-tuples of coefficients $(a_i)$ and $(b_i)$ run through the $N$-cube $[-1,1]^N$, where the bracket is the Poisson bracket and the norm is the $C^0$-supremum norm. Then take the infimum over all partitions of unity subordinated to $\mathcal U$. This is by definition the $pb$ \textit{invariant} of $\mathcal U$. To summarize:
\[ {\partial}b(\mathcal U) := \underset{F < \mathcal U}{\mathrm{inf}} \; \underset{(a_i), (b_i) \in [-1,1]^N \, }{\mathrm{sup}} \, \left\| \left\{ {\frak {su}}m_i a_i f_i \, , \, {\frak {su}}m_j b_j f_j \right\} \right\| \; . \]
\end{Definition}
Roughly speaking, this number is a measure of the least amount of ``symplectic interaction" that sets in a cover $\mathcal U$ can have. It is very plausible that such a number depends on the combinatorics of the cover, but also on the symplectic properties of the (intersections of the) open sets in the cover. To illustrate this point, observe that if $\mathcal U$ is an open cover made of only two open sets, then ${\partial}b(\mathcal U) = 0$. A somewhat opposite result holds for covers constituted of displaceable open sets; let's recall that a subset $U$ of $M$ is {\it displaceable} if there is a Hamiltonian diffeomorphism ${\partial}hi$ such that ${\partial}hi(U) \cap U = \emptyset$. The main result of Polterovich in \cite{P1,PR} is that for such a cover, the number ${\partial}b(\mathcal U)$ (multiplied by some finite number which measures the ``symplectic size" of the sets in the cover) is bounded from below by $(2N^2)^{-1}$. In particular, ${\partial}b(\mathcal U) > 0$ in such a case. This result heavily relies on techniques in quantum and Floer homologies and in the theory of quasi-morphisms and quasi-states. Unfortunately, this lower bound depends on the cardinality $N$ of the open cover; as such, it does not show if one could use the ${\partial}b$-invariant in order to assign to a given symplectic manifold a (strictly positive) number that might be interpreted as its level of Poisson non-commutativity. Nevertheless, Polterovich conjectured in \cite{P2} and \cite{PR} that for covers made of displaceable open sets, there should be a strictly positive lower bound for $pb$ independent of the cardinality of the cover, an extremely hard conjecture.
One way of solving this problem might come from the extension of the ${\partial}b$-invariant from finite covers to continuous or smooth covers. Indeed, such covers are morally limits of finite covers as the cardinality $N$ goes to infinity, so we can expect some relation between the minimal value of ${\partial}b$ on such covers and the level of Poisson non-commutativity of the symplectic manifold. This extension has the advantage that one may then compare the ${\partial}b$ invariant for continuous/smooth covers to other quantities that also depend on continuous/smooth covers, such as the critical values at which families of symplectic balls undergo a ``phase transition''. We first need the following definition:
{\beta}gin{Definition} Let $(M, {\omega})$ be a closed symplectic manifold and $G$ a continuous cover of $M$ of type $(T,U)$ by open subsets $G_t(U)$. For each partition of unity $F$ subordinated to $G$, take the supremum of $\| \{\int_N a(t) F(t) dt, \int_N b(t) F(t) dt\} \|$ over all coefficients (or \textit{weights}) $a$ and $b$ that are measurable functions defined on $T$ with $dt$-almost everywhere values in $[-1,1]$. Then take the infimum over all partitions of unity subordinated to $G$. This is by definition the $pb$ \textit{invariant} of $G$. To summarize:
\[ {\partial}b(G) := \underset{F < G}{\mathrm{inf}} \; \underset{a, b : \, T \to [-1,1] \, \mbox{ \tiny{measurable}}}{\mathrm{sup}} \, \left\| \left\{ \int_T a(t) F_{t} dt \, , \, \int_T b(t) F_{t} dt \right\} \right\| \; . \]
\end{Definition}
Note that we recover Polterovich's definition by replacing $T$ by a finite set of points. The following result shows that this pb-invariant is finite.
{\beta}gin{lemma}
Given a continuous cover $G$ of type $(T,U)$, there exists a partition of unity $F$ subordinated to $G$ whose pb-invariant ${\partial}b \, F$ is finite\footnote{The pb-invariant ${\partial}b \, F$ of a partition of unity $F$ is defined as above without the infimum over $F < G$. That is, ${\partial}b \, G = \mathrm{inf}_{F < G} \, {\partial}b \, F$.}.
\end{lemma}
{\partial}roof
Consider the partition of unity $\tilde{F} : T \times U \to \mathbb{R}$ constructed in Theorem 4 above. Given a measurable function $a : T \to [-1,1]$, for any $x \in M$ we compute
\[ \int_T a(\tau) F_{\tau}(x) d\tau = {\frak {su}}m_{t \in T'} {\frak {su}}m_{V \in C_t} \bar a_t {\partial}hi_V(x) \, , \]
where $\bar{a}_t := \int_T a(\tau) \rho_t(\tau)d\tau$ is a number whose value is in $[-1,1]$. It follows that
\[ \left| \left\{ \int_T a(\tau) F_{\tau}(x) d\tau \, , \, \int_T b(\tau) F_{\tau}(x) d\tau \right\}(x) \right| \le {\frak {su}}m_{V, W \in \cup_{t \in T'} C_t} |\{ {\partial}hi_V, {\partial}hi_W \}(x)| < \infty \]
for any measurable functions $a, b : T \to [-1,1]$, which proves the claim. When the partition is smooth with respect to $t$ also, then the value is always finite. Indeed, since $F \in C^r(T \times M)$, it follows from Lebesgue's dominated convergence theorem that the function $a \cdot F := \int_T a(t) F_t dt \in C^r(M)$ is defined and also that $\{ a \cdot F, b \cdot F \} = \int_{T \times T} a(t) b(u) \{F_t, F_u \} dtdu < \infty$ when $a$ and $b$ are weights.
{\bf Q}ED
\noindent We will be now working only with smooth covers. Therefore all partitions of unity $F$ satisfy ${\partial}b \, F < \infty.$
We recall a few facts taken from Polterovich's and Rosen's recent book \cite{PR}, since we will need them. Further information is available in this book and in the references therein. The setting is the following (\cite{PR}, chapter 4):
{\beta}gin{itemize}
\item $(M^{2n}, {\omega}ega)$ a compact symplectic manifold;
\item $U {\frak {su}}bset M$ an open set;
\item $H(U)$ the image of $\widetilde{Ham}(U)$ in $H := \widetilde{Ham}(M)$ under the map induced by the inclusion $U {\frak {su}}bset M$;
\item ${\partial}hi$ : an element of $H(U)$;
\item $c$ : a (subadditive) spectral invariant on $H(U)$ (see the definition below);
\item $q({\partial}hi) := c({\partial}hi) + c({\partial}hi^{-1})$, which is (almost) a norm on $H$;
\item $w(U) := \mathrm{sup}_{{\partial}hi \in H(U)} \, q({\partial}hi)$ the spectral width of $U$ (which may be infinite).
\end{itemize}
{\beta}gin{Definition}[\cite{PR}, 4.3.1]
A function $c : H \to \mathbb{R}$ is called a \textit{subadditive spectral invariant} if it satisfies the following axioms:
\begin{description}
\item[ Conjugation invariance ] $c(\phi \psi \phi^{-1}) = c(\psi) \; \forall \phi, \psi \in H$;
\item[ Subadditivity ] $c(\phi \psi) \le c(\phi) + c(\psi)$;
\item[ Stability ]
\[ \int_{0}^1 \mathrm{min}(f_{t} - g_{t}) dt \le c(\phi) - c(\psi) \le \int_{0}^1 \mathrm{max}(f_{t} - g_{t}) dt \, , \]
\noindent provided $\phi, \psi \in H$ are generated by normalized Hamiltonians $f$ and $g$, respectively;
\item[ Spectrality ] $c(\phi) \in \mathrm{spec}(\phi)$ for all nondegenerate elements $\phi \in H$.
\end{description}
\end{Definition}
{\beta}gin{Remark} The first three properties of a spectral invariant are in practice the most important ones. However, from the spectrality axiom, one can show for instance that $w(U) < \infty$ whenever $U$ is displaceable; as such, the spectrality axiom is relevant in order to tie the spectral invariant with the symplectic topology of $M$. Let's mention that a spectral invariant exists on any closed symplectic manifold, as can be shown in the context of Hamiltonian Floer theory. \end{Remark}
Given a Hamiltonian function $f \in C^{\infty}(M)$ generating the (autonomous) Hamiltonian diffeomorphism $\phi_{f} = \phi^1_{f}$ and a spectral invariant $c$, we can define the number
\[ \zeta(f) := \sigma(\phi_{f}) + \langle f \rangle \in \mathbb{R} \]
\noindent where $\sigma(\phi_{f}) := \lim_{n \to \infty} \frac{1}{n}c(\phi^n_{f})$ (with $\sigma$ the {\it homogenization} of $c$) and $\langle f \rangle := V^{-1} \, \int_{M} f \omega^n$ is the mean-value of $f$, where $V = \int_M \omega^n$ is the volume of the symplectic manifold $M$. The function $\zeta : C^{\infty}(M) \to \mathbb{R}$ is called the (\textit{partial symplectic}) {\it quasi-state} associated to $c$. It has some very important properties, among which:
{\beta}gin{description}
\item[ Normalization ] $\zeta(a) = a$ for any constant $a$;
\item[ Stability ] $\mathrm{min}_{M} (f-g) \le \zeta(f) - \zeta(g) \le \mathrm{max}_{M} (f-g)$;
\item[ Monotonicity ] If $f {\mathcal g}e g$ on $M$, then $\zeta(f) {\mathcal g}e \zeta(g)$;
\item[ Homogeneity ] If $s \in [0, \infty)$, then $\zeta(sf) = s \zeta(f)$;
\item[ Vanishing ] If the support of $f$ is displaceable, then $\zeta(f) = 0$ (this is a consequence of the spectrality axiom for $c$);
\item[ Quasi-subadditivity ] If $\{f, g \} = 0$, then $\zeta(f+g) \le \zeta(f) + \zeta(g)$.
\end{description}
For $f, g \in C^{\infty}(M)$, define $S(f, g) = \mathrm{min} \{ w(\mathrm{supp} \, f) \; , \; w(\mathrm{supp} \, g) \} \in [0, \infty]$. It follows from Remark 8 that this number is finite whenever either $f$ or $g$ has displaceable support.
{\beta}gin{Theorem}[\cite{EPZ}, 1.4 ; \cite{PR}, 4.6.1 ; the Poisson bracket inequality]
For every pair of functions $f, g \in C^{\infty}(M)$ such that $S(f,g) < \infty$,
\[ \Pi(f,g) := \left| \zeta(f+g) - \zeta(f) - \zeta(g) \right| \le \sqrt{2 S(f,g) \, \| \{ f, g \} \| } \; . \]
\end{Theorem}
\noindent We see that $\Pi(f,g)$ measures the default of additivity of $\zeta$. In fact, this theorem implies:
{\beta}gin{description}
\item[ Partial quasi-linearity ] If $S(f,g) < \infty$ and if $\{f, g \} = 0$, then
\[ \zeta(f+g) = \zeta(f) + \zeta(g) \; \mbox{ and } \zeta(s f) = s \zeta(f) \; \forall s \in \mathbb{R} \, . \]
\end{description}
It is known that some symplectic manifolds admit a spectral invariant $c$ for which $S$ takes values in $[0, \infty)$, in which case $\zeta$ is a genuine symplectic quasi-state : it is a normalized, monotone and quasi-linear functional on the Poisson algebra $(C^{\infty}(M), \{-, - \})$.
{\beta}gin{Theorem}[\cite{P1}, 3.1 ; \cite{PR}, 9.2.2]
Let $(M, {\omega}ega)$ be a symplectic manifold and consider a finite cover $U = \{ U_{1}, \dots, U_{N} \}$ of $M$ by displaceable open sets. Write $w(U) := \mathrm{max}_{i} \, w(U_{i}) < \infty$. Then
\[ {\partial}b(U) \, w(U) \; {\mathcal g}e \; \frac{1}{2N^2} \; . \]
\end{Theorem}
\textit{Proof} :
Let $F$ be a partition of unity subordinated to $U$. Set
\[ G_{1} = F_{1}, \, G_{2} = F_{1} + F_{2}, \, \dots , \, G_{N} = F_{1} + \dots + F_{N} \, . \]
| 3,958 | 26,551 |
% [extraction artifact: dataset chunk metadata ("en | train | 0.162.3 |") — not part of the manuscript]
\item[ Stability ]
\[ \int_{0}^1 \mathrm{min}(f_{t} - g_{t}) dt \le c({\partial}hi) - c({\partial}si) \le \int_{0}^1 \mathrm{max}(f_{t} - g_{t}) dt \, , \]
\noindent provided ${\partial}hi, {\partial}si \in H$ are generated by normalized Hamiltonians $f$ and $g$, respectively;
\item[ Spectrality ] $c({\partial}hi) \in \mathrm{spec}({\partial}hi)$ for all nondegenerate elements ${\partial}hi \in H$.
\end{description}
\end{Definition}
{\beta}gin{Remark} The first three properties of a spectral invariant are in practice the most important ones. However, from the spectrality axiom, one can show for instance that $w(U) < \infty$ whenever $U$ is displaceable; as such, the spectrality axiom is relevant in order to tie the spectral invariant with the symplectic topology of $M$. Let's mention that a spectral invariant exists on any closed symplectic manifold, as can be shown in the context of Hamiltonian Floer theory. \end{Remark}
Given a Hamiltonian function $f \in C^{\infty}(M)$ generating the (autonomous) Hamiltonian diffeomorphism ${\partial}hi_{f} = {\partial}hi^1_{f}$ and a spectral invariant $c$, we can define the number
\[ \zeta(f) := {\sigma}gma({\partial}hi_{f}) + {\lambda}ngle f \rangle \in \mathbb{R} \]
\noindent where ${\sigma}gma({\partial}hi_{f}) := \lim_{n \to \infty} \frac{1}{n}c({\partial}hi^n_{f})$ (with ${\sigma}gma$ the {\it homogeneization} of $c$) and ${\lambda}ngle f \rangle := V^{-1} \, \int_{M} f {\omega}ega^n$ is the mean-value of $f$, where $V = \int_M {\omega}ega^n$ is the volume of the symplectic manifold $M$. The function $\zeta : C^{\infty}(M) \to \mathbb{R}$ is called the (\textit{partial symplectic}) {\it quasi-state} associated to $c$. It has some very important properties, among which:
{\beta}gin{description}
\item[ Normalization ] $\zeta(a) = a$ for any constant $a$;
\item[ Stability ] $\mathrm{min}_{M} (f-g) \le \zeta(f) - \zeta(g) \le \mathrm{max}_{M} (f-g)$;
\item[ Monotonicity ] If $f {\mathcal g}e g$ on $M$, then $\zeta(f) {\mathcal g}e \zeta(g)$;
\item[ Homogeneity ] If $s \in [0, \infty)$, then $\zeta(sf) = s \zeta(f)$;
\item[ Vanishing ] If the support of $f$ is displaceable, then $\zeta(f) = 0$ (this is a consequence of the spectrality axiom for $c$);
\item[ Quasi-subadditivity ] If $\{f, g \} = 0$, then $\zeta(f+g) \le \zeta(f) + \zeta(g)$.
\end{description}
For $f, g \in C^{\infty}(M)$, define $S(f, g) = \mathrm{min} \{ w(\mathrm{supp} \, f) \; , \; w(\mathrm{supp} \, g) \} \in [0, \infty]$. It follows from Remark 8 that this number is finite whenever either $f$ or $g$ has displaceable support.
{\beta}gin{Theorem}[\cite{EPZ}, 1.4 ; \cite{PR}, 4.6.1 ; the Poisson bracket inequality]
For every pair of functions $f, g \in C^{\infty}(M)$ such that $S(f,g) < \infty$,
\[ \Pi(f,g) := \left| \zeta(f+g) - \zeta(f) - \zeta(g) \right| \le \sqrt{2 S(f,g) \, \| \{ f, g \} \| } \; . \]
\end{Theorem}
\noindent We see that $\Pi(f,g)$ measures the default of additivity of $\zeta$. In fact, this theorem implies:
{\beta}gin{description}
\item[ Partial quasi-linearity ] If $S(f,g) < \infty$ and if $\{f, g \} = 0$, then
\[ \zeta(f+g) = \zeta(f) + \zeta(g) \; \mbox{ and } \zeta(s f) = s \zeta(f) \; \forall s \in \mathbb{R} \, . \]
\end{description}
It is known that some symplectic manifolds admit a spectral invariant $c$ for which $S$ takes values in $[0, \infty)$, in which case $\zeta$ is a genuine symplectic quasi-state : it is a normalized, monotone and quasi-linear functional on the Poisson algebra $(C^{\infty}(M), \{-, - \})$.
{\beta}gin{Theorem}[\cite{P1}, 3.1 ; \cite{PR}, 9.2.2]
Let $(M, {\omega}ega)$ be a symplectic manifold and consider a finite cover $U = \{ U_{1}, \dots, U_{N} \}$ of $M$ by displaceable open sets. Write $w(U) := \mathrm{max}_{i} \, w(U_{i}) < \infty$. Then
\[ {\partial}b(U) \, w(U) \; {\mathcal g}e \; \frac{1}{2N^2} \; . \]
\end{Theorem}
\textit{Proof} :
Let $F$ be a partition of unity subordinated to $U$. Set
\[ G_{1} = F_{1}, \, G_{2} = F_{1} + F_{2}, \, \dots , \, G_{N} = F_{1} + \dots + F_{N} \, . \]
\noindent Using Theorem 1 and the vanishing property of $\zeta$, one obtains the following estimate:
{\beta}gin{align}
\notag \left| \zeta(G_{k+1}) - \zeta(G_{k}) \right| &= \left| \zeta(G_{k} + F_{k+1} \, ) - \zeta(G_{k}) - \zeta(F_{k+1}) \right| \\
\notag & \le \sqrt{2 \, \mathrm{min} ( w(\mathrm{supp} \, G_{k}) \, , \, w(\mathrm{supp} \, F_{k+1}) )} \, \sqrt{\| \{ G_{k} , F_{k+1} \} \|} \, . \end{align}
\noindent Using the definitions of ${\partial}b (F)$ and of $w(U)$, one gets:
{\beta}gin{align}
\notag \left| \zeta(G_{k+1}) - \zeta(G_{k}) \right| & \le \sqrt{2 \, w(U)} \, \sqrt{{\partial}b (F) } \, .
\end{align}
\noindent This inequality holds for all $k$. Using the normalization and vanishing properties of $\zeta$ and applying the triangle inequality to a telescopic sum, one gets:
{\beta}gin{align}
\notag 1 & = \left| \zeta(1) - 0 \right| = \left| \zeta(G_{N}) - \zeta(G_{1}) \right| \le {\frak {su}}m_{k=1}^{N-1} \left| \zeta(G_{k+1}) - \zeta(G_{k}) \right| \\
\notag & \le {\frak {su}}m_{k=1}^{N-1} \sqrt{2 \, w(U) \, {\partial}b (F) } \le N \sqrt{2 \, w(U) \, {\partial}b (F) } \, .
\end{align}
\noindent Since this is true for any $F < U$, the result easily follows.
{\bf Q}ED
A similar result holds in the context of smooth covers. We say that a smooth cover $G : T \times U \to (M, \omega)$ is made of displaceable sets if each set $G_t(\bar{U}) = \overline{G_t(U)} \subset (M, \omega)$ is displaceable (recall that we assume that $G_t$ extends as a smooth embedding to the closure $T \times \bar{U}$). In other words, not only is each $G_t(U)$ displaceable, but so is a small neighborhood of it too.
{\beta}gin{Theorem} For any smooth cover $G$ of type $(T,U)$ made of displaceable sets, there exists a constant $c = c(G) > 0$ such that
\[ {\partial}b(G) {\mathcal g}e \; c(G) \; . \]
\end{Theorem}
\textit{Proof} :
The proof morally consists in a coarse-graining of the smooth cover to a finite cover. Let $W_{1}, \dots, W_{N}$ be any exhaustion of the compact manifold $T$ by nested open sets with the following property: the sets $V_{1} = W_{1}$, $V_{2} = W_{2}-W_{1}$, ..., $V_{N} = W_{N} - W_{N-1}$ are such that for every $j$ the open set $U_j := \cup_{t \in V_{j}} \, \mathrm{Im}(G_{t})$ in $M$ is displaceable. Assume for the time being that such sets $W_i$ exist. Notice that the sets $U_j$ cover $M$ and let $w(G) := \mathrm{sup}_j w(U_j) < \infty$. Now let $F$ be a partition of unity subordinated to $G$ and consider the functions $\int_{V_{1}} F_{t} dt$, ...\,, $\int_{V_{N}} F_{t} dt$ which form a partition of unity on $M$ subordinated to the $U_j$'s. As in the previous theorem, one estimates:
{\beta}gin{align}
\notag 1 &= \left| \zeta(1) - 0 \right| = \left| \zeta \left(\int_{W_{N}} F_{t}dt \right) - \zeta \left( \int_{W_{1}} F_{t}dt \right) \right| \\
\notag &\le {\frak {su}}m_{k=1}^{N-1} \left| \zeta \left(\int_{W_{k+1}} F_{t}dt \right) - \zeta \left( \int_{W_{k}} F_{t}dt \right) - \underset{0}{\underbrace{\zeta \left( \int_{V_{k+1}} F_{t}dt \right)}}\right| \\
\notag & \le {\frak {su}}m_{k=1}^{N-1} \sqrt{2 \, w(G) \, {\partial}b(F)} \le N \sqrt{2 \, w(G) \, {\partial}b(F)} \, .
\end{align}
\noindent Since this is true for all $F < G$, and since $2N^2$ depends only on $G$ (through the choice of the $W_j$'s), the result follows with $c(G) := (2N^2 w(G))^{-1}$.
The sets $W_{j}$'s exist for the following reason. The closure of each $G_{t}(U)$ is a compact displaceable set, so that some open neighborhood $O_t$ of this set is displaceable. By the continuity of the cover $G$, for any $t$ there exists an open set $Y_t \subset T$ containing $t$ such that $G(Y_t \times U) \subset O_t$. Since $T$ is compact, a finite number of these $Y_t$ suffice to cover $T$, say $Y_1, \dots, Y_N$. Set $W_j = \cup_{k=1}^{j} Y_k$. Since $V_j \subset Y_j$, the sets $G(V_j \times U)$ are indeed displaceable. This concludes the proof.
{\bf Q}ED
It is natural to compare the ${\partial}b$ invariant of different smooth covers of type $(T,U)$, especially if they are related to each other by a smooth family of smooth covers of the same type. This might help in understanding what is the 'optimal' way to cover a symplectic manifold $(M, {\omega}ega)$ by copies of a set $U$. We are led to the following definition which lies at the heart of this article:
{\beta}gin{Definition} A \textit{constraint} on smooth covers of $M$ of type $(T,U)$ is a set $C$ of such covers; the set of all smooth covers of type $(T,U)$ corresponds to the unconstrained case. Considering the $C^r$-Whitney topology on the space of smooth covers $G : T \times U \to M$, a \textit{constrained class of smooth covers of $M$ of type $(T,U)$} is defined as a connected component of the given constraint. We define the ${\partial}b$ invariant of a (constrained) class $A$ as the infimum of ${\partial}b(G)$ when $G$ runs over all smooth covers in $A$. \end{Definition}
As an instance of a constraint, we shall consider later on the one given by asking for each embedding $G_t : (U^{2n}, {\omega}ega_0) \hookrightarrowrightarrow (M^{2n}, {\omega}ega)$ to be symplectic. The obvious difficulty with this last notion of pb invariant is that it intertwines four extrema: the supremum in the definition of the $C^0$-norm, the supremum over coefficients, the infimum over partitions of unity and the infimum over the smooth cover in the class. As a consequence of this difficulty, it is not clear if this number is strictly positive for every $M$, a problem which is related to Polterovich's conjecture; however, this number is now known to be positive for closed surfaces, as Polterovich's conjecture was recently proved valid in this context by Buhovsky, Tanny and Logunov \cite{BLT} and by the second author for genera $g {\mathcal g}e 1$ \cite{Pa}.
\section{Equivalence of the smooth and discrete settings}
This section is mainly devoted to the proof of Theorem 13 below which can be summarized as follows: the pb invariant of any class of $T$-covers is equal to the pb invariant of an affiliated class of discrete covers.
Fix a pair $(T,U)$. Any constraint $C$ of type $(T,U)$ determines the subset of \textit{constrained embeddings}
\[ C^* := \{ G_t : U \hookrightarrowrightarrow M \, | \, t \in T, \, G \in C \} {\frak {su}}bset \mathrm{Emb}(U, M) \, . \]
\noindent Any section of the natural map $T \to {\partial}i_0(T)$ -- which associates to $t \in T$ the connected component to which it belongs -- induces a well-defined, \textit{i.e.} section-independent, map $p_T : {\partial}i_0(C) \to [{\partial}i_0(T), C^*] {\sigma}meq {\partial}i_0(C^*)^{{\partial}i_0(T)}$. An element $A \in {\partial}i_0(C)$ is just a constrained class of covers, and the element $A^* = p_T(A) \in \mathrm{Im}(p_T)$ corresponds to the $|{\partial}i_0(T)|$ (not necessarily distinct) connected components of $C^*$ from which open sets the smooth covers in $A$ are built. Denote by $B$ the subset of ${\partial}i_0(C^*)$ which is the image of $A^*$.
Thus $B$ comprises sufficiently many open sets to cover the whole of $M$.
| 3,977 | 26,551 |
en
|
train
|
0.162.4
|
{\beta}gin{align}
\notag 1 &= \left| \zeta(1) - 0 \right| = \left| \zeta \left(\int_{W_{N}} F_{t}dt \right) - \zeta \left( \int_{W_{1}} F_{t}dt \right) \right| \\
\notag &\le {\frak {su}}m_{k=1}^{N-1} \left| \zeta \left(\int_{W_{k+1}} F_{t}dt \right) - \zeta \left( \int_{W_{k}} F_{t}dt \right) - \underset{0}{\underbrace{\zeta \left( \int_{V_{k+1}} F_{t}dt \right)}}\right| \\
\notag & \le {\frak {su}}m_{k=1}^{N-1} \sqrt{2 \, w(G) \, {\partial}b(F)} \le N \sqrt{2 \, w(G) \, {\partial}b(F)} \, .
\end{align}
\noindent Since this is true for all $F < G$, and since $2N^2$ depends only on $G$ (through the choice of the $W_j$'s), the result follows with $c(G) := (2N^2 w(G))^{-1}$.
The sets $W_{j}$'s exist for the following reason. The closure of each $G_{t}(U)$ is a compact displaceable set, so that some open neighborhood $O_t$ of this set is displaceable. By the continuity of the cover $G$, for any $t$ there exists an open set $t \in Y_t {\frak {su}}bset T$ such that $G(Y_t \times U) {\frak {su}}bset O_t$. Since $T$ is compact, only a finite number of these $Y_t$ suffices to cover $T$, say $Y_1, \dots, Y_N$. Set $W_j = \cup_{k=1}^{j} Y_k$. Since $V_j {\frak {su}}bset Y_j$, the sets $G(V_j \times U)$ are indeed displaceable. This concludes the proof.
{\bf Q}ED
It is natural to compare the ${\partial}b$ invariant of different smooth covers of type $(T,U)$, especially if they are related to each other by a smooth family of smooth covers of the same type. This might help in understanding what is the 'optimal' way to cover a symplectic manifold $(M, {\omega}ega)$ by copies of a set $U$. We are led to the following definition which lies at the heart of this article:
{\beta}gin{Definition} A \textit{constraint} on smooth covers of $M$ of type $(T,U)$ is a set $C$ of such covers; the set of all smooth covers of type $(T,U)$ corresponds to the unconstrained case. Considering the $C^r$-Whitney topology on the space of smooth covers $G : T \times U \to M$, a \textit{constrained class of smooth covers of $M$ of type $(T,U)$} is defined as a connected component of the given constraint. We define the ${\partial}b$ invariant of a (constrained) class $A$ as the infimum of ${\partial}b(G)$ when $G$ runs over all smooth covers in $A$. \end{Definition}
As an instance of a constraint, we shall consider later on the one given by asking for each embedding $G_t : (U^{2n}, {\omega}ega_0) \hookrightarrowrightarrow (M^{2n}, {\omega}ega)$ to be symplectic. The obvious difficulty with this last notion of pb invariant is that it intertwines four extrema: the supremum in the definition of the $C^0$-norm, the supremum over coefficients, the infimum over partitions of unity and the infimum over the smooth cover in the class. As a consequence of this difficulty, it is not clear if this number is strictly positive for every $M$, a problem which is related to Polterovich's conjecture; however, this number is now known to be positive for closed surfaces, as Polterovich's conjecture was recently proved valid in this context by Buhovsky, Tanny and Logunov \cite{BLT} and by the second author for genera $g {\mathcal g}e 1$ \cite{Pa}.
\section{Equivalence of the smooth and discrete settings}
This section is mainly devoted to the proof of Theorem 13 below which can be summarized as follows: the pb invariant of any class of $T$-covers is equal to the pb invariant of an affiliated class of discrete covers.
Fix a pair $(T,U)$. Any constraint $C$ of type $(T,U)$ determines the subset of \textit{constrained embeddings}
\[ C^* := \{ G_t : U \hookrightarrowrightarrow M \, | \, t \in T, \, G \in C \} {\frak {su}}bset \mathrm{Emb}(U, M) \, . \]
\noindent Any section of the natural map $T \to {\partial}i_0(T)$ -- which associates to $t \in T$ the connected component to which it belongs -- induces a well-defined, \textit{i.e.} section-independent, map $p_T : {\partial}i_0(C) \to [{\partial}i_0(T), C^*] {\sigma}meq {\partial}i_0(C^*)^{{\partial}i_0(T)}$. An element $A \in {\partial}i_0(C)$ is just a constrained class of covers, and the element $A^* = p_T(A) \in \mathrm{Im}(p_T)$ corresponds to the $|{\partial}i_0(T)|$ (not necessarily distinct) connected components of $C^*$ from which open sets the smooth covers in $A$ are built. Denote by $B$ the subset of ${\partial}i_0(C^*)$ which is the image of $A^*$.
Thus $B$ comprises sufficiently many open sets to cover the whole of $M$.
Let ${\lambda}ngle 1, n \rangle = [1,n] \cap \mathbb{N}$. Considering the natural map $q: C^* \to {\partial}i_0(C^*)$, for $B {\frak {su}}bset {\partial}i_0(C^*)$ let $B' = q^{-1}(B) {\frak {su}}bset C^*$. Assuming that $B'$ comprises enough open sets to cover $M$, define
\[ {\partial}b_{\mathrm{discrete}}(B) := \inf \, \{ \, {\partial}b(G) \, | \, \exists n \in \mathbb{N}, \, G : {\lambda}ngle 1, n \rangle \to B' \mbox{ a cover of $M$ } \} \, . \]
\noindent To simplify the notations, we will, in the sequel, denote the set $B$ by the same symbol $A^*$.
{\beta}gin{Theorem} [Equivalence smooth-discrete]
Let $M$ be a symplectic manifold of dimension $2n$, $U$ an open subset of ${\bf R}^{2n}$ as mentioned above, and $T$ a compact manifold of strictly positive dimension endowed with a Lebesgue measure $\mu$ of total mass $1$. Consider a constraint $C$ on smooth covers of $M$ of type $(T,U)$, let $A \in {\partial}i_0(C)$ be a constrained class of such covers and write $A^* = p_T(A) {\frak {su}}bset {\partial}i_0(C^*)$. Then
\[ {\partial}b(A) = {\partial}b_{\mathrm{discrete}}(A^*) \; . \]
\end{Theorem}
{\partial}roof
We first prove ${\partial}b(A) {\mathcal g}e {\partial}b_{\mathrm{discrete}}(A^*)$. Let $G$ be a smooth cover of type $(T,U)$ in the constrained class $A$ and consider a smooth partition of unity $F < G$. By property (2) in the definition of a partition of unity and by continuity of $G$, we deduce that for each $t \in T$ there is an open set $t \in B_t {\frak {su}}bset T$ such that $\mathrm{supp}(G_t^*(F_s)) {\frak {su}}bset U$ for all $s \in B_t$. Since $T$ is compact, there is a finite set $T' = \{t_1, \dots, t_n\} {\frak {su}}bset T$ such that the collection $B = \{B_{t_1}, \dots, B_{t_n}\}$ covers $T$. Consider a partition of unity $\rho = \{\rho_1, \dots, \rho_n\}$ on $T$ subordinated to $B$ and for each $t_i$ define
\[ F'_i : M \to [0, \infty) : x \mapsto F'_i(x) = \int_T \rho_i(t)F(x,t) dt \; . \]
We observe that the collection $F' = \{F'_1, \dots, F'_n\}$ is a partition of unity on $M$ by smooth functions which is subordinated to the finite cover $G' := \left. G \right|_{T'}$ of $M$. We note that $\mathrm{Im}(G') {\frak {su}}bset (A^*)'$, where we use a notation introduced just before the statement of the theorem. For $a' = (a'_1, \dots, a'_n) \in [-1,1]^n$, the quantity $a := {\frak {su}}m_{i=1}^n a'_i \rho_i : T \to [-1,1]$ is a $T$-weight. For $a',b' \in [-1,1]^n$ we easily compute
{\beta}gin{align}
\notag \left\{ \int_T a(t) F_t dt \, , \, \int_T b(u) F_u du \right\} &= \left\{ {\frak {su}}m_{i=1}^n a'_i F'_i \, , \, {\frak {su}}m_{j=1}^n b'_j F'_j \right\} \; .
\end{align}
Taking the suprema over weights thus yields ${\partial}b(F) {\mathcal g}e {\partial}b(F')$, while taking the infima over partitions of unities yields ${\partial}b(G) {\mathcal g}e {\partial}b(G')$. Taking the infima over covers in classes $A$ and $A^*$ finally yields ${\partial}b(A) {\mathcal g}e {\partial}b_{\mathrm{discrete}}(A^*)$.
We now prove ${\partial}b(A) \le {\partial}b_{\mathrm{discrete}}(A^*)$. Let $G' : {\lambda}ngle 1, n \rangle \to (A^*)'$ be a finite cover of $M$ and let $F' = \{F'_1, \dots, F'_n\}$ be a partition of unity subordinated to $G'$. Since $A^* = p_T(A)$, there exists a smooth cover $G''$ of $M$ of type $(T,U)$ in the constrained class $A \in {\partial}i_0(C)$. Interpreting $A^* = \{A^*_1, \dots, A^*_m\}$ as a collection of connected components of $C^*$, for each connected component $A^*_i$ we can associate a point $t''_i \in T$ such that $G''_{t''_i} \in A^*_i$. From this association we can get an injective map ${\lambda}ngle 1, n \rangle \to T$ which associates to the integer $j$ a point $t'_j$ in the same connected component as the point $t''_i$, with $A^*_i \ni G'_j$. Call the image of this map $T' {\frak {su}}bset T$.
From these data we shall construct a smooth cover $G$ of type $(T,U)$ in the class $A$ which could act as a substitute for $G'$, in the sense that $\left. G \right|_{T'} = G'$. In fact, we shall define a smooth family $G_s$ of covers of type $(T,U)$ with $s \in [0,1]$ so that $G_0 = G''$ and $G_1 = G$, thereby illustrating that $G$ is indeed in the constrained class $A$. Fix a Riemannian metric on $T$. Observe that smoothly deforming $G''$ within $A$ if necessary, we can assume that $G''$ is constant in an $\epsilon$-neighbourhood of $T'$. If some connected component of $T$ contains none of the points $t'_j$, just set $G_s = G''$ on that component. For any other connected component of $T$, say the one containing $t''_i$, pick a Riemannian metric on it and consider disjoint embedded closed geodesic $\epsilon$-balls centred at the points $t'_j$. Outside the reunion of these balls, set again $G_s = G''$, whereas on the ball containing $t'_j$ define $G_s$ as follows. First choose a smooth path $g_j : [0, \epsilon] \to A^*_i$ such that $g_j(0) = G'_j$ and $g_j(\epsilon) = G''(t'_j)$. Also pick a smooth function $\chi : [0, \epsilon] \to [0,1]$ such that $\chi(u) = 1$ if $u < \epsilon/3$ and $\chi(u) = 0$ if $u > 2\epsilon/3$. Denoting $r(p)$ the radial distance in the $j$-th ball of a point $p$ from $t'_j$, set on that ball $G_s(p) = g_j([1 - (1- \chi(s \epsilon)) \chi(r)]\epsilon)$. This completely defines the family $G_s$ in the way we desired.
We observe that $G$ is constant on an $(\epsilon/3)$-neighbourhood of each $t'_j$. For each $j$, pick a smooth positive function $\rho_j$ with support in the $(\epsilon/3)$-ball about $t'_j$ and which integrates to $1$. We define the smooth function $F : T \times M \to [0, \infty)$ as $F(t,m) = {\frak {su}}m_{j=1}^n \rho_j(t) F'_j(m)$. We easily verify that this is a smooth partition of unity subordinated to $G$.
For any $T$-weight $a : T \to [-1,1]$, define $a' = (a'_1, \dots, a'_n) \in [-1,1]^n$ via $a'_j = \int_T a(t) \rho_j(t) dt$. For $T$-weights $a$ and $b$ we then easily compute
{\beta}gin{align}
\notag \left\{ {\frak {su}}m_{i=1}^n a'_i F'_i \, , \, {\frak {su}}m_{j=1}^n b'_j F'_j \right\} &= \left\{ \int_T a(t) F_t dt \, , \, \int_T b(u) F_u du \right\} \; .
\end{align}
Taking the suprema over weights thus yields ${\partial}b(F') {\mathcal g}e {\partial}b(F)$, while taking the infima over partitions of unities yields ${\partial}b(G') {\mathcal g}e {\partial}b(G)$. Taking the infima over covers in classes $A^*$ and $A$ finally yields ${\partial}b_{\mathrm{discrete}}(A^*) {\mathcal g}e {\partial}b(A)$.
{\bf Q}ED
| 3,844 | 26,551 |
en
|
train
|
0.162.5
|
We now prove ${\partial}b(A) \le {\partial}b_{\mathrm{discrete}}(A^*)$. Let $G' : {\lambda}ngle 1, n \rangle \to (A^*)'$ be a finite cover of $M$ and let $F' = \{F'_1, \dots, F'_n\}$ be a partition of unity subordinated to $G'$. Since $A^* = p_T(A)$, there exists a smooth cover $G''$ of $M$ of type $(T,U)$ in the constrained class $A \in {\partial}i_0(C)$. Interpreting $A^* = \{A^*_1, \dots, A^*_m\}$ as a collection of connected components of $C^*$, for each connected component $A^*_i$ we can associate a point $t''_i \in T$ such that $G''_{t''_i} \in A^*_i$. From this association we can get an injective map ${\lambda}ngle 1, n \rangle \to T$ which associates to the integer $j$ a point $t'_j$ in the same connected component as the point $t''_i$, with $A^*_i \ni G'_j$. Call the image of this map $T' {\frak {su}}bset T$.
From these data we shall construct a smooth cover $G$ of type $(T,U)$ in the class $A$ which could act as a substitute for $G'$, in the sense that $\left. G \right|_{T'} = G'$. In fact, we shall define a smooth family $G_s$ of covers of type $(T,U)$ with $s \in [0,1]$ so that $G_0 = G''$ and $G_1 = G$, thereby illustrating that $G$ is indeed in the constrained class $A$. Fix a Riemannian metric on $T$. Observe that smoothly deforming $G''$ within $A$ if necessary, we can assume that $G''$ is constant in an $\epsilon$-neighbourhood of $T'$. If some connected component of $T$ contains none of the points $t'_j$, just set $G_s = G''$ on that component. For any other connected component of $T$, say the one containing $t''_i$, pick a Riemannian metric on it and consider disjoint embedded closed geodesic $\epsilon$-balls centred at the points $t'_j$. Outside the reunion of these balls, set again $G_s = G''$, whereas on the ball containing $t'_j$ define $G_s$ as follows. First choose a smooth path $g_j : [0, \epsilon] \to A^*_i$ such that $g_j(0) = G'_j$ and $g_j(\epsilon) = G''(t'_j)$. Also pick a smooth function $\chi : [0, \epsilon] \to [0,1]$ such that $\chi(u) = 1$ if $u < \epsilon/3$ and $\chi(u) = 0$ if $u > 2\epsilon/3$. Denoting $r(p)$ the radial distance in the $j$-th ball of a point $p$ from $t'_j$, set on that ball $G_s(p) = g_j([1 - (1- \chi(s \epsilon)) \chi(r)]\epsilon)$. This completely defines the family $G_s$ in the way we desired.
We observe that $G$ is constant on an $(\epsilon/3)$-neighbourhood of each $t'_j$. For each $j$, pick a smooth positive function $\rho_j$ with support in the $(\epsilon/3)$-ball about $t'_j$ and which integrates to $1$. We define the smooth function $F : T \times M \to [0, \infty)$ as $F(t,m) = {\frak {su}}m_{j=1}^n \rho_j(t) F'_j(m)$. We easily verify that this is a smooth partition of unity subordinated to $G$.
For any $T$-weight $a : T \to [-1,1]$, define $a' = (a'_1, \dots, a'_n) \in [-1,1]^n$ via $a'_j = \int_T a(t) \rho_j(t) dt$. For $T$-weights $a$ and $b$ we then easily compute
{\beta}gin{align}
\notag \left\{ {\frak {su}}m_{i=1}^n a'_i F'_i \, , \, {\frak {su}}m_{j=1}^n b'_j F'_j \right\} &= \left\{ \int_T a(t) F_t dt \, , \, \int_T b(u) F_u du \right\} \; .
\end{align}
Taking the suprema over weights thus yields ${\partial}b(F') {\mathcal g}e {\partial}b(F)$, while taking the infima over partitions of unities yields ${\partial}b(G') {\mathcal g}e {\partial}b(G)$. Taking the infima over covers in classes $A^*$ and $A$ finally yields ${\partial}b_{\mathrm{discrete}}(A^*) {\mathcal g}e {\partial}b(A)$.
{\bf Q}ED
\section{Independence on the probability space}
The equivalence of the smooth and of the discrete settings suggests that the pb invariants might be independent from the underlying probability space $T$ parametrising the smooth covers. The purpose of this section is to make this idea precise.
{\beta}gin{proposition}
Let $M$ be a symplectic manifold of dimension $2n$, $U$ an open subset of ${\bf R}^{2n}$ as mentioned above, and $T_1$ and $T_2$ be compact manifolds of strictly positive dimension each endowed with a smooth volume form of total mass $1$. Consider constraints $C_1$ and $C_2$ on smooth covers of $M$ of type $(T_1,U)$ and $(T_2, U)$, respectively. Let $A_i \in {\partial}i_0(C_i)$, $i=1,2$, be constrained classes and assume that the corresponding sets of embeddings $(A_i^*)' {\frak {su}}bset C^*_i {\frak {su}}bset \mathrm{Emb}(U, M)$ coincide in the latter space. Then
\[ {\partial}b(A_1) = {\partial}b(A_2) \; . \]
\end{proposition}
{\partial}roof
It follows from Theorem 13 that ${\partial}b(A_i) = {\partial}b_{\mathrm{discrete}}(A_i^*)$, $i=1,2$. Looking at the definition, ${\partial}b_{\mathrm{discrete}}(A_i^*)$ only depends on the set $(A_i^*)'$, which is itself assumed to be independent from $i$.
{\bf Q}ED
Next we discuss special sorts of constraints which not only frequently appear in practice, but also for which the hypothesis in the previous proposition follows from a somewhat less stringent assumption.
{\beta}gin{Definition}
A constraint $C$ on covers of type $(T,U)$ is \textit{prime} if there exists a set $C' {\frak {su}}bset \mathrm{Emb}(U, M)$ such that $G \in C$ if and only if $G_t \in C'$ for every $t \in T$. In other words, $C$ is prime if it is the largest constraint such that $C^* {\frak {su}}bset C'$ (equivalently, $C^* = C'$).
\end{Definition}
\noindent We point out that $C'$ thus admits sufficiently many open sets to cover the whole of $M$. Conversely, given a set $C' {\frak {su}}bset \mathrm{Emb}(U, M)$ which admits sufficiently many open sets to cover $M$ and a probability space $T$, it is not guaranteed that there exists a constraint $C$ of covers of type $(T,U)$ (let alone a prime one) such that $C^* = C'$; this happens if $|{\partial}i_0(C')| > |{\partial}i_0(T)|$ and if no reunion of $|{\partial}i_0(T)|$ connected components of $C'$ has sufficiently many open sets to cover $M$. In comparison, as long as $|{\partial}i_0(C')|$ is finite, we can always find a discrete cover of $M$ made of open sets in $C'$. Note that this is however the only obstacle: given a set $C' {\frak {su}}bset \mathrm{Emb}(U, M)$ such that there exists a smooth cover $G$ of $M$ of type $(T,U)$ with $G_* : {\partial}i_0(T) \to {\partial}i_0(C')$ well-defined and surjective, then $C' = C^*$ for some (prime) constraint $C$ on covers of type $(T,U)$.
{\beta}gin{Definition}
A prime constraint $C$ on covers of type $(T,U)$ with $C^* = C' {\frak {su}}bset \mathrm{Emb}(U,M)$ is \textit{filled} if there is $G \in C$ such that the map $G_* : {\partial}i_0(T) \to {\partial}i_0(C')$ is surjective. By extension, we say that $C'$ is \textit{filled by $T$} if the associated prime constraint $C$ of type $(T,U)$ is filled.
\end{Definition}
{\beta}gin{corollary}
Let $M$ be a symplectic manifold of dimension $2n$, $U$ an open subset of ${\bf R}^{2n}$ as mentioned above, and $T_1$ and $T_2$ be compact manifolds of strictly positive dimension each endowed with a smooth volume form of total mass $1$. Consider constraints $C_1$ and $C_2$ on smooth covers of $M$ of type $(T_1,U)$ and $(T_2, U)$, respectively. Let $C' {\frak {su}}bset \mathrm{Emb}(U,M)$ be filled by both $T_1$ and $T_2$ and consider the corresponding prime constraints $C_1$ and $C_2$. Let $A_i \in {\partial}i_0(C_i)$, $i=1,2$, be constrained classes and assume that the corresponding sets of embeddings $(A_i^*)' {\frak {su}}bset C^*_i {\frak {su}}bset \mathrm{Emb}(U, M)$ coincide in the latter space. Then
\[ {\partial}b(A_1) = {\partial}b(A_2) \; . \]
\end{corollary}
{\beta}gin{Remark} {\lambda}bel{independence}
For one application of this corollary, note that $C'$ is filled by any probability space $T$ of strictly positive dimension whenever $C'$ is connected and contains sufficiently many open sets to cover $M$. In that case $(A^*)'=C'$ for any $A \in {\partial}i_0(C)$ (where $C$ is the prime and filled constraint associated with $C'$), since in fact $|{\partial}i_0(C)|=1$. As a consequence, when $C'$ is not necessarily connected but each of its components contains sufficiently many embeddings to cover $M$, then the restriction of ${\partial}b$ to prime constrained classes of covers parametrised by \textit{connected} $T$ comes from a function on ${\partial}i_0(C')$.
\end{Remark}
\section{The behaviour of ${\partial}b$ on symplectic balls}
For the rest of this article, we only\footnote{The results of this section can however be easily adapted for star-shaped domain $U {\frak {su}}bset \mathbb{R}^{2n}$.} consider $U = U(c) = B^{2n}(c)$, that is the standard symplectic ball of capacity $c = {\partial}i r^2$ (where $r$ is the radius). We also only consider (smooth) \textit{symplectic} covers, that is covers $G$ of type $(T,U)$ satisfying the symplectic prime constraint $C$ given as follows: $G \in C$ if $G_t \in C' = \mathrm{Emb}_{{\omega}ega}(U, M)$ for every $t \in T$. We shall write $U(c)$, $C(c)$ and $C'(c)$ when we want to stress the dependence on $c$.
Of special interest are the cases when $T = S^n$ for some $n {\mathcal g}e 1$. A constrained class $A$ of $C$ determines a connected component\footnote{It is still a conjecture, that we shall dub the \textit{symplectic camel conjecture}, whether $C'(c)$ is connected (whenever nonempty) when $(M, {\omega}ega)$ is compact and for any $c$.} $A' = p_T(A) {\frak {su}}bset C'$, and determines in fact an element of the $n$-th homotopy group ${\partial}i_n(A')$. Conversely, since $M$ is compact and using the fact that the group $\mathrm{Symp}(M, {\omega}ega)$ is $k$-transitive for all $k \in \mathbb{N}$, any element in ${\partial}i_n(C')$ can be represented by some class $A \in {\partial}i_0(C)$. The pb-invariants of symplectically constrained classes hence allow to probe the homotopic properties of $C'(c)$, properties which might change with $c$. Consequently, it appears important to better understand how the pb-invariants depend on the capacity $c$. This behavior of ${\partial}b$ on $c$ is the main question raised in this paper.
However, invoking {\bf C}ref{independence} and again the $k$-transitivity of $\mathrm{Symp}(M, {\omega}ega)$, we deduce that for any connected probability space $T$ there is a bijective correspondence between ${\partial}i_0(C)$ and ${\partial}i_0(C')$. We can thus interpret the ${\partial}b$ functional on smooth covers of type $(T,U(c))$ parametrised by connected spaces $T$ simply as a map ${\partial}b : {\partial}i_0(C'(c)) \to [0, \infty)$, the latter being clearly independent from $T$. It therefore appears that the ${\partial}b$-invariants can only probe the homotopy type of $C'(c)$ in a crude way.
Let $c_{max}$ denote the largest capacity a symplectic (open) ball embedded in $M$ can have; that can be much smaller than the one implied by the volume constraint $\mathrm{Vol}(U(c)) \le \mathrm{Vol}(M, {\omega}ega)$, according to the Non-Squeezing Theorem. For $0 < c < c' < c_{max}$ the obvious inclusion $U(c') {\frak {su}}bset U(c)$ induces the restriction map $C'(c) \to C'(c')$ and hence also $r_{c,c'} : {\partial}i_0(C'(c)) \to {\partial}i_0(C'(c'))$.
{\beta}gin{Definition} The \textit{tree of path-connected classes of symplectic embeddings of $U$ in $M$} is the set
\[ \Psi(U,M) := \bigsqcup_{c \in (0, c_{max})} \{c\} \times {\partial}i_0(C'(c)) \; . \]
A \textit{(short) branch of $\Psi(U,M)$} is a continuous path ${\beta}ta : (0, c_{{\beta}ta}) \to \Psi(U,M) : c \mapsto (c, A^*_{{\beta}ta}(c))$ such that $r_{c,c'}(A^*_{{\beta}ta}(c)) = A^*_{{\beta}ta}(c')$.
\end{Definition}
| 3,901 | 26,551 |
en
|
train
|
0.162.6
|
\section{The behaviour of ${\partial}b$ on symplectic balls}
For the rest of this article, we only\footnote{The results of this section can however be easily adapted for star-shaped domain $U {\frak {su}}bset \mathbb{R}^{2n}$.} consider $U = U(c) = B^{2n}(c)$, that is the standard symplectic ball of capacity $c = {\partial}i r^2$ (where $r$ is the radius). We also only consider (smooth) \textit{symplectic} covers, that is covers $G$ of type $(T,U)$ satisfying the symplectic prime constraint $C$ given as follows: $G \in C$ if $G_t \in C' = \mathrm{Emb}_{{\omega}ega}(U, M)$ for every $t \in T$. We shall write $U(c)$, $C(c)$ and $C'(c)$ when we want to stress the dependence on $c$.
Of special interest are the cases when $T = S^n$ for some $n {\mathcal g}e 1$. A constrained class $A$ of $C$ determines a connected component\footnote{It is still a conjecture, that we shall dub the \textit{symplectic camel conjecture}, whether $C'(c)$ is connected (whenever nonempty) when $(M, {\omega}ega)$ is compact and for any $c$.} $A' = p_T(A) {\frak {su}}bset C'$, and determines in fact an element of the $n$-th homotopy group ${\partial}i_n(A')$. Conversely, since $M$ is compact and using the fact that the group $\mathrm{Symp}(M, {\omega}ega)$ is $k$-transitive for all $k \in \mathbb{N}$, any element in ${\partial}i_n(C')$ can be represented by some class $A \in {\partial}i_0(C)$. The pb-invariants of symplectically constrained classes hence allow to probe the homotopic properties of $C'(c)$, properties which might change with $c$. Consequently, it appears important to better understand how the pb-invariants depend on the capacity $c$. This behavior of ${\partial}b$ on $c$ is the main question raised in this paper.
However, invoking {\bf C}ref{independence} and again the $k$-transitivity of $\mathrm{Symp}(M, {\omega}ega)$, we deduce that for any connected probability space $T$ there is a bijective correspondence between ${\partial}i_0(C)$ and ${\partial}i_0(C')$. We can thus interpret the ${\partial}b$ functional on smooth covers of type $(T,U(c))$ parametrised by connected spaces $T$ simply as a map ${\partial}b : {\partial}i_0(C'(c)) \to [0, \infty)$, the latter being clearly independent from $T$. It therefore appears that the ${\partial}b$-invariants can only probe the homotopy type of $C'(c)$ in a crude way.
Let $c_{max}$ denote the largest capacity a symplectic (open) ball embedded in $M$ can have; that can be much smaller than the one implied by the volume constraint $\mathrm{Vol}(U(c)) \le \mathrm{Vol}(M, {\omega}ega)$, according to the Non-Squeezing Theorem. For $0 < c < c' < c_{max}$ the obvious inclusion $U(c') {\frak {su}}bset U(c)$ induces the restriction map $C'(c) \to C'(c')$ and hence also $r_{c,c'} : {\partial}i_0(C'(c)) \to {\partial}i_0(C'(c'))$.
{\beta}gin{Definition} The \textit{tree of path-connected classes of symplectic embeddings of $U$ in $M$} is the set
\[ \Psi(U,M) := \bigsqcup_{c \in (0, c_{max})} \{c\} \times {\partial}i_0(C'(c)) \; . \]
A \textit{(short) branch of $\Psi(U,M)$} is a continuous path ${\beta}ta : (0, c_{{\beta}ta}) \to \Psi(U,M) : c \mapsto (c, A^*_{{\beta}ta}(c))$ such that $r_{c,c'}(A^*_{{\beta}ta}(c)) = A^*_{{\beta}ta}(c')$.
\end{Definition}
We can therefore define a function ${\partial}b : \Psi(U,M) \to [0, \infty)$ in the obvious way. Given a branch ${\beta}ta$ with domain $(0, c_{{\beta}ta})$, we can define a map ${\partial}b_{{\beta}ta} = {\partial}b \circ {\beta}ta : (0, c_{{\beta}ta}) \to [0, \infty)$.
{\beta}gin{Theorem} Given any branch ${\beta}ta$, the function ${\partial}b_{{\beta}ta}$ is non-increasing, upper semi-continuous and left-continuous.
\end{Theorem}
{\partial}roof
(a) Let us first show that the function is non-increasing. Fix $0 < c' < c < c_{{\beta}ta}$ and let $\epsilon > 0$. From the work done above and with the interpretation of $A^*_{{\beta}ta}(c)$ as a connected component of $C'(c)$, there exists a discrete cover $G' : {\lambda}ngle 1, n \rangle \to A^*_{{\beta}ta}(c')$ of $M$ such that ${\partial}b(G') < {\partial}b_{{\beta}ta}(c') + \epsilon$. We claim that this cover refines a cover $G : {\lambda}ngle 1, n \rangle \to A^*_{{\beta}ta}(c)$ of $M$; assuming this for the moment, we would then have
\[ {\partial}b_{{\beta}ta}(c) \le {\partial}b(G) \le {\partial}b(G') < {\partial}b_{{\beta}ta}(c') + \epsilon \; . \]
As this holds for any $\epsilon > 0$, we get ${\partial}b_{{\beta}ta}(c) \le {\partial}b_{{\beta}ta}(c')$ \textit{i.e.} ${\partial}b_{{\beta}ta}$ is non-increasing.
To prove the existence of $G$, consider a symplectic embedding $B \in A^*_{{\beta}ta}(c)$. Since ${\beta}ta$ is a branch, the restriction $B'$ of $B$ to $U(c')$ is an embedding in $A^*_{{\beta}ta}(c')$; the latter space being a connected component of $C'(c')$ with respect to the Whitney $C^r$-topology, for each $j \in {\lambda}ngle 1, n \rangle$ there is smooth path of symplectic embeddings of $U(c')$ into $M$ joining $B'$ to $G'(j)$. By the symplectic isotopy extension theorem, each of these paths extends to a global symplectic isotopy on $M$, which thus sends $B$ to an embedding $G(j)$ of $U(c)$ into $M$. Clearly $G$ is a discrete cover of $M$ refined by $G'$.
(b) Now let us show that for every $c \in (0, c_{{\beta}ta})$, the function ${\partial}b_{{\beta}ta}$ is upper semi-continuous at $c$, \textit{i.e.} $\limsup_{c' \to c} {\partial}b_{{\beta}ta}(c') \le {\partial}b_{{\beta}ta}(c)$.
On the one hand, it follows from part (a) that ${\partial}b_{{\beta}ta}(c)$ is greater than or equal to all limits of ${\partial}b_{{\beta}ta}$ from the right. On the other hand, for any $\epsilon > 0$, there are a discrete cover $G$ representing $A^*_{{\beta}ta}(c)$ and a partition of unity $F < G$ such that ${\partial}b(F) < {\partial}b_{{\beta}ta}(c) + \epsilon$. In fact, by our definition of a partition of unity, there is a strictly smaller capacity $c' < c$ such that the support of $F$ is compact inside the open ball $U(c') {\frak {su}}bset U(c)$. Transporting the data to the restriction of the pair $(G,F)$ to $U(c'')$ for any $c'' \in [c',c]$, one gets
$${\partial}b_{{\beta}ta}(c'') \le {\partial}b_{{\beta}ta}(c) + \epsilon.$$
\noindent Since the choice of $c'$ indirectly depends on $\epsilon > 0$ through $F$, and might get as close to $c$ when $\epsilon$ approaches to zero, we do not get ${\partial}b_{{\beta}ta}(c'') \le {\partial}b_{{\beta}ta}(c)$ but only that ${\partial}b_{{\beta}ta}$ is upper semi-continuous from the left.
(c) We wish to prove that ${\partial}b_{{\beta}ta}$ is in fact left-continuous, that is to say that ${\partial}b_{{\beta}ta}(c)$ is equal to the limit of ${\partial}b_{{\beta}ta}(c')$ as $c'$ tends to $c$ from the left. Consider a sequence of capacities $c_i < c$ converging to $c$ with highest value $\lim {\partial}b_{{\beta}ta}(c_i)$ (the value $\infty$ is not excluded). This limit cannot be smaller than ${\partial}b_{{\beta}ta}(c)$ because otherwise it would contradict the non-increasing property. However, by upper semi-continuity, it cannot be greater than ${\partial}b_{{\beta}ta}(c)$. Therefore, it has to be equal to ${\partial}b_{{\beta}ta}(c)$.
{\bf Q}ED
With regard to the continuity of the function ${\partial}b_{{\beta}ta}$ associated to a branch ${\beta}ta$ it is not possible to be much more specific than the above Theorem, at least not when $\mathrm{dim} \, M = 2$. Indeed, in that case $c_{max} = \mathrm{Area}(M, {\omega}ega)$ and Moser's argument allows to prove that the space $C'(c) = \mathrm{Emb}_{{\omega}ega}(B^{2}(c), M)$ is connected whenever non-empty, so that there is only one maximal branch ${\beta}ta$. Polterovich's conjecture has recently been established in dimension two \cite{BLT}: in fact there is a universal constant ${{\mathcal g}amma}mma > 0$ such that ${\partial}b_{{\beta}ta}(c)c > {{\mathcal g}amma}mma$ whenever $c \le c_{max}/2$. Using the invariance of the quantity ${\partial}b_{{\beta}ta}(c)c$ upon pullback of the data under any symplectic covering map, this inequality holds even for $c > c_{max}/2$ when $M$ has genus $g {\mathcal g}e 1$ (\textit{c.f.} \cite{Pa}). However for $M = S^2$, by enlarging two opposite hemispheres one gets ${\partial}b_{{\beta}ta}(c) = 0$ when $c > c_{max}/2$. Consequently ${\partial}b_{{\beta}ta}$ is discontinuous on $S^2$, yet might be continuous on higher genus surfaces.
\section{Phase transitions and the ${\partial}b$ function}
We conclude this paper by a few speculations since they disclose the main motivation behind this article.
The first ``phase transition'' discovered in Symplectic Topology is the following one:
{\beta}gin{Theorem} (Anjos-Lalonde-Pinsonnault) In any ruled symplectic 4-manifold $(M, {\omega})$, there is a unique value $c_{crit}$ such that the infinite dimensional space $Emb(c,{\omega})$ of all symplectic embeddings of the standard closed ball of capacity $c$ in $M$ undergoes the following striking property: below $c_{crit}$, the space $Emb(c,{\omega})$ is homotopy equivalent to a finite dimensional manifold, while above that value, $Emb(c,{\omega})$ does not retract onto any finite dimensional manifold (or CW-complex) since it possesses non-trivial homology groups in dimension as high as one wishes. Below and above that critical value, the homotopy type stays the same.
\end{Theorem}
\noindent The reason for the term {\it phase transition} is still debatable, but there are several physical reasons, from Thermodynamics, to adopt that terminology.
\begin{Definition} Given a closed symplectic manifold $(M, \omega)$, let us call an {\it uncertainty phase transition} any critical value $c$ at which the space of symplectic embeddings of balls of capacity $c$ into $(M, \omega)$ undergoes a change of its homotopy type. \end{Definition}
This terminology reflects the fact that a symplectically embedded ball quantifies the uncertainty in the position and momentum of a (collection of) particle(s).
The proof of the above theorem is quite indirect: one identifies all homology classes of symplectically embedded balls through the action of two groups on them: the full group of symplectic diffeomorphisms and the subset of these that preserve a given standard ball, the latter being viewed as the group of symplectic diffeomorphisms on the blow-up. Each of these groups is computed by their action on a stratification of all compatible almost complex structures that realise holomorphically some homology classes (essentially the homology classes that cut out the symplectic manifold in simple parts). Everything boils down to the behaviour of some J-curves in the given symplectic manifold for each $J$, generic or not (the non-generic ones playing the fundamental role since only the first stratum is generic). So, for instance, some homology class of symplectically embedded balls may disappear at some capacity $c_{crit}$ because the homology class of symplectic diffeomorphisms that preserve some standard ball of capacity $c$ in $M$ vanishes when $c$ crosses $c_{crit}$. It is conceivable that the class that vanished was supporting a covering that minimized the $pb$ at that level of capacity. We know that the dimension of the homology class, i.e. the dimension of the parametrizing space $T$, plays no role by our theorem on smooth-discrete equivalence. However, it is possible that such a class, discretized or not, contained the optimal configuration of balls for a covering in order to minimize $pb$. Therefore the main question that drove us to study the $pb$ invariant in the smooth setting is:
\noindent
{\bf Question (Poisson-Uncertainty).} Is there a relation between the critical values of the Poisson bracket and the critical values (or phase transitions) of $Emb_{\omega}(B(c), M)$ as $c$ varies?
This is a natural question since the latter probes the topological changes in configurations of balls, while the former looks for $pb$-optimal configurations. We do not have in mind any direct sketch proving that there is a relation. So we must simply for the moment look at the facts. We have little material to work on, since the $pb$ conjecture has been proved (very recently) only for real surfaces, while the study of the topology of balls is known only in dimension $2$ and $4$ for ruled symplectic $4$-manifolds. Thus we may just examine the case of surfaces. In this case, there is no critical value for the phase transition, but there are for the pb-invariant, showing that the answer to the above question is negative in dimension $2$.
Small displaceable balls should not see the symplectic form; actually the space of (unparametrised) symplectic balls below the uncertainty critical value retracts to the topology of the manifold itself for ruled symplectic 4-manifolds. This refines the symplectic camel conjecture for small capacities and it leads us to state the following conjecture:
\begin{Conjecture} (The Topology conjecture). The limit of the function $c\,\pb(c)$, as $c$ tends to zero, is a finite number, and depends only on the differential topology of the symplectic manifold.
\end{Conjecture}
Now, while the Poisson-uncertainty question might have a positive answer in high dimensions, we show here that the Poisson-uncertainty question has a negative answer in dimension $2$ for the sphere.
| 4,058 | 26,551 |
en
|
train
|
0.162.7
|
\noindent The reason for the term {\it phase transition} is still debatable, but there are several physical reasons, from Thermodynamics, to adopt that terminology.
\begin{Definition} Given a closed symplectic manifold $(M, \omega)$, let us call an {\it uncertainty phase transition} any critical value $c$ at which the space of symplectic embeddings of balls of capacity $c$ into $(M, \omega)$ undergoes a change of its homotopy type. \end{Definition}
This terminology reflects the fact that a symplectically embedded ball quantifies the uncertainty in the position and momentum of a (collection of) particle(s).
The proof of the above theorem is quite indirect: one identifies all homology classes of symplectically embedded balls through the action of two groups on them: the full group of symplectic diffeomorphisms and the subset of these that preserve a given standard ball, the latter being viewed as the group of symplectic diffeomorphisms on the blow-up. Each of these groups is computed by their action on a stratification of all compatible almost complex structures that realise holomorphically some homology classes (essentially the homology classes that cut out the symplectic manifold in simple parts). Everything boils down to the behaviour of some J-curves in the given symplectic manifold for each $J$, generic or not (the non-generic ones playing the fundamental role since only the first stratum is generic). So, for instance, some homology class of symplectically embedded balls may disappear at some capacity $c_{crit}$ because the homology class of symplectic diffeomorphisms that preserve some standard ball of capacity $c$ in $M$ vanishes when $c$ crosses $c_{crit}$. It is conceivable that the class that vanished was supporting a covering that minimized the $pb$ at that level of capacity. We know that the dimension of the homology class, i.e. the dimension of the parametrizing space $T$, plays no role by our theorem on smooth-discrete equivalence. However, it is possible that such a class, discretized or not, contained the optimal configuration of balls for a covering in order to minimize $pb$. Therefore the main question that drove us to study the $pb$ invariant in the smooth setting is:
\noindent
{\bf Question (Poisson-Uncertainty).} Is there a relation between the critical values of the Poisson bracket and the critical values (or phase transitions) of $Emb_{\omega}(B(c), M)$ as $c$ varies?
This is a natural question since the latter probes the topological changes in configurations of balls, while the former looks for $pb$-optimal configurations. We do not have in mind any direct sketch proving that there is a relation. So we must simply for the moment look at the facts. We have little material to work on, since the $pb$ conjecture has been proved (very recently) only for real surfaces, while the study of the topology of balls is known only in dimension $2$ and $4$ for ruled symplectic $4$-manifolds. Thus we may just examine the case of surfaces. In this case, there is no critical value for the phase transition, but there are for the pb-invariant, showing that the answer to the above question is negative in dimension $2$.
Small displaceable balls should not see the symplectic form; actually the space of (unparametrised) symplectic balls below the uncertainty critical value retracts to the topology of the manifold itself for ruled symplectic 4-manifolds. This refines the symplectic camel conjecture for small capacities and it leads us to state the following conjecture:
\begin{Conjecture} (The Topology conjecture). The limit of the function $c\,\pb(c)$, as $c$ tends to zero, is a finite number, and depends only on the differential topology of the symplectic manifold.
\end{Conjecture}
Now, while the Poisson-uncertainty question might have a positive answer in high dimensions, we show here that the Poisson-uncertainty question has a negative answer in dimension $2$ for the sphere.
To see this, let us consider the simple situation of $(M, \omega)$ being $S^2$ with its standard symplectic form, say of area $A$. As $M$ is a surface, it satisfies the symplectic camel conjecture, which is to say that the space $\mathrm{Emb}(c, \omega)$ is connected. The Poisson bracket function is then defined for any $c \in (0, A)$. There exists on any closed symplectic manifold a spectral invariant $c$ such that $c(Id) = 0$, see Theorem 4.7.1 in \cite{PR}. It follows from that and the other properties of $c$ that the spectral width $w(U)$ of any subset $U \subset M$ satisfies $w(U) \le 4 e_H(U)$ where $e_H(U)$ is the Hofer displacement energy of $U$. For open sets in $S^2$, $e_H(U) = \mathrm{Area}(U)$ if this area is smaller than $A/2$ and $e_H(U) = \infty$ otherwise. In this context, Polterovich's conjecture (now a theorem on surfaces \cite{BLT,Pa}) states that there is a constant $C > 0$ such that for any, continuous or discrete, cover $G$ of $S^2$ by displaceable open sets, the inequality
\[ \pb(G)w(G) \ge C \; \mbox{ holds }. \]
\noindent This implies that $\pb(c) e_H(U(c)) \ge C$. Thus when $c < A/2$, we have $\pb(c) \ge 2C/A$. However, we observe that $\pb(c) = 0$ whenever $c > A/2$: two symplectic balls of capacity $c > A/2$ suffice to cover $S^2$ and the $\pb$-invariant of such a cover vanishes. Polterovich's conjecture hence goes against any claim that the Poisson bracket function $\pb(c)$ only has discontinuities when $\mathrm{Emb}(c, \omega)$ undergoes a transition in its homotopy type.
As a concluding remark, we point out that our borrowings in the thermodynamical and statistical mechanical terminology are explained by our insight that tools from these subjects might play a role in the understanding of the symplectic problems we considered in this paper. The space of symplectically embedded balls can be understood as an infinite dimensional (pre)symplectic manifold which is some sort of limit of finite dimensional ones. In this paper, continuous covers have also been understood as limits of discrete ones. It is a recurrent theme in statistical mechanics that systems with a very large number of degrees of freedom tend to behave in universal and somewhat simpler ways.
{\beta}gin{thebibliography}{1}
\bibitem{ALP} S. Anjos, F. Lalonde and M. Pinsonnault, The homotopy type of the space of symplectic balls in rational ruled 4-manifolds, {\it Geometry and Topology} {\bf 13} (2009), 1177--1227.
\bibitem{BLT} L. Buhovsky, A. Logunov and S. Tanny. Poisson Brackets of Partitions of Unity on Surfaces. Preprint arXiv:1705.02513v2.
\bibitem{EPZ} M. Entov, L. Polterovich and F. Zapolsky, Quasi-morphisms and the Poisson bracket, {\it Pure Appl. Math. Q.} {\bf 3}, 2007, 1037 -- 1055.
\bibitem{Pa} J. Payette, The geometry of the Poisson bracket invariant on surfaces. Preprint arXiv:1803.09741.
\bibitem{P1} L. Polterovich, Quantum unsharpness and symplectic rigidity, {\it Lett. Math. Physics} {\bf 102}, 2012, 245 -- 264.
\bibitem{P2} L. Polterovich, Symplectic geometry of quantum noise, {\it Comm. Math. Physics} {\bf 327}, 2014, 481 -- 519.
\bibitem{PR} L. Polterovich and D. Rosen, {\it Function Theory on Symplectic Manifolds}, CRM Monograph Series, American Mathematical Society, vol. 34, 2014.
\end{thebibliography}
\end{document}
| 2,157 | 26,551 |
en
|
train
|
0.163.0
|
\begin{document}
\title{Entropy squeezing and atomic inversion in the $k$-photon Jaynes-Cummings model in the presence of Stark shift and Kerr medium: full nonlinear approach}
\newcommand{\norm}[1]{\left\Vert#1\right\Vert}
\newcommand{\abs}[1]{\left\vert#1\right\vert}
\newcommand{\set}[1]{\left\{#1\right\}}
% NOTE(review): the short macro names originally defined here were destroyed by a
% global find-and-replace (each first argument was overwritten by its own expansion).
% The names below are reconstructed guesses — confirm against the document body,
% which still contains corrupted occurrences (e.g. \lambdambda for \lambda).
\newcommand{\R}{\mathbb R}
\newcommand{\I}{\mathbb{I}}
\newcommand{\C}{\mathbb C}
\newcommand{\eps}{\varepsilon}
\newcommand{\lra}{\longrightarrow}
\newcommand{\BX}{\mathbf{B}(X)}
\newcommand{\fH}{\mathfrak{H}}
\newcommand{\A}{\mathcal{A}}
\newcommand{\D}{\mathcal{D}}
\newcommand{\N}{\mathcal{N}}
\newcommand{\x}{\mathcal{x}}
\newcommand{\p}{\mathcal{p}}
\newcommand{\la}{\lambda}
\newcommand{\af}{a^{ }_F}
\newcommand{\afd}{a^\dag_F}
\newcommand{\afy}{a^{ }_{F^{-1}}}
\newcommand{\afdy}{a^\dag_{F^{-1}}}
\newcommand{\phin}{\phi^{ }_n}
\newcommand{\hH}{\hat{\mathcal{H}}}
\newcommand{\hHD}{\mathcal{H}}
\begin{abstract}
In this paper the interaction between a two-level atom and a single-mode field in the $k$-photon Jaynes-Cummings model (JCM) in the presence of Stark shift and Kerr medium is studied. All terms in the respected Hamiltonian, such as the single-mode field, its interaction with the atom, the contribution of the Stark shift and the Kerr medium effects are considered to be $f$-deformed. In particular, the effect of the initial state of radiation field on the dynamical evolution of some physical properties such as atomic inversion and entropy squeezing are investigated by considering different initial field states. To achieve this purpose, coherent, squeezed and thermal states as initial field states are considered.
\end{abstract}
\section{Introduction}\lambdabel{sec-intro}
The well-known Jaynes-Cummings model (JCM) is an important, simplified and standard model that describes elegantly the interaction between an atom and a single-mode field in the dipole and rotating wave approximations (RWA) \cite{Jaynes}. Many interesting physical features have been studied by this model. Some examples are atomic inversion \cite{HU}, collapse and revival \cite{Scully,Setare}, entanglement \cite{Buzek12,Tan,Ouyang}, sub-Poissonian statistics \cite{Mandel,Mirzaee}, quadrature squeezing \cite{Rui} and entropy squeezing \cite{Fang,Jian}. A lot of researches in this field are based on the linear interaction between atom and field, i.e. the atom-field coupling is performed to be a constant throughout the evolution of the whole system. Phoenix and Knight \cite {Phoenix} used the JCM and employed a diagonalised reduced density operator to calculate entropy and demonstrated thereby the essential two-state nature of the field. Kayham investigated the entropy squeezing of a two-level atom interacting with a quantum field prepared initially in the Glauber-Lachs state by the standard JCM \cite{Kayham}. Liao {\it et al} considered a system of two two-level atoms interacting with a binomial field in an ideal cavity and investigated the time evolution of the single-atom entropy squeezing, atomic inversion and linear entropy of the system \cite{Liao}. Zhang {\it et al} discussed the entanglement and evolution of some of the nonclassicality features of the atom-field system in a standard JCM with squeezed vacuum and coherent state fields as initial field state \cite{Zhang}. Mortezapour {\it et al} studied the entanglement of dressed atom and its spontaneous emission in a three-level $\Lambda$-type closed-loop atomic system in multi-photon resonance condition and beyond it \cite{Mortezapour}. 
The entropy squeezing, atomic inversion and variance squeezing in the interaction between a two-level atom with a single mode cavity field via $k$-photon process have been investigated in \cite{Kang}. Ateto in \cite{Ateto} extended the JCM to combine the influences of atomic motion, field-mode structure and Kerr-like medium and investigated their effects on the dynamics of entropy of entanglement of the cavity and atomic populations.
However, in recent years, researches have strongly focused on the nonlinear interaction between a two-level atom and field in the deformed JCM. This model which firstly suggested by Buck and Sukumar \cite{Buck,Sukumar} describes the dependence of atom-field coupling on the light intensity. Bu\v{z}ek investigated the physical quantities, particularly atomic population and squeezing in the intensity-dependent coupling JCM \cite{Buz^ek}. The interaction between a $\Lambda$-type three-level atom with a single-mode cavity field with intensity-dependent coupling in a Kerr medium has been investigated by one of us \cite{Faghihi}. Sanchez and R\'{e}camier introduced a nonlinear JCM constructed from its standard structure by deforming all of the bosonic field operators \cite{Recamier}. Naderi {\it et al} replaced $\hat{a}$ and $\hat{a}^\dagger$ in the standard JCM by the $f$-deformed operators $\hat{A}$ and $\hat{A}^\dagger$ and introduced the two-photon $q$-deformed JCM \cite{Naderi}. Barzangeh {\it et al} investigated the effect of a classical gravitational field on the dynamical behavior of nonlinear atom-field interaction within the framework of the $f$-deformed JCM \cite {Barzanjeh}. Abdel-Aty {\it et al} studied the entropy squeezing of a two-level atom in a Kerr medium and examined the influence of the nonlinear interaction of the Kerr medium on the quantum information and entropy squeezing \cite {Abdel-Aty}. Cordero and R\'{e}camier considered the Jaynes-Cummings Hamiltonian with deformed operators of the field and additional Kerr term which is introduced by means of a purely algebraic method \cite{Cordero}. Recently, Faghihi {\it et al} investigated the entanglement dynamics of the nonlinear interaction between a three-level atom (in a $\Lambda$ configuration) and a two-mode cavity field in the presence of a cross-Kerr medium and its deformed counterpart \cite{Honarasa}, intensity-dependent atom-field coupling and the detuning parameters \cite{faghihi2,faghihi3}. 
Also, the authors have studied a three-level atom in motion which interacts with a single-mode field in an optical cavity in an intensity-dependent coupling regime \cite{faghihi4}. The effects of the mean photon number, detuning, Kerr-like medium and various of the intensity-dependent coupling functional on the degree of entanglement in the interaction between $\Lambda$-type three-level atom with a two-mode field have been studied in \cite{Hanoura}.
Abdalla {\it et al} considered the interaction of a two-level atom with a single-mode multi-photon field in the medium consisting the Stark shift and Kerr medium effects, with the coupling term which is assumed to be a function of time, but still linear in the intensity of light \cite{Abdalla M S}.
Moreover, it is (partially) nonlinear only due to the presence of the Kerr medium. They investigated atomic inversion and entropy squeezing and showed that, the existence of the time-dependent coupling parameter leads to a time delaying in the interaction which is twice the delay time for time-independent case.
In the present paper we aim to nonlinearize the latter atom-field system considered in \cite{Abdalla M S}.
Precisely speaking, the interaction will occur in a time-dependent and at the same time nonlinear manner between a two-level atom and a nonlinear single-mode field for $k$-photon transitions in the presence of the Stark shift effect and Kerr medium, both of which are considered to be $f$-deformed. As is clear, in this way, all terms in the Hamiltonian will behave in a nonlinear regime by entering a nonlinearity function $f(n)$, which is generally a well-defined function of the intensity of light. Fortunately, the complicated considered system can be solved analytically and therefore, we will be able to evaluate the time evolution of some of the physical properties such as atomic inversion and entropy squeezing. Since, the exact solution depends on the initial atom-field state, we take the atom to be in its excited state, but the field is considered to be in coherent state, squeezed state and thermal state. Although, our proposal may work well for any arbitrary nonlinear physical system with known $f(n)$, the effect of various parameters and different initial field states will be investigated in detail by considering a particular nonlinearity function.
The paper is organized in the following way: in section 2 we introduce the interaction Hamiltonian of our considered system in the full nonlinear regime of $k$-photon JCM and then by solving the corresponding Schr\"{o}dinger equation, the probability amplitudes at any time $t$ for the whole system with arbitrary initial field state have been obtained. In sections 3 and 4 we investigate temporal evolution of atomic inversion and entropy squeezing, respectively. Section 5 deals with presenting our numerical results for atomic inversion and entropy squeezing versus the scaled time for single- and two-photon transitions and so we discuss about the effects of the Kerr medium, Stark shift, detuning and intensity-dependent coupling on the evolution of mentioned properties. Also, the results of the effects of three- and four-photon transitions on the time evolution of the atomic inversion and entropy squeezing are given in section 6. Finally, we give a summary and conclusion in section 7.
\section{The $k$-photon JCM: full nonlinear regime}
The Hamiltonian of a two-level atom interacting with a quantized field by the standard JCM in the dipole and the rotating wave approximations can be simply written as ($\hbar=1$), $\hat{H}=\nu \hat{a}^\dagger \hat{a} +\frac{\omega}{2}\hat{\sigma}_{z}+\lambdambda ( \hat{a}^\dagger\hat{\sigma}_{-}+\hat{a}\hat{\sigma}_{+})$, where $\hat{\sigma}_{z}$ and $\hat{\sigma}_{\mathcal{p}m}$ are the Pauli operators, $\hat{a}$ and $\hat{a}^\dagger$ are the bosonic annihilation and creation operators, $\nu$ is the frequency of the field, $\omega$ is the transition frequency between the excited and ground states of the atom and $\lambdambda$ is the constant coupling between atom and field. By a few steps of generalizing standard JCM, the time dependent single-mode $k$-photon JCM in the presence of linear Stark shift and Kerr medium with the time dependent coupling has been studied by the Hamiltonian \cite{Abdalla M S}
\begin{eqnarray}\lambdabel{2}
\hat{H}(t)&=&\nu \hat{a}^\dagger \hat{a}+\frac{\omega}{2}\hat{\sigma}_{z}+ \hat{a}^\dagger \hat{a}(\beta_{1}|g\rangle\lambdangle g|+\beta_{2}|e\rangle\lambdangle e|)\nonumber\\ &+& \chi \hat{a}^{\dagger 2} \hat{a}^{2}+\lambdambda(t)(\hat{a}^{\dagger k}\hat{\sigma}_{-}+\hat{a}^{k}\hat{\sigma}_{+}),
\end{eqnarray}
where $\beta_{1}$ and $\beta_{2}$ are the effective Stark coefficients, $\chi$ denotes the third-order susceptibility of Kerr medium and $\lambdambda(t)$ is the time-dependent coupling parameter.
The third term in Hamiltonian (\ref{2}) indicates to the linear (in $a^\dag a$) Stark shift effect, which is arisen from the virtual transition to the intermediate level \cite{Puri,Ahmad,Obada}, and can exist for two-photon transition, i.e., $k=2$ \cite{Puri}. So, for instance, the authors of Refs. \cite{Abdalla M S,Liao2} used $\delta_{k,2}$ besides the Stark shift term in their Hamiltonian. In addition, it should be emphasized that, in the Hamiltonian (\ref{2}), whenever $k\neq2$ one has to set $\beta_{1}=0=\beta_{2}$ \cite{Liao2}. Altogether, it is worth to mention that, the (nonlinear) Stark shift can also exist for the cases with $k>2$ \cite{Ahmad}. In this latter case, the Hamiltonian (\ref{2}) changes to the following form
\begin{eqnarray}\lambdabel{2,1}
\hat{H}(t)&=&\nu \hat{a}^\dagger \hat{a}+\frac{\omega}{2}\hat{\sigma}_{z}+ \hat{a}^{\dagger k} \hat{a}^{k}(\beta_{1}|g\rangle\lambdangle g|+\beta_{2}|e\rangle\lambdangle e|) \nonumber\\ &+& \chi \hat{a}^{\dagger 2} \hat{a}^{2}+\lambdambda(t)(\hat{a}^{\dagger 2k}\hat{\sigma}_{-}+\hat{a}^{2k}\hat{\sigma}_{+}).
\end{eqnarray}
The Hamiltonian (\ref{2,1}) for $k=1$ is equal to the Hamiltonian (\ref{2}) for $k=2$ (linear Stark shift can occur). Altogether, in the continuation of the paper, by following the path of \cite{Abdalla M S}, we will generalize the Hamiltonian (\ref{2}), which is performed for linear Stark shift effect, in order to be able to compare our results with the presented results in \cite{Abdalla M S}. Anyway, by defining the detuning parameter $\mathcal{D}elta=\omega-k\nu$, the Hamiltonian (\ref{2}) can be rewritten in the form
\begin{eqnarray}\lambdabel{3}
\hat{H}(t)&=&\nu( \hat{a}^\dagger \hat{a}+\frac{k}{2}\hat{\sigma}_{z})+\frac{\mathcal{D}elta}{2}\hat{\sigma}_{z}+ \hat{a}^\dagger \hat{a}(\beta_{1}|g\rangle\lambdangle g|+\beta_{2}|e\rangle\lambdangle e|)\nonumber\\ &+& \chi \hat{a}^{\dagger 2} \hat{a}^{2}+\lambdambda(t)(\hat{a}^{\dagger k}\hat{\sigma}_{-}+\hat{a}^{k}\hat{\sigma}_{+}).
\end{eqnarray}
The aim of this paper is to generalize all terms of the Hamiltonian (\ref{3}) via the well-known nonlinear coherent state approach \cite{Manko,Vogel2}. By the notion of the nonlinearity we mean that, we intend to enter the $f$-deformation function in all possible terms, i.e. we will replace all $\hat{a}$ and $\hat{a}^\dagger$ respectively by $\hat{A}=\hat{a} f(\hat{n})$ and $\hat{A}^\dagger=f(\hat{n})\hat{a}^\dagger$ where $f(\hat{n})$ is a function of the number operator (intensity of light). By performing the mentioned procedure, the full nonlinear single-mode $k$-photon time-dependent JCM in the presence of effective Stark shift and Kerr medium can be written in the following manner
\begin{eqnarray}\lambdabel{5}
\hat{H}(t)&=&\nu (\hat{A}^\dagger \hat{A}+\frac{k}{2}\hat{\sigma}_{z})+\frac{\mathcal{D}elta}{2}\hat{\sigma}_{z}+ \hat{A}^\dagger \hat{A}(\beta_{1}|g\rangle\lambdangle g|+\beta_{2}|e\rangle\lambdangle e|)\nonumber\\&+&\chi \hat{A}^{\dagger 2}\hat{A}^{2}+\lambdambda(t)( \hat{A}^{\dagger k}\hat{\sigma}_{-}+\hat{A}^{k}\hat{\sigma}_{+}),
\end{eqnarray}
| 3,904 | 16,765 |
en
|
train
|
0.163.1
|
The Hamiltonian of a two-level atom interacting with a quantized field by the standard JCM in the dipole and the rotating wave approximations can be simply written as ($\hbar=1$), $\hat{H}=\nu \hat{a}^\dagger \hat{a} +\frac{\omega}{2}\hat{\sigma}_{z}+\lambdambda ( \hat{a}^\dagger\hat{\sigma}_{-}+\hat{a}\hat{\sigma}_{+})$, where $\hat{\sigma}_{z}$ and $\hat{\sigma}_{\mathcal{p}m}$ are the Pauli operators, $\hat{a}$ and $\hat{a}^\dagger$ are the bosonic annihilation and creation operators, $\nu$ is the frequency of the field, $\omega$ is the transition frequency between the excited and ground states of the atom and $\lambdambda$ is the constant coupling between atom and field. By a few steps of generalizing standard JCM, the time dependent single-mode $k$-photon JCM in the presence of linear Stark shift and Kerr medium with the time dependent coupling has been studied by the Hamiltonian \cite{Abdalla M S}
\begin{eqnarray}\lambdabel{2}
\hat{H}(t)&=&\nu \hat{a}^\dagger \hat{a}+\frac{\omega}{2}\hat{\sigma}_{z}+ \hat{a}^\dagger \hat{a}(\beta_{1}|g\rangle\lambdangle g|+\beta_{2}|e\rangle\lambdangle e|)\nonumber\\ &+& \chi \hat{a}^{\dagger 2} \hat{a}^{2}+\lambdambda(t)(\hat{a}^{\dagger k}\hat{\sigma}_{-}+\hat{a}^{k}\hat{\sigma}_{+}),
\end{eqnarray}
where $\beta_{1}$ and $\beta_{2}$ are the effective Stark coefficients, $\chi$ denotes the third-order susceptibility of Kerr medium and $\lambdambda(t)$ is the time-dependent coupling parameter.
The third term in Hamiltonian (\ref{2}) indicates to the linear (in $a^\dag a$) Stark shift effect, which is arisen from the virtual transition to the intermediate level \cite{Puri,Ahmad,Obada}, and can exist for two-photon transition, i.e., $k=2$ \cite{Puri}. So, for instance, the authors of Refs. \cite{Abdalla M S,Liao2} used $\delta_{k,2}$ besides the Stark shift term in their Hamiltonian. In addition, it should be emphasized that, in the Hamiltonian (\ref{2}), whenever $k\neq2$ one has to set $\beta_{1}=0=\beta_{2}$ \cite{Liao2}. Altogether, it is worth to mention that, the (nonlinear) Stark shift can also exist for the cases with $k>2$ \cite{Ahmad}. In this latter case, the Hamiltonian (\ref{2}) changes to the following form
\begin{eqnarray}\lambdabel{2,1}
\hat{H}(t)&=&\nu \hat{a}^\dagger \hat{a}+\frac{\omega}{2}\hat{\sigma}_{z}+ \hat{a}^{\dagger k} \hat{a}^{k}(\beta_{1}|g\rangle\lambdangle g|+\beta_{2}|e\rangle\lambdangle e|) \nonumber\\ &+& \chi \hat{a}^{\dagger 2} \hat{a}^{2}+\lambdambda(t)(\hat{a}^{\dagger 2k}\hat{\sigma}_{-}+\hat{a}^{2k}\hat{\sigma}_{+}).
\end{eqnarray}
The Hamiltonian (\ref{2,1}) for $k=1$ is equal to the Hamiltonian (\ref{2}) for $k=2$ (linear Stark shift can occur). Altogether, in the continuation of the paper, by following the path of \cite{Abdalla M S}, we will generalize the Hamiltonian (\ref{2}), which is performed for linear Stark shift effect, in order to be able to compare our results with the presented results in \cite{Abdalla M S}. Anyway, by defining the detuning parameter $\mathcal{D}elta=\omega-k\nu$, the Hamiltonian (\ref{2}) can be rewritten in the form
\begin{eqnarray}\lambdabel{3}
\hat{H}(t)&=&\nu( \hat{a}^\dagger \hat{a}+\frac{k}{2}\hat{\sigma}_{z})+\frac{\mathcal{D}elta}{2}\hat{\sigma}_{z}+ \hat{a}^\dagger \hat{a}(\beta_{1}|g\rangle\lambdangle g|+\beta_{2}|e\rangle\lambdangle e|)\nonumber\\ &+& \chi \hat{a}^{\dagger 2} \hat{a}^{2}+\lambdambda(t)(\hat{a}^{\dagger k}\hat{\sigma}_{-}+\hat{a}^{k}\hat{\sigma}_{+}).
\end{eqnarray}
The aim of this paper is to generalize all terms of the Hamiltonian (\ref{3}) via the well-known nonlinear coherent state approach \cite{Manko,Vogel2}. By the notion of the nonlinearity we mean that, we intend to enter the $f$-deformation function in all possible terms, i.e. we will replace all $\hat{a}$ and $\hat{a}^\dagger$ respectively by $\hat{A}=\hat{a} f(\hat{n})$ and $\hat{A}^\dagger=f(\hat{n})\hat{a}^\dagger$ where $f(\hat{n})$ is a function of the number operator (intensity of light). By performing the mentioned procedure, the full nonlinear single-mode $k$-photon time-dependent JCM in the presence of effective Stark shift and Kerr medium can be written in the following manner
\begin{eqnarray}\lambdabel{5}
\hat{H}(t)&=&\nu (\hat{A}^\dagger \hat{A}+\frac{k}{2}\hat{\sigma}_{z})+\frac{\mathcal{D}elta}{2}\hat{\sigma}_{z}+ \hat{A}^\dagger \hat{A}(\beta_{1}|g\rangle\lambdangle g|+\beta_{2}|e\rangle\lambdangle e|)\nonumber\\&+&\chi \hat{A}^{\dagger 2}\hat{A}^{2}+\lambdambda(t)( \hat{A}^{\dagger k}\hat{\sigma}_{-}+\hat{A}^{k}\hat{\sigma}_{+}),
\end{eqnarray}
In this respect, a few words seem to be necessary about our present work. It may be recognized that, starting with the nonlinear Hamiltonian describing the interaction between a three-level atom and a single-mode $f$-deformed cavity field (without the Stark shift) and following the path of Refs. \cite{Puri,Ahmad}, the same equations of motion for the three levels of the atom will be achieved. Therefore, one can conclude that, replacing $a, a^\dag$ with $A, A^\dag$
does not change the final results of the above Refs. By these explanations, we would like to emphasize that, the Stark shift should exist in the
generalized form of the Hamiltonian (\ref{5}), too. In other words, the Stark shift coefficients are now linear in terms of $\hat{A}^\dag \hat{A}$,
i.e., the field part of the Hamiltonian (\ref{5}).
So, in Hamiltonian (\ref{5}) (similar to (\ref{2}) and (\ref{3})), the linear (in terms of $\hat{A}^\dag \hat{A}$) Stark shift can exist for the case $k=2$.
And whenever $k\neq2$ one should set $\beta_{1}=0=\beta_{2}$.
To see what we have really done explicitly, it can be easily seen that
\begin{eqnarray}\lambdabel{6}
\hat{H}(t)&=&\nu( \hat{a}^\dagger \hat{a} f^{2}(\hat{n})+\frac{k}{2}\hat{\sigma}_{z})+\frac{\mathcal{D}elta}{2} \hat{\sigma}_{z}+\hat{a}^\dagger \hat{a} f^{2}(\hat{n})(\beta_{1}|g\rangle\lambdangle g|+\beta_{2}|e\rangle\lambdangle e|) \nonumber\\ &+&\chi f^{2} (\hat{n}) f^{2}(\hat{n}-1)\hat{a}^{\dagger2} \hat{a}^{2}+\lambdambda(t)\left(\frac{\left[ f(\hat{n})\right] !}{\left[ f(\hat{n}-k)\right] !} \hat{a}^{\dagger k}\hat{\sigma}_{-} +\hat{a}^{ k}\frac{\left[ f(\hat{n})\right] !}{\left[ f(\hat{n}-k)\right] !} \hat{\sigma}_{+}\right),
\end{eqnarray}
where $\hat{n}=\hat{a}^\dagger \hat{a}$ and $\left[ f(\hat{n})\right]! \doteq f(\hat{n})f(\hat{n}-1).....f(1)$ with $\left[ f(0)\right] !\doteq1$.
As is clear from (\ref{6}) in comparison with previous Hamiltonian (\ref{3}) which is considered in \cite{Abdalla M S}, we have in fact made the transformations, $ \nu \rightarrow \nu f^{2}(\hat n), \beta_{1(2)}\rightarrow\beta_{1(2)}f^{2}(\hat n), \chi\rightarrow\chi f^{2}(\hat{n})f^{2}(\hat{n}-1)$ and $\lambdambda(t) \rightarrow \lambdambda(t) \frac{[f(\hat{n})]!}{[f(\hat{n}-k)]!}.$
It is seen that, the field frequency, Stark shifts, third-order susceptibility and time-dependent parameter are all evolved from $c$-numbers to operator-valued functions (intensity-dependent parameters) \cite{Buz^ek,Faghihi,Singh,Manko,Honarasa}.
| 2,367 | 16,765 |
en
|
train
|
0.163.2
|
In this respect, a few words seem to be necessary about our present work. It may be recognized that, starting with the nonlinear Hamiltonian describing the interaction between a three-level atom and a single-mode $f$-deformed cavity field (without the Stark shift) and following the path of Refs. \cite{Puri,Ahmad}, the same equations of motion for the three levels of the atom will be achieved. Therefore, one can conclude that, replacing $a, a^\dag$ with $A, A^\dag$
does not change the final results of the above Refs. By these explanations, we would like to emphasize that, the Stark shift should exist in the
generalized form of the Hamiltonian (\ref{5}), too. In other words, the Stark shift coefficients are now linear in terms of $\hat{A}^\dag \hat{A}$,
i.e., the field part of the Hamiltonian (\ref{5}).
So, in Hamiltonian (\ref{5}) (similar to (\ref{2}) and (\ref{3})), the linear (in terms of $\hat{A}^\dag \hat{A}$) Stark shift can exist for the case $k=2$.
And whenever $k\neq2$ one should set $\beta_{1}=0=\beta_{2}$.
To see what we have really done explicitly, it can be easily seen that
\begin{eqnarray}\label{6}
\hat{H}(t)&=&\nu( \hat{a}^\dagger \hat{a} f^{2}(\hat{n})+\frac{k}{2}\hat{\sigma}_{z})+\frac{\Delta}{2} \hat{\sigma}_{z}+\hat{a}^\dagger \hat{a} f^{2}(\hat{n})(\beta_{1}|g\rangle\langle g|+\beta_{2}|e\rangle\langle e|) \nonumber\\ &+&\chi f^{2} (\hat{n}) f^{2}(\hat{n}-1)\hat{a}^{\dagger2} \hat{a}^{2}+\lambda(t)\left(\frac{\left[ f(\hat{n})\right] !}{\left[ f(\hat{n}-k)\right] !} \hat{a}^{\dagger k}\hat{\sigma}_{-} +\hat{a}^{ k}\frac{\left[ f(\hat{n})\right] !}{\left[ f(\hat{n}-k)\right] !} \hat{\sigma}_{+}\right),
\end{eqnarray}
where $\hat{n}=\hat{a}^\dagger \hat{a}$ and $\left[ f(\hat{n})\right]! \doteq f(\hat{n})f(\hat{n}-1)\dots f(1)$ with $\left[ f(0)\right] !\doteq1$.
As is clear from (\ref{6}) in comparison with previous Hamiltonian (\ref{3}) which is considered in \cite{Abdalla M S}, we have in fact made the transformations, $ \nu \rightarrow \nu f^{2}(\hat n), \beta_{1(2)}\rightarrow\beta_{1(2)}f^{2}(\hat n), \chi\rightarrow\chi f^{2}(\hat{n})f^{2}(\hat{n}-1)$ and $\lambda(t) \rightarrow \lambda(t) \frac{[f(\hat{n})]!}{[f(\hat{n}-k)]!}.$
It is seen that, the field frequency, Stark shifts, third-order susceptibility and time-dependent parameter are all evolved from $c$-numbers to operator-valued functions (intensity-dependent parameters) \cite{Buz^ek,Faghihi,Singh,Manko,Honarasa}.
The time-dependent $\lambda$-parameter makes the whole Hamiltonian to be time-dependent. Different forms may be chosen for $\lambda(t)$. In this paper we will select $\lambda(t)=\gamma\cos(\mu t)$, where $\gamma$ and $\mu$ are arbitrary constants. Following the probability amplitude approach \cite{Scully}, we assume that, the wave function of the atom-field can be expressed as \cite{Wolfang P}
\begin{equation}\label{7}
\hspace{-1in}|\psi(t)\rangle=\sum_{n} \exp[-i\nu(\hat{n}f^{2}(\hat{n})+\frac{k}{2}\hat{\sigma}_{z})](c_{n,e}(t)|n,e\rangle+c_{n+k,g}(t)|n+k,g\rangle),
\end{equation}
where $|n,e\rangle$ and $|n+k,g\rangle$ are the states in which the atom is in the excited and ground state and the field has $n$ and $n+k$ photons, respectively. Setting the wave function (\ref{7}) in the time dependent Schr\"{o}dinger equation, $i\hbar\frac{\partial}{\partial t}|\psi(t)\rangle=\hat{H}|\psi(t)\rangle$, we obtain the following coupled equations for $c _{n,e}(t)$ and $c_{n+k,g}(t)$:
\begin{eqnarray}\label{9}
i\frac{d c_{n,e}}{dt}&=&R_{1}c_{n,e}(t)+\alpha_{n} \cos(\mu t)c_{n+k,g}(t), \nonumber \\i\frac{d c_{n+k,g}}{dt}&=&R_{2}c_{n+k,g}(t)+\alpha_{n} \cos(\mu t)c_{n,e}(t),
\end{eqnarray}
where $R_{1}$, $R_{2}$ and $\alpha_{n}$ are defined as follows:
\begin{eqnarray}\label{10}
R_{1}&=&\frac{\Delta}{2}+n f^{2}(n)\beta_{2}+\chi n(n-1)f^{2}(n)f^{2}(n-1),\nonumber\\ R_{2}&=&- \frac{\Delta}{2}+(n+k) f^{2}(n+k)\beta_{1}\nonumber\\ &+&\chi (n+k)(n+k-1)f^{2}(n+k)f^{2}(n+k-1), \nonumber\\\alpha_{n}&=&\gamma\frac{\left[ f(n+k)\right] !}{\left[ f(n)\right] !}\sqrt{\frac{(n+k)!}{n!}}.
\end{eqnarray}
The fast frequency dependence of $c _{n,e}(t)$ and $c_{n+k,g}(t)$ can be removed by transforming them to the slowly varying functions $X(t)$ and $Y(t)$ as
\begin{equation}\label{11}
X(t)=c_{n,e}(t) \exp(iR_{1}t), \hspace{1.5cm} Y(t)=c_{n+k,g}(t) \exp(iR_{2}t).
\end{equation}
On using (\ref{11}) in equation (\ref{9}) we obtain
\begin{eqnarray}\label{12}
\frac{d X}{dt}&=&-i\frac{\alpha_{n}}{2}(\ e^{i(\mu+R_{n})t}+\ e^{-i(\mu-R_{n})t})Y,\nonumber\\\frac{d Y}{dt}&=&-i\frac{\alpha_{n}}{2}(\ e^{i(\mu-R_{n})t}+\ e^{-i(\mu+R_{n})t})X,
\end{eqnarray}
where
\begin{eqnarray}\label{13}
R_{n} &=& R_{1}-R_{2}\nonumber\\
&=& \Delta +\chi[n(n-1)f^{2}(n)f^{2}(n-1)-(n+k)(n+k-1)f^{2}(n+k)f^{2}(n+k-1)]\nonumber\\
&+& [nf^{2}(n)\beta_{2}-(n+k)f^{2}(n+k)\beta_{1}].
\end{eqnarray}
The coupled differential equations in (\ref{12}) consist of two terms; the term in the form $e^{i(\mu - R_{n})t}$ ($e^{i(\mu+R_{n})t}$) describes the process that, energy is conserved (nonconserved). So we neglect the terms corresponding to nonconserving energy (in the rotating wave approximation). Under this condition, equations in (\ref{12}) change to
\begin{eqnarray}\label{14}
\frac{dX}{dt}&=&-i\frac{\alpha_{n}}{2}\ e^{-i(\mu-R_{n})t} Y, \nonumber\\\frac{d Y}{dt}&=&-i\frac{\alpha_{n}}{2}\ e^{i(\mu-R_{n})t}X.
\end{eqnarray}
By solving the above coupling equations, we obtain
\begin{eqnarray}\label{15}
c_{n,e}(t)&=& \left \lbrace c_{n,e}(0)\left (\cos(\Omega_{n}t)-i(R_{n}-\mu)\frac{\sin( \Omega_{n}t)}{2 \Omega_{n}}\right) -i\frac{\alpha_{n}}{2\Omega_{n}}\sin(\Omega_{n}t)c_{n+k,g}(0)\right\rbrace \nonumber\\
&\times& \exp [-i(\varphi_{n}+\mu/2)t],
\end{eqnarray}
\begin{eqnarray}\label{16}
c_{n+k,g}(t) &=& \left \lbrace c_{n+k,g}(0)\left (\cos(\Omega_{n}t)+i(R_{n}-\mu)\frac{\sin(\Omega_{n}t)}{2\Omega_{n}}\right)-i\frac{\alpha_{n}}{2\Omega_{n}}\sin(\Omega_{n}t)c_{n,e}(0)\right\rbrace \nonumber\\
&\times& \exp[-i(\varphi_{n}-\mu/2)t],
\end{eqnarray}
where
\begin{eqnarray}\label{17}
\varphi_{n}&=&\frac{\chi}{2}[n(n-1)f^{2}(n)f^{2}(n-1)+(n+k)(n+k-1)f^{2}(n+k)f^{2}(n+k-1)]\nonumber\\
&+&\frac{1}{2}[nf^{2}(n)\beta_{2}+(n+k)f^{2}(n+k)\beta_{1}].
\end{eqnarray}
and $\Omega_{n}=\frac{1}{2}\sqrt{(R_{n}-\mu)^{2}+\alpha_{n}^{2}}$ is the generalized Rabi frequency (note that $\alpha_{n}$ and $R_{n}$ are defined respectively in (\ref{10}) and (\ref{13})). It ought to be mentioned that, in equations (\ref{10}), (\ref{13}) and (\ref{17}), the values $\beta_{1}=0=\beta_{2}$ should be set for the case $k\neq2$.
In the above equations, $c_{n,e}(0)$ and $c_{n+k,g}(0)$ may be determined with the initial states of atom and field. In this work we suppose that, the atom is initially in the excited state ($|e\rangle $), however, the cavity field is considered to be initially in different states such as coherent state, squeezed state and thermal state which can be defined by their associated density operators as
\begin{equation}\label{18}
\rho_{CS}(0)= |\alpha\rangle \langle\alpha|=e^{-\langle n \rangle}\sum_{n,m=0}^{\infty}\frac{\alpha^{m}\alpha^{\ast n}}{\sqrt{m! n!}}|m\rangle\langle n|
\end{equation}
\begin{equation}\label{19}
\rho_{SS}(0)=|\xi\rangle \langle\xi|=\sum_{n,m=0}^{\infty}\frac{(\tanh r)^{n+m} \sqrt{(2m)!(2n)!}}{(2^{n}n!2^{m}m!)\cosh r} |2m\rangle\langle 2n|,
\end{equation}
\begin{equation}\label{20}
\rho_{TS}(0)=\sum_{n=0}^{\infty}\frac{\langle n\rangle^{n}}{(1+\langle n\rangle)^{n+1}}|n\rangle\langle n|.
\end{equation}
Therefore, by using each of the above initial field conditions we can find the explicit form of the solution of time dependent Schr\"{o}dinger equation. This enables one to analyze interesting properties such as atomic inversion and entropy squeezing, which will be done in the following sections.
It can clearly be seen that, setting $f(n)=1$ in the relations (\ref{15}), (\ref{16}) recovers the results of Ref. \cite{Abdalla M S}. As another important point, it is worth mentioning that, choosing different nonlinearity functions leads to different Hamiltonian systems and so, different physical results may be achieved. Altogether, in the continuation of this paper, we select particularly the intensity dependent coupling as $f(n)=\sqrt{n}$. This function is a favorite function for the authors who have worked in the nonlinear regime of atom-field interaction (see for instance \cite{Singh}, \cite{Huang}). In particular, Fink {\it et al} have explored a natural way that, this nonlinearity function will be appeared in physical systems \cite{Fink}.
| 3,299 | 16,765 |
en
|
train
|
0.163.3
|
\section{Atomic inversion}
The atomic inversion measures the difference in the populations of the two levels of the atom and plays a fundamental role in laser theory \cite{Wolfang P}. After determining $c_{n,e}(t)$ and $c_{n+k,g}(t)$ for the initial field states in (\ref{18}), (\ref{19}) and (\ref{20}), we can investigate this quantity which is given by
\begin{equation} \label{21}
W(t)=\sum_{n=0}^{\infty}(|c_{n,e}(t)|^{2}-|c_{n+k,g}(t)|^{2}).
\end{equation}
By inserting the equations (\ref{15}) and (\ref{16}) in (\ref{21}) for an arbitrary initial field state, we obtain
\begin{equation}\label{22}
W(t)=\sum_{n=0}^{\infty}\rho_{nn}(0)\left(\cos(2\Omega_{n} t)+(R_{n}-\mu)^{2}\frac{\sin^{2}(\Omega_{n} t)}{2 \Omega_{n}^{2}}\right),
\end{equation}
where $\rho_{nn}(0)=|c_{n}(0)|^{2}$. For the mentioned initial field states in (\ref{18}), (\ref{19}) and (\ref{20}), one has:
\begin{equation}\label{221}
\rho^{CS}_{n,n}(0)= |c_n^{CS}(0)|^{2}= e^{-\langle n \rangle}\frac{\langle n \rangle ^{n}}{ n!},
\end{equation}
\begin{equation}\label{222}
\rho^{SS}_{2n,2n}(0)= |c_{2n}^{SS}(0)|^{2}=\frac{\langle n\rangle^{n} (2n)!}{(2^{n}n!)^2(1+\langle n\rangle)^{n+1/2}},\hspace{1cm} \rho^{SS}_{2n+1,2n+1}(0)= |c_{2n+1}^{SS}(0)|^{2}=0
\end{equation}
\begin{equation}\label{223}
\rho^{TS}_{n,n}(0)=\frac{\langle n\rangle^n }{{(1+\langle n\rangle)^{n+1}}},
\end{equation}
where $\langle n\rangle$ for each of these states is given by
\begin{equation}\label{224}
\langle n\rangle_{CS}=|\alpha|^{2},\hspace{0.5cm} \langle n\rangle_{SS}=\sinh^{2}(r),\hspace{0.5cm}\langle n\rangle_{TS}=\frac{1}{e^{\hbar\nu/k_{B}T}-1}.
\end{equation}
From equation (\ref{22}) we can discuss the temporal evolution of the atomic inversion for different initial field situations. This will be presented in section 5 in detail.
\section{Entropy squeezing }
For a two-level atom, characterized by the Pauli operators $\sigma_{x}$, $\sigma_{y}$ and $\sigma_{z}$, the uncertainty relation for the information entropy is defined as follows \cite{Fang}
\begin{equation}\label{23}
\delta H(\sigma_{x})\delta H(\sigma_{y})\geq\frac{4}{\delta H(\sigma_{z})},\hspace{2cm}\delta H(\sigma_{\alpha})=\exp[H(\sigma_{\alpha})],
\end{equation}
where $ H(\sigma_{\alpha})$, as the information entropy of the operator $ \sigma_{\alpha}(\alpha=x,y,z)$, is given by
\begin{equation}\label{24}
H(\sigma_{\alpha})=-\sum_{i=1}^{2}P_{i}(\sigma_{\alpha})\ln P_{i}(\sigma_{\alpha}).
\end{equation}
Since for a two-level atom, the Pauli operators have two eigenvalues, one may expect that, $P_{i}(\sigma_{\alpha})$ denotes the
probability distribution of two possible outcomes of measurements of the operator $\sigma_{\alpha}$. Henceforth, it is defined as follows
\begin{equation}\label{25}
P_i (\sigma_{\alpha})= \langle \psi_{\alpha_i}| \rho|\psi_{\alpha_i} \rangle ,
\end{equation}
where $\rho$ is the density operator of the system and $|\psi_{\alpha_i} \rangle$ is the eigenstate of the Pauli operators, i.e.,
\begin{equation}\label{26}
\sigma_{\alpha} | \psi_{\alpha_i} \rangle = \eta_{\alpha_i}| \psi_{\alpha_i} \rangle,\hspace{2cm}\alpha=x,y,z, \hspace{0.25cm} \hspace{.25cm} i=1,2.
\end{equation}
From equation (\ref{23}) the components $ \sigma_{\alpha}(\alpha=x,y)$ are said to be squeezed, if the information entropy $H(\sigma_{\alpha})$ of $\sigma_{\alpha}$ satisfies the inequality
\begin{equation}\label{27}
E(\sigma_{\alpha})=\delta H(\sigma_{\alpha})-\frac{2}{\sqrt{\delta H(\sigma_{z})}}<0,\hspace{2cm}\alpha=x \hspace{.25cm} \text{or}\hspace{.25cm} y.
\end{equation}
By using the equations (\ref{24}) and (\ref{25}) for the information entropies of the atomic operators $\sigma_{x}$, $\sigma_{y}$ and $\sigma_{z}$ we finally arrive at
\begin{eqnarray}\label{29}
H(\sigma_{x})=&-&\left[ \frac{1}{2}+Re(\rho_{ge}(t))\right] \ln\left[ \frac{1}{2}+Re(\rho_{ge}(t))\right]\nonumber\\&-&\left[ \frac{1}{2}-Re(\rho_{ge}(t))\right] \ln\left[ \frac{1}{2}-Re(\rho_{ge}(t))\right],
\end{eqnarray}
\begin{eqnarray}\label{30}
H(\sigma_{y})=&-&\left[ \frac{1}{2}+Im(\rho_{ge}(t))\right] \ln\left[ \frac{1}{2}+Im(\rho_{ge}(t))\right]\nonumber\\&-&\left[ \frac{1}{2}-Im(\rho_{ge}(t))\right] \ln\left[ \frac{1}{2}-Im(\rho_{ge}(t))\right],
\end{eqnarray}
\begin{eqnarray}\label{31}
H(\sigma_{z})=-\rho_{ee}(t) \ln\rho_{ee}(t)-\rho_{gg}(t)\ln\rho_{gg}(t).
\end{eqnarray}
By using the form of the wave function (\ref{7}), the density operator of the entire atom-field system at any time $t$ is given by
\begin{eqnarray}\label{28}
\hspace{-1cm}\rho_{\mathrm{atom-field}} =\sum_{n=0}^{\infty}\sum_{m=0}^{\infty} \lbrace c_{n,e}(t)c_{m,e}^{*}(t)|n,e\rangle \langle e,m|+ c_{n+k,g}(t)c_{m+k,g}^{*}(t)|n+k,g\rangle \langle g,m+k|\nonumber\\ \hspace*{-.41in} +c_{n,e}(t)c_{m+k,g}^{*}(t)|n,e\rangle \langle g,m+k|+ c_{n+k,g}(t)c_{m,e}^{*}(t)|n+k,g\rangle \langle e,m|\rbrace.
\end{eqnarray}
So, the necessary matrix elements of the reduced density operator in (\ref{29})-(\ref{31}) may be given in the following form
\begin{eqnarray}\label{32}
\rho_{ee}(t)=\sum_{n=0}^{\infty}|c_{n,e}(t)|^{2},
\end{eqnarray}
\begin{eqnarray}\label{33}
\rho_{eg}(t)=\sum_{n=0}^{\infty}c_{n+k,e}(t)c_{n+k,g}^{*}(t)=\rho_{ge}^{*}(t),
\end{eqnarray}
\begin{eqnarray}\label{34}
\rho_{gg}(t)=\sum_{n=0}^{\infty}|c_{n+k,g}(t)|^{2}.
\end{eqnarray}
By employing the above equations, we can study the temporal evolution of the entropy squeezing in terms of the variables $\sigma_{x}$ and $\sigma_{y}$, which will be done in the next section.
By using equation (\ref{22}) and replacing $\rho_{nn}(0)$ for different initial field states (coherent, squeezed and thermal states from (\ref {221}), (\ref{222}) and (\ref{223})), we can investigate the effects of the initial field state on the variation of the atomic inversion. Prior to everything it is necessary to select a particular nonlinearity function. As we mentioned previously, in this paper we choose $f(n)=\sqrt{n}$.
In all figures which are related to the atomic inversion $W(t)$, the left and the right plots respectively corresponds to the linear and nonlinear function.
All figures are drawn with particular values of $\mu=0.1$, $\langle n \rangle=25$. Other used parameters are denoted in the related figure captions distinctly.
Figure 1 shows the temporal evolution of the atomic inversion in terms of the scaled time, for the functions $f(n)=\sqrt{n}$ and also $f(n)=1$ taking the ``coherent state'' in (\ref{18}) as the initial field state.
Figures 1(a) and 1(b) show the variation of the atomic inversion without Kerr and Stark effects.
The collapse and revival phenomena exist in these figures, but there is an increase in the number of fluctuations with regular behavior for the deformed case. Also, the amplitude of the fluctuation for this case is increased relative to $f(n)=1$. In other words, while we have partial revivals in the linear case, nearly complete revivals occur in the nonlinear regime.
To examine the effect of the Kerr medium on the behavior of the population inversion, figures 1(c) and 1(d) are plotted. Figure 1(d) which corresponds to $f(n)=\sqrt{n}$ shows a chaotic behavior of $W(t)$ around $0.99$, such that the amplitude of the fluctuations between maxima and minima of $W(t)$ are very small. Figure 1(c) indicates that, in the presence of the Kerr effect for the case $f(n)=1$, the result is very similar to figure 1(a). Altogether, if we use the value of $\chi$ larger (up to 0.03 \cite{Abdalla M S}) the Kerr effect will be visible.
| 2,740 | 16,765 |
en
|
train
|
0.163.4
|
In all figures which are related to the atomic inversion $W(t)$, the left and the right plots respectively corresponds to the linear and nonlinear function.
All figures are drawn with particular values of $\mu=0.1$, $\langle n \rangle=25$. Other used parameters are denoted in the related figure captions distinctly.
Figure 1 shows the temporal evolution of the atomic inversion in terms of the scaled time, for the functions $f(n)=\sqrt{n}$ and also $f(n)=1$ taking the ``coherent state'' in (\ref{18}) as the initial field state.
Figures 1(a) and 1(b) show the variation of the atomic inversion without Kerr and Stark effects.
The collapse and revival phenomena exist in these figures, but there is an increase in the number of fluctuations with regular behavior for the deformed case. Also, the amplitude of the fluctuation for this case is increased relative to $f(n)=1$. In other words, while we have partial revivals in the linear case, nearly complete revivals occur in the nonlinear regime.
To examine the effect of the Kerr medium on the behavior of the population inversion, figures 1(c) and 1(d) are plotted. Figure 1(d) which corresponds to $f(n)=\sqrt{n}$ shows a chaotic behavior of $W(t)$ around $0.99$, such that the amplitude of the fluctuations between maxima and minima of $W(t)$ are very small. Figure 1(c) indicates that, in the presence of the Kerr effect for the case $f(n)=1$, the result is very similar to figure 1(a). Altogether, if we use the value of $\chi$ larger (up to 0.03 \cite{Abdalla M S}) the Kerr effect will be visible.
The effect of the Stark shift (in the presence of Kerr medium) can be seen for linear and nonlinear functions in figures 1(e) and 1(f). From figure 1(f), we observe that, the Stark shift increases the amplitude of the fluctuations as compared with figure 1(d). Also, this figure shows a chaotic behavior for $W(t)$ in the nonlinear regime.
Figure 1(e) shows the effect of the Stark shift (in the presence of Kerr medium) on the time variation of $W(t)$ for $f(n)=1$. One can see that, the temporal evolution of the atomic inversion reveals several revivals in the presence of both the Stark and Kerr effects.
Comparing figures 1(e) and 1(b) leads us to conclude that, the effect of the considered nonlinearity function (without the Kerr and Stark effects) is nearly equivalent to the Kerr and Stark effects in the linear case.
The effect of the detuning parameter $\Delta$ (defined as $\omega-k\nu=\Delta$), in the presence of Kerr and Stark effects has been shown in figures 1(g) and 1(h). Comparing figure 1(g) with figure 1(e) indicates that the extremes of $W(t)$ (in the revivals) regularly decrease for the linear system in our plotted figure (figure 1(g)). Altogether, $\Delta$ has a negligible effect in the presence of the nonlinearity function (figure 1(h)).
We have plotted figure 2 taking into account the initial field as ``squeezed state" using (\ref{19}). Figures 2(a) and 2(b) show the time evolution of the atomic inversion for the linear and nonlinear function, in the absence of both Kerr and Stark effects. We can see from figure 2(a) that, $W(t)$ oscillates rapidly for the case $f(n)=1$, while in the presence of nonlinearity the behaviour of $W(t)$ is periodic (figure 2(b)). Figures 2(c) and 2(d) demonstrate the Kerr medium effect on the variation of $W(t)$ and in figures 2(e) and 2(f), we added the Stark shift, too. By comparing figures 2(a) and 2(c), one finds that the behavior of $W(t)$ for $f(n)=1$, with and without the Kerr effect, is almost the same; however, for the linear case and in the presence of the Stark effect (figure 2(e)) the behavior of $W(t)$ is irregular. We examined the time evolution of the atomic inversion for the nonlinear case with different parameters in the right plots of figure 2. We can see a regular behavior for $W(t)$ in the absence of the Kerr medium, Stark effect and detuning (figure 2(b)).
But, the behavior of $W(t)$ in the figures 2(d), 2(f) and 2(h) is generally irregular.
Figures 2(g) and 2(h) show the effect of the detuning parameter for linear and nonlinear function. We can see partial revivals in the presence of detuning.
In figure 3, we assumed that the initial field is ``thermal state" which is defined in (\ref{20}) and again the effects of the Kerr medium, Stark shift and detuning are investigated on the behavior of $W(t)$. The evolution of the atomic inversion is shown for nonlinear and linear regimes, in the right and left plots of this figure, respectively.
We have shown the effect of Kerr medium in figure 3(c) for linear function, where one can see that, $W(t)$ is not so sensitive to the Kerr effect.
While in figure 3(b), the presence of nonlinearity without both Kerr and Stark effects, allows the (partial) collapses and revivals to be observed, the Kerr medium and Stark shift destroy the latter phenomena.
The above result seems to be in contrast to the linear case, i.e., the presence of Kerr and Stark shift effects can make the (partial) collapses and revivals appear.
We observe the variation of $W(t)$ with $\Delta\neq0$ in the presence of the Kerr and Stark effects for the linear and nonlinear functions in figures 3(g) and 3(h), respectively. As in the previous states considered in this paper, unlike some changes in the numerical results, no qualitative change can be observed. Altogether, generally in all three states discussed above, the linear case is more sensitive to the detuning parameter in comparison with the nonlinear case.\\
In this part of the present section, we will analyze the temporal evolution of the entropy squeezing for different initial field states using the analytical results of section 4.
We will deal with nonlinear case with deformation function $f(n)=\sqrt{n}$ only. All figures are drawn with particular value of $\mu=0.1$. Other used parameters are denoted in the related figure captions distinctly.
Figures 4(a) and 4(b) display the time evolution of the entropy squeezing factors $E(\sigma_{x})$ and $E(\sigma_{y})$ if one concerns with the initial field as a ``coherent state" in (\ref {18}). It is obvious from these figures that, there exists entropy squeezing in $\sigma_{x}$ and $\sigma_{y}$ at some intervals of time.
In figures 4(c) and 4(d), we examine the influence of the Kerr effect on the evolution of the entropy squeezing for the variables $\sigma_{x}$ and $\sigma_{y}$ with the chosen parameters, respectively.
It is clear from these figures that, there is no squeezing in $\sigma _{x}$ and $\sigma_{y}$. Also, paying attention to figures 4(e) and 4(f) which are plotted in the presence of both Kerr and Stark effects, no squeezing can be seen in $\sigma_{x}$ and $\sigma_{y}$.
To study the effect of the initial mean photon number on the behavior of the entropy squeezing (with Kerr effect) figures 4(g) and 4(h) are plotted.
As is shown, by decreasing the mean value of $ \langle n \rangle $ from 25 to 1, the entropy squeezing for the variables $\sigma_{x}$ and $\sigma_{y}$ will appear in certain time ranges. The time evolution of the squeezing parameters $E(\sigma_{x})$ and $E(\sigma_{y})$ are shown in figure 5, for the field initially being in the ``squeezed state" in (\ref{19}). Specifically, in figures 5(a) and 5(b), the behavior of the squeezing $E(\sigma_{x})$ and $E(\sigma_{y})$ as a function of the scaled time in the absence of the Kerr and Stark effects have been shown. We see from these figures that, both $E(\sigma_{x})$ and $E(\sigma_{y})$ possess squeezing in the variables $\sigma_{x}$ and $\sigma_{y}$ when $ \langle n \rangle =1$. It should be noticed that, according to our further calculations (not shown here) in this case (without Kerr and Stark effect), squeezing may be seen in the components $\sigma_{x}$ and $\sigma_{y}$ for $ \langle n \rangle < 4$.
To investigate the effect of the Kerr medium, we have depicted the entropy squeezing $E(\sigma_{x})$ and $E(\sigma_{y})$ in terms of the scaled time in figures 5(c) and 5(d). $E(\sigma_{x})$ and $E(\sigma_{y})$ predict squeezing in the variables $\sigma_{x}$ and $\sigma_{y}$ on short time periods discontinuously.
A comparison of the figures 5(a), 5(b), 5(c) and 5(d) with similar figures for coherent state (figures 4(a), 4(b), 4(c) and 4(d)) shows that, while for the second set of figures the Kerr effect destroys the entropy squeezing completely, this is not so for the first set of figures.
We discuss the effects of Stark shift on the time evolution of the squeezing factors in figures 5(e) and 5(f). It is obvious from these figures that, there is no squeezing in the presence of the Kerr and Stark effects.
Figure 6 shows the time evolution of the entropy squeezing factors $E(\sigma_{x})$ and $E(\sigma_{y})$ for the case that, the field is initially prepared in the ``thermal state" in (\ref{20}). Figures 6(a) and 6(b) represent the entropy squeezing $E(\sigma_{x})$ and $E(\sigma_{y})$ in the absence of the Kerr and Stark effects. As is clear, squeezing in the components $\sigma_{x}$ and $\sigma_{y}$ exists at some intervals of time, obviously for different time intervals. Also, the depth of the entropy squeezing for $\sigma_{x}$ is larger than for $\sigma_{y}$.
We investigated the effect of the Kerr medium in figures 6(c) and 6(d). As is observed, squeezing may be occurred in the entropy squeezing factors $E(\sigma_{x})$ and $E(\sigma_{y})$ in a short range of time.
Finally, we examined the effect of the Stark shifts (when the Kerr effect is also in our consideration) in figures 6(e) and 6(f). In this case, there is no squeezing in $E(\sigma_{x})$ and $E(\sigma_{y})$.
| 2,677 | 16,765 |
en
|
train
|
0.163.5
|
\section{A discussion on the effect of three- and four-photon transitions}
We investigated the influence of one- and two-photon transitions on the temporal behaviour of atomic inversion and entropy squeezing in the previous sections.
In this section, we intend to discuss the effect of three- and four-photon processes on the time evolution of mentioned physical quantities in a general manner.
Obviously, adding all of the numerical results and related figures considering all quantities which concern with $k=3, 4$ will make the paper dramatically large.
Therefore, we present our obtained results qualitatively and make a comparison with the previous results for $k=1,2$. Clearly, due to the numerous
parameters which are involved in the calculations, one cannot reach a sharp result, so, our discussion is restricted to the particular used parameters.
According to our further calculations for $k=3$ and $k=4$ (not shown here), the following results have been extracted:
\begin{itemize}
\item The collapse and revival phenomena exist in a clear manner for three- and four-photon transitions in the linear regime ($f(n)=1$) when the field is initially in the coherent state. As we observed, by increasing the number of photon transitions, the time interval between subsequent revivals will be decreased. In addition, the revival times turn shorter when the number of photon transitions is increased. This result is in consistence with the outcome results of Ref. \cite{Kang} ([18] of RM).
Moreover, for $k=3,4$, no clear collapse-revival phenomenon is observed for the atom-field states in the linear regime ($f(n)=1$) which their initial field states are squeezed and thermal states.
\item The temporal behaviour of atomic inversion for $k=3$ and $k=4$ shows a chaotic behaviour for the nonlinear regime ($f(n)=\sqrt{n}$) in all cases.
As one may observe, when the initial field is thermal and coherent states, for the case $k=1$ in the absence of Kerr medium and detuning, the full collapse and revivals are revealed in the evolution of atomic inversion.
\item Our results show that, Kerr medium has a negligible effect on the time variation of atomic inversion for all cases with $k=3, 4$. For the detuning parameter
we observed that for the linear case with $k=3$, it has no critical effect for the coherent and thermal states as initial field, while for the squeezed initial state, the negative values of atomic inversion are considerably decreased, i.e., it gets positive values in main parts of time. The same statement will be weakly true for the case $k=4$.
\item In the nonlinear case ($f(n)=\sqrt{n}$), there is no (entropy) squeezing in $\sigma_{x}$ and $\sigma_{y}$ for $k=4$ with different initial field states and also, for $k=3$ with squeezed and thermal states as the initial states of the field. But, for $k=3$ in the absence of Kerr medium, squeezing exists in $\sigma_{y}$ in a very short intervals of times. In this case, there is no squeezing in $\sigma_{x}$, too.
\end{itemize}
\section{Summary and conclusion}
In this paper, we considered the full nonlinear interaction between a two-level atom with a nonlinear single-mode quantized field for $k$-photon transition in the presence of Kerr medium and Stark shift effect.
Also, we assumed that, the coupling between atom and field is time-dependent as well as intensity-dependent.
To the best of our knowledge, this problem in such a general form has not been considered in the literature up to now.
Fortunately, we could solve the dynamical problem and found the explicit form of the state vector of the whole atom-field system analytically.
We have considered the atom to be initially in the excited state and the field in three different possible states (coherent state, squeezed state and thermal state),
and then, the time variation of atomic inversion and entropy squeezing have been numerically studied and compared with each other. Even though our formalism can be used for any nonlinearity function, we particularly considered the nonlinearity function $f(n)=\sqrt{n}$ for our further numerical calculations. The obtained results are summarized as follow:\\
1. The temporal evolution of both atomic inversion and entropy squeezing is generally sensitive to the initial field state, but this fact is more visible for the atomic inversion in comparison with entropy squeezing. \\
2. The behavior of atomic inversion in the presence of nonlinearity (the right plots in all figures) is chaotic, except in some cases, i.e., figures 1(b), 2(b) and 3(b) which are plotted for the cases in which the Kerr and Stark effects are absent and the initial field state is coherent state, squeezed state and thermal state, respectively. As is observed, the collapse and revival phenomena are revealed in the figures 1(b) and 3(b).\\
3. The complete (partial) collapse and revival, as purely quantum mechanical features, are observed in the left plots of figure 1 (figure 3) corresponds to atomic inversion for initial coherent (thermal) state. \\
4. The detuning parameter has not a critical effect on atomic inversion, unless it causes some minor changes in the extremes of the investigated quantities, either with chaotic or collapse-revival behavior. \\
5. The variation of atomic inversion for different initial field states (coherent state, squeezed state and thermal state) shows that, the time dependent coupling leads to a time delaying which is twice the delay time for the time-independent case. This result is similar to the reported results in \cite{Abdalla M S}.\\
6. There is seen entropy squeezing in $\sigma_{x}$ and $\sigma_{y}$ at some intervals of time in some cases with different conditions, obviously for different time intervals, such that the uncertainty relation holds.\\
7. The simultaneous presence of both Stark shift and Kerr medium prevents the entropy squeezing from occurring, for all cases (different initial field states).\\
8. In the absence of Kerr medium, Stark shift, detuning and with constant coupling ($f(n)=1$), with considering the parameters which are used in Ref. \cite{Kang} ([18] of RM), our results recover the numerical results of Ref. \cite{Kang} successfully. In the absence of the mentioned effects with intensity-dependent but time-independent coupling ($f(n) =\sqrt{n}$, $\mu = 0$), our results are reduced to the ones reported in Ref. \cite{LiN}.\\
9. As previously mentioned, we nonlinearized the atom-field system which has been considered in \cite{Abdalla M S}. Consequently, as is expected, in the linear case ($f(n)=1$) the outcome results are the same as the results in this Ref.\\
Finally, we would like to mention that, our presented formalism has the potential ability to be applied for all well-known nonlinearity functions, such as the center of mass motion of trapped ion \cite{Vogel}, photon-added coherent states \cite{Agarwal,Sivakumar}, deformed photon-added coherent states \cite{Safaeian}, harmonious states \cite{Manko,Sudarshan}, $q$-deformed coherent states \cite{Naderi,Macfalane,Biedenharn,Chaichian} etc. We have not discussed the effect of the initial field photon number in detail, but it is obvious that, the results may be affected directly by this parameter, as well as all discussed parameters.
\end{document}
| 1,778 | 16,765 |
en
|
train
|
0.164.0
|
\begin{document}
\title{A Leray spectral sequence for noncommutative differential fibrations}
\author{Edwin Beggs\ \dag\ \ \&\ \ Ibtisam Masmali\ \ddag \\ \\
\dag\ College of Science, Swansea University, Wales \\
\ddag\ Jazan University, Saudi Arabia}
\begin{abstract} This paper describes the Leray spectral sequence associated to a differential fibration. The differential fibration is described by base and total differential graded algebras. The cohomology used is noncommutative differential sheaf cohomology. For this purpose, a sheaf over an algebra is a left module with zero curvature covariant derivative. As a special case, we can recover the Serre spectral sequence for a noncommutative fibration.
\end{abstract}
\section{Introduction}
This paper uses the idea of noncommutative sheaf theory introduced in \cite{three}. This is a differential definition, so the algebras involved have to have a differential structure. Essentially
having zero derivative is used to denote `locally constant', which is a term of uncertain meaning for an algebra. Working rather vaguely, one might think of considering the total space of a sheaf over
a manifold as locally inheriting the differential structure of the manifold, via the homeomorphism between a neighbourhood of a point in the total space and an open set in the base space. This allows us to lift a vector at a point of the base space to a unique vector at every point of the preimage
of that point in the total space. This lifting should allow us to give a covariant derivative on the functions on the total space. Further, the local homeomorphisms suggest that the resulting covariant derivative has zero curvature. In \cite{three} it is shown that a zero curvature covariant derivative on a module really does allow us to reproduce some of the main results of sheaf cohomology.
In this paper we shall consider another of the main results of sheaf cohomology, the Leray
spectral sequence.
Ideally it would be nice to have a definition which did not involve differential structures, but there are several comments to be made on this:
When Connes calculated the cyclic cohomology of the noncommutative torus
\cite{ConnesIHES}, he used a subalgebra of rapidly decreasing sequences, effectively
placing differential methods at the heart of noncommutative cohomology. It is not obvious what a \textit{calculable} purely algebraic (probably read $C^*$ algebraic) sheaf cohomology theory would be -- though maybe the theory of quantales \cite{MulQuant} might give a clue.
Secondly, even if there were a non-differential definition, it would likely be complementary to the differential definition. The relation between de Rham and topological cohomology theories is fundamental to a lot of mathematics, it would make no sense to delete either. Finally, in
mathematics today, differential graded algebras arising from
several constructions are considered interesting objects in their own right, and many applications to Physics are phrased in terms of differential forms or vector fields.
There are four main motivations behind this paper. One is that the Leray spectral sequence seems a natural continuation from the sheaf theory and Serre spectral sequence in \cite{three}. Another is a step in finding an analogue of the Borel-Weil-Bott theorem for representations of quantum groups
(see \cite{BWquant93}). One motivation we should look at in more detail is contained in the papers \cite{25,26}. These papers are about noncommutative fibrations. The differences in approach can be summarised in two sentences: We require that the algebras have differential structures, and \cite{25,26} do not. The papers \cite{25,26} require that the base is commutative, and we do not. One interesting point is that the method of \cite{26} makes use of the classical Leray spectral sequence
of a fibration with base a simplicial complex. The fourth motivation is noncommutative algebraic topology, where we would define a fibration on a category whose objects were differential graded algebras. The interesting question is then whether there is a corresponding idea of cofibration in the sense of model categories \cite{quillModel}.
The example of the noncommutative Hopf fibration in \cite{three} shows that a differential fibration need not have a commutative base.
The example in Section \ref{se1} was made by taking a differential picture of a fibration given as an example in \cite{25} (the base is the functions on the circle), and so it can be considered a noncommutative fibration in both senses. It would be useful to consider whether higher dimensional constructions, such as the 4-dimensional orthogonal quantum sphere in \cite{48}, also give examples of differential fibrations. As differential calculi on finite groups are quite well understood (e.g.\ see \cite{17,Ma:rief}), it would be interesting to ask what a differential fibration corresponds to in this context. From the point of view of methods in mathematical Physics, the quantisation of twistor theory
(see \cite{BraMa}) is likely to provide some examples.
This paper is based on part of the content of the Ph.D.\ thesis \cite{MasThesis}.
\section{Spectral sequences}
This is standard material, and we use \cite{11} as a reference. We will give quite general definitions, but likely not the most general possible.
\subsection{What is a spectral sequence?}
A spectral sequence consists of series of pages (indexed by $r$) and objects $ \mathcal{E}^{p,q}_{r}$ (e.g.\ vector spaces), where $r,p,q$ are integers. We take $r\geq 1$ and $p,q \geq 0$ , and set $ \mathcal{E}^{p,q}_{r} = 0$ if $p < 0$ or $q < 0$ . There is a differential $$\mathrm{d}_{r} : \mathcal{E}^{p,q}_{r} \longrightarrow \mathcal{E}^{p+r,q+1-r}_{r}$$
such that $\mathrm{d}_{r}\mathrm{d}_{r} = 0$.
As $\mathrm{d}_{r}\mathrm{d}_{r} = 0$, we can take a quotient (in our case, quotient of vector spaces) $$\frac{ \ker \, \mathrm{d}_{r} : \mathcal{E}^{p,q}_{r} \rightarrow \mathcal{E}^{p+r,q+1-r}_{r}}{\mathrm{im} \, \mathrm{d}_{r} : \mathcal{E}^{p-r,q+r-1}_{r} \rightarrow \mathcal{E}^{p,q}_{r}} = H^{p,q}_{r}$$ Then the rule for going from page $r$ to page $r+1$ is $ \mathcal{E}^{p,q}_{r+1} = H^{p,q}_{r}$.
The maps $d_{r+1}$ are given by a detailed formula on $H^{p,q}_{r}$.
The idea is that eventually the $ \mathcal{E}^{p,q}_{r}$ will become fixed for $r$ large enough.
The spectral sequence is said to converge to these limiting cases $ \mathcal{E}^{p,q}_{\infty}$ as $r$ increases.
\subsection{The spectral sequence of a filtration}\label{a53}
A decreasing filtration of a vector space $V$ is a sequence of subspaces $F^m V$
($m\in\mathbb{N}$) for which $F^{m+1}V \subset F^m V$.
The reader should refer to \cite{11} for the details of the homological algebra used to construct the spectral sequence. We will merely quote the results.
\begin{remark}\label{sprem} Start with a differential graded module $C^n$ (for $n\ge 0$) and $ \mathrm{d} :C^n \to C^{n+1}$ with $ \mathrm{d}^2=0$. Suppose that $C$ has a filtration $F^m C\subset C=\oplus_{n\ge 0}C^n$ for $m\ge 0$ so that:\\
(1)\quad $ \mathrm{d} F^m C \subset F^m C$ for all $m\ge 0$ (i.e.\ the filtration is preserved by $ \mathrm{d}$); \\
(2)\quad $F^{m+1} C\subset F^m C$ for all $m\ge 0$ (i.e.\ the filtration is decreasing); \\
(3)\quad $F^0 C=C$ and $F^m C^n=F^m C\cap C^n=\{0\}$ for all $m>n$ (a boundedness condition). \\
Then there is a spectral sequence $(\mathcal{E}_r^{*,*}, \mathrm{d}_r)$ for $r\ge 1$ ($r$ counts the page of the spectral sequence) with $ \mathrm{d}_r$ of bidegree $(r,1-r)$ and
\begin{eqnarray}\label{b7}
\mathcal{E}_1^{p,q} &=& H^{p+q}(F^pC/F^{p+1}C) \cr
&=& \frac{{\rm ker}\, \mathrm{d}:F^pC^{p+q}/F^{p+1}C^{p+q}\to F^pC^{p+q+1}/F^{p+1}C^{p+q+1}}{{\rm im}\, \mathrm{d}:F^pC^{p+q-1}/F^{p+1}C^{p+q-1}\to F^pC^{p+q}/F^{p+1}C^{p+q}}\ .
\end{eqnarray}
In more detail, we define
\begin{eqnarray*}
Z_{r}^{p,q} &=& F^{p} C^{p+q} \cap \mathrm{d}^{-1}(F^{p+r} C^{p+q+1})\ ,\cr
B_{r}^{p,q} &=& F^{p} C^{p+q} \cap \mathrm{d}(F^{p-r} C^{p+q-1})\ ,\cr
\mathcal{E}_{r}^{p,q} &=& Z_r^{p,q}/(Z_{r-1}^{p+1,q-1}+B_{r-1}^{p,q})\
.\end{eqnarray*}
The differential $ \mathrm{d}_{r}:\mathcal{E}_{r}^{p,q} \to \mathcal{E}_{r}^{p+r,q-r+1}$ is the map induced on quotienting $ \mathrm{d}:Z_{r}^{p,q} \to Z_{r}^{p+r,q-r+1}$. The diligent reader should remember an important point here, when reading the seemingly innumerable differentials in the pages to come. There is really only one differential $\mathrm{d}$ -- its domain or codomain may be different subspaces with different quotients applied, but the same $\mathrm{d}$ lies behind them all.
The spectral sequence converges to $H^*(C, \mathrm{d})$ in the sense that
\begin{eqnarray*}\mathcal{E}_\infty^{p,q} \cong \frac{F^p H^{p+q}(C, \mathrm{d})}{F^{p+1}H^{p+q}(C, \mathrm{d})}\ ,
\end{eqnarray*}
where $F^p H^*(C, \mathrm{d})$ is the image of the map $H^*(F^p C, \mathrm{d})\to H^*(C, \mathrm{d})$ induced by inclusion $F^p C\to C$.
\end{remark}
\subsection{The classical Leray spectral sequence}
The statement of the general Leray spectral sequence can be found in \cite{28}. We shall omit the supports and the subsets as we are only currently interested in a non commutative analogue of the spectral sequence.
Then the statement reads that, given $f : X \rightarrow Y$ and $\mathcal{S}$ a sheaf on $X$, there is a spectral sequence $$E^{pq}_{2} = H^{p}(Y, H^{q}(f,f \vert \mathcal{S}))$$ converging to $H^{p+q}(X,\mathcal{S})$.
Here $H^{q}(f,f \vert \mathcal{S}) $ is a sheaf on $Y$ which is given by the presheaf for an open $U \subset Y$ $$U\longmapsto H^{q}(f^{-1}U; \mathcal{S} \vert_{f^{-1}U}).$$
Here $f^{-1}U$ is an open set of $X$, and $\mathcal{S} \vert_{f^{-1}U}$ is the sheaf $\mathcal{S}$ restricted to this open set.
We shall consider the special case of a differential fibration. This is the background to the Serre spectral sequence, but we consider a sheaf on the total space.
The Leray spectral sequence of a fibration is a spectral sequence whose input is the cohomology of the base space $B$ with coefficients in the cohomology of the fiber $F$, and converges to the cohomology of the total space $E$. Here$$\pi : E \rightarrow B$$ is a fibration with fiber $F$. The difference of this from the Serre spectral sequence is that the cohomology may have coefficients in a sheaf on $E$.
| 3,321 | 20,916 |
en
|
train
|
0.164.1
|
\section{Noncommutative differential calculi and sheaf theory}
Take a possibly noncommutative algebra $A$. Then a differential calculus $(\Omega^*A,\mathrm{d})$
is given by the following.
\begin{defin}\label{anwar}
A differential calculus $(\Omega^*A,\mathrm{d})$ on $A$
consists of vector spaces $\Omega^{n}A$ with operators $\wedge$ and $\mathrm{d}$ so that \\
1) $\wedge : \Omega^r A \otimes \Omega^m A \longrightarrow \Omega^{r+m} A$ is associative (we do not assume any graded commutative property) \\
2) $\Omega^0 A = A $ \\
3) $\mathrm{d} : \Omega^n A \rightarrow \Omega^{n+1}A $ with $\mathrm{d}^2 =0$ \\
4) $\mathrm{d}(\xi \wedge \eta ) = \mathrm{d}\xi \wedge \eta + (-1)^r \xi \wedge \mathrm{d}\eta$ for $\xi \in \Omega^r A$ \\
5) $\Omega^1 A \wedge \Omega^n A = \Omega^{n+1} A$ . \\
6) $A.\mathrm{d} A = \Omega^{1}A$
\end{defin}
Note that many differential graded algebras do not obey (5), but those in classical differential geometry do, and it will be true in all our examples. There is only one place where we require (5), and we will point it out at the time.
A special case of $\wedge$
shows that each $\Omega^n A$ is an $A$-bimodule.
We will often use $\vert \xi \vert$ for the degree of $ \xi $, if $\xi \in \Omega^{n}A$, then $\vert \xi \vert = n$.
In the differential graded $(\Omega^{n} A,\wedge,\mathrm{d})$, we have $\mathrm{d}^{2} = 0$. This means that
$$\mathrm{im} \, \mathrm{d} : \Omega^{n-1} A \longrightarrow \Omega^{n} A \subset \ker \, \mathrm{d} : \Omega^{n} A \longrightarrow \Omega^{n+1} A\ .$$
Then we define the de Rham cohomology as
$$H^{n}_{\mathrm{dR}}(A)\ =\ \frac{\ker \, \mathrm{d} : \Omega^{n} A \longrightarrow \Omega^{n+1} A}{\mathrm{im} \, \mathrm{d} : \Omega^{n-1} A \longrightarrow \Omega^{n} A}\ .$$
We give the usual idea of covariant derivatives on left $A$ modules by using the left Leibniz rule:
\begin{defin}\label{de1}
Given a left $A$-module $E$, a left $A$-covariant derivative is a map $\nabla : E \rightarrow \Omega^1 A \otimes_{A} E$ which obeys the condition $\nabla ( a.e) = \mathrm{d}a \otimes e + a . \nabla e $ for all $e \in E$ and $a \in A$.
\end{defin}
After the fashion of the de-Rham complex, we can attempt to extend the covariant derivative to a complex as follows:
\begin{defin}\cite{three}
Given $(E,\nabla )$ a left $A$-module with covariant derivative, define $$ \nabla^{[n]} : \Omega^{n} A \otimes_{A} E \rightarrow \Omega^{n+1} A \otimes_{A} E , \quad \omega \otimes e \mapsto \mathrm{d}\omega \otimes e + (-1)^{n} \omega \wedge \nabla e .$$ Then the curvature is defined as $R = \nabla^{[1]} \nabla : E \rightarrow \Omega^{2} A \otimes E$, and is a left $A$-module map.
The covariant derivative is called flat if the curvature is zero.
\end{defin}
However, the curvature forms an obstruction to setting up a cohomology, as we now show:
\begin{propos}\cite{three}
For all $n \geq 0$, $\nabla^{[n+1]} \circ \nabla^{[n]} = \mathrm{id} \wedge R : \Omega^{n} A \otimes_{A} E \rightarrow \Omega^{n+2} A \otimes_{A} E .$
\end{propos}
We can now use this in a definition of a noncommutative sheaf \cite{three}.
\begin{defin}\label{key49}\cite{three}
Given $(E,\nabla )$ a left $A$-module with covariant derivative and zero curvature, define $H^*(A ; E, \nabla )$ to be the cohomology of the cochain complex $$E \stackrel{\nabla} \longrightarrow \Omega^{1} A \otimes_{A}E
\stackrel{\nabla^{[1]}} \longrightarrow \Omega^{2} A \otimes_{A} E \stackrel{\nabla^{[2]}}\longrightarrow \cdots$$ Note that $ H^{0}(E , \nabla ) = \{ e \in E : \nabla e = 0 \}$, the flat sections of $E$. We will often write $H^* (A ;E)$ where there is no danger of confusing the covariant derivative.
\end{defin}
We will take this opportunity to make a couple of well known
statements about modules over algebras which we will use, as it may make the reading later easier for non-experts (see e.g.\ \cite{44}) .
\begin{defin} A right $A$-module $E$ is flat if every short exact sequence of left $A$-modules $$0 \longrightarrow L \longrightarrow M \longrightarrow N \longrightarrow 0$$
gives another short exact sequence $$0 \longrightarrow E\otimes_{A} L \longrightarrow E\otimes_{A} M \longrightarrow E\otimes_{A} N \longrightarrow 0.$$
Similarly, a left $A$-module $F$ is called flat if $-\mathop{\otimes}_A F$ preserves exactness
of short sequences of right modules.
\end{defin}
\begin{lemma}\label{raneem}
Given two short exact sequences of modules (left or right),
\begin{eqnarray*}
&& 0 \longrightarrow U \stackrel{t}\longrightarrow V \stackrel{f}\longrightarrow W \longrightarrow 0 \ ,\cr
&& 0 \longrightarrow U \stackrel{t}\longrightarrow V \stackrel{g}\longrightarrow X \longrightarrow 0\ ,
\end{eqnarray*}
there is an isomorphism $h : W \longrightarrow X $ given by $h(w) = g(v)$, where $f(v) = w$.
\end{lemma}
\section{Differential fibrations and the Serre spectral sequence}
\subsection{A simple differential fibration}\label{a52}
The reader may take this section as a justification of why the definition of a noncommutative
differential fibration which we will give in Definition \ref{b61} is reasonable.
Take a trivial fibration $\pi:\mathbb{R}^n \times \mathbb{R}^m \to \mathbb{R}^n$ given by
$$(x_{1} , . . . .,x_{n},y_{1} , . . . .,y_{m}) \longmapsto (x_{1} , . . . .,x_{n})\ .$$
Here the base space is $B = \mathbb{R}^n$, the fiber is $\mathbb{R}^m$, and the total space is $E = \mathbb{R}^{n+m}$.
We can write a basis for the differential forms on the total space, putting the $B$ terms (the $\mathrm{d} x_{i}$) first. A form of degree $p$ in the base and $q$ in the fiber (total degree $p+q$) is
$$\mathrm{d} x_{\iota_{1}} \wedge . . . .\wedge \mathrm{d} x_{\iota_{p}} \wedge \mathrm{d} y_{j_{1}} \wedge . . . .\wedge \mathrm{d} y_{j_{q}}\ ,$$
for example $\mathrm{d} x_{2} \wedge \mathrm{d} x_{4} \wedge \mathrm{d} y_{1} \wedge \mathrm{d} y_{7} \wedge \mathrm{d} y_{9}$,
If we have the projection map $\pi : E \longrightarrow B$, we can write our example form as
$$\alpha \ =\ \pi^*(\mathrm{d} x_{2} \wedge \mathrm{d} x_{4}) \wedge (\mathrm{d} y_{1} \wedge \mathrm{d} y_{7} \wedge \mathrm{d} y_{9})$$
so we have a form in $\pi^* \Omega^2 B \wedge \Omega^3 E$. Another element of $\pi^* \Omega^2 B \wedge \Omega^3 E$ might be
$$\beta\ =\ \pi^*(\mathrm{d} x_{2} \wedge \mathrm{d} x_{4}) \wedge (\mathrm{d} x_{3} \wedge \mathrm{d} y_{1} \wedge \mathrm{d} y_{_{7}}).$$
Note, we now just look at $\Omega^3 E$, not the forms in the fiber direction, as in the noncommutative case we will not know (at least in the beginning) what the fiber is. We need to describe the forms on the fiber space more indirectly.
Now look at the vector space quotient
\begin{eqnarray}{{}_{\scriptscriptstyle(2)}riangleright}bel{vcfhgmk}
\frac{\pi^* \Omega^2 B \wedge \Omega^3 E}{\pi^* \Omega^3 B \wedge \Omega^2 E}\ .
\end{eqnarray}
Here $\beta$ is also an element of the bottom line of (\ref{vcfhgmk}), as we could write
$$\beta = \pi^*(\mathrm{d} x_{2} \wedge \mathrm{d} x_{4} \wedge \mathrm{d} x_{3}) \wedge (\mathrm{d} y_{1} \wedge \mathrm{d} y_{_{7}})$$
so, denoting the quotient by square brackets, $[\beta] = 0$. On the other hand, $\alpha$ is not in the bottom line of (\ref{vcfhgmk}), so $[\alpha] \neq 0$. We can now use $$\frac{\pi^* \Omega^p B \wedge \Omega^q E}{\pi^* \Omega^{p+1} B \wedge \Omega^{q-1} E}$$
to denote the forms on the total space which are of degree $p$ in the base and degree $q$ in the fiber, without explicitly having any coordinates for the fiber. This is just the idea of a
noncommutative differential fibration.
\subsection{Noncommutative differential fibrations}
In Subsection \ref{a52} we had a topological fibration $\pi:\mathbb{R}^{m+n}\to \mathbb{R}^n$.
For algebras, we will reverse the arrows, and look at $ \iota : B \rightarrow A$, where $B$ is the `base algebra' and $A$ is the `total algebra'.
Suppose that both $A$ and $B$ have differential calculi, and that the algebra map $\iota : B \rightarrow A$
is differentiable. This means that $\iota : B \rightarrow A$ extends to a map of differential graded algebras
$\iota_* : \Omega^*B \rightarrow \Omega^*A$, and in particular that $\mathrm{d}\,\iota_*=\iota_*\,\mathrm{d}$
and $\iota_*\,\wedge=\wedge\,(\iota_*\mathop{\otimes} \iota_*)$.
Now we set
\begin{eqnarray} \label{cvhgsuv}
D_{p,q} = \iota_{*} \Omega^{p} B \wedge \Omega^{q} A\quad\mathrm{and} \quad
N_{p,q} = \frac{D_{p,q}}{D_{p+1,q-1}}\ ,\quad N_{p,0} \,=\, \iota_*\Omega^{p} B.A\ .
\end{eqnarray}
Now we can finally define a differential fibration, remembering that we use $[\ ]$ to denote equivalence class in the quotient in (\ref{cvhgsuv}):
\begin{defin}\label{b61}
$\iota : B \longrightarrow A$ is a differential fibration if the map$$\xi \otimes [x] \longmapsto [\iota_{*} \xi \wedge x]$$ gives an isomorphism from $\Omega^{p}B \otimes_{B} N_{0,q}$ to $N_{p,q}$ for all $p,q\ge 0$.
\end{defin}
\begin{example}\label{f2}
(See section 8.5 of \cite{three}.) Given the left covariant calculus on the quantum group $SU_{q}(2)$ given by Woronowicz \cite{worondiff}, the corresponding differential calculus on the quantum sphere $S^{2}_{q}$ gives a differential fibration $$\iota : S^{2}_{q} \longrightarrow SU_{q}(2)\ .$$Here the algebra $S^{2}_{q}$ is the invariants of $SU_{q}(2)$ under a circle action, and $\iota$ is just the inclusion.
\end{example}
\noindent We will give another example in Section \ref{se1}. Now we have the following version of the Serre spectral sequence from \cite{three}.
\begin{theorem}
Suppose that $\iota : B \rightarrow A$ is a differential fibration. Then there is a spectral sequence converging to $H^{*}_{\mathrm{dR}}(A)$ with $$E^{p,q}_{2} \cong H^{p}(B ; H^{q}(N_{0,*} ), \nabla )\ .$$
\end{theorem}
Here $\nabla$ is a zero curvature covariant derivative on the left $B$-modules $N_{0,n}$, whose construction we will not go further into, as we are about to do something more general.
| 3,693 | 20,916 |
en
|
train
|
0.164.2
|
\section{The noncommutative Leray spectral sequence}
\subsection{A filtration of a cochain complex}\label{a54}
We suppose that $E$ is a left $A$-module, with a left covariant derivative $$\nabla : E \longrightarrow \Omega^1 A \otimes_{A}E$$
and that this covariant derivative is flat, i.e.\ that its curvature vanishes. Then $\nabla^{[n]} : \Omega^{n}A\otimes_{A}E \longrightarrow \Omega^{n+1}A \otimes_{A}E$ is a cochain complex (see definition \ref{key49}).
Suppose that $\iota_* : \Omega^*B \longrightarrow \Omega^*A$ is a map of differential graded algebras. We define a filtration of $\Omega^{n}A \otimes_{A}E$ by
\begin{eqnarray}\label{rana}
F^m(\Omega^{n}A \otimes_{A} E) = \left\{ \begin{array}{ll}
\iota_{*} \Omega^m B \wedge \Omega^{n-m} A \otimes_{A} E & \mbox{ $0 \leq m \leq n$};\\
0 & \mathrm{otherwise}.\end{array} \right.
\end{eqnarray}
\begin{propos}
The filtration in (\ref{rana}) satisfies the conditions of remark \ref{sprem}.\\
\textbf{Proof:}
First \quad $ F^0 (\Omega^n A \otimes_{A} E) = \iota_{*}\Omega^0 B \wedge \Omega^n A \otimes_{A} E,$
but $1 \in \iota_{*} \Omega^0 B = \iota_{*}B$, so $ F^0 (\Omega^n A \otimes_{A} E) = \Omega^n A \otimes_{A} E.$
To show it is decreasing, (using condition (5) from definition \ref{anwar})
\begin{eqnarray*}
F^{m+1} (\Omega^n A \otimes_{A} E) &=& \iota_{*}\Omega^{m+1} B \wedge \Omega^{n-m-1} A \otimes_{A} E \cr
&=& \iota_{*} \Omega^m B \wedge (\iota_{*} \Omega^{1} B \wedge \Omega^{n-m-1}A) \otimes_{A} E\cr
&\subset & \iota_{*} \Omega^m B \wedge \Omega^{n-m}A \otimes_{A} E \cr
&\subset & F^m (\Omega^n A \otimes_{A} E)\ .
\end{eqnarray*}
To show that the filtration is preserved by $\mathrm{d}$, take $\iota_{*}
\xi \wedge \eta \otimes e \in F^m (\Omega^n A \otimes_{A} E)$
where $\xi \in \Omega^m B$, and $\eta \in \Omega^{n-m} A$. Then
$$\mathrm{d}(\iota_{*} \xi \wedge \eta \otimes e) = \iota_{*} \mathrm{d}\xi \wedge \eta \otimes e + (-1)^m \iota_{*} \xi \wedge \mathrm{d}\eta \otimes e +(-1)^n \iota_{*} \xi \wedge \eta \wedge \nabla e$$
This is in $F^m C$, as the first term is in $F^{m+1}C \subset F^m C$, and the other two are in $F^m C$.\quad $\square$
\end{propos}
Now we have a spectral sequence which converges to $H^{*}_{d R}(A ; E)$. All we have to do is to find the first and second pages of the spectral sequence, though this is quite lengthy.
| 1,063 | 20,916 |
en
|
train
|
0.164.3
|
\subsection{The first page of the spectral sequence}\label{first}
From section \ref{a53}, to use the filtration in section \ref{a54} we need to work with
\begin{eqnarray} \label{cbdhsiouv}
M_{p,q} = \frac{F^p C^{p+q}}{F^{p+1} C^{p+q}} = \frac{\iota_{*}\Omega^p B \wedge \Omega^q A \otimes_{A} E}{\iota_{*}\Omega^{p+1} B \wedge \Omega^{q-1} A \otimes_{A} E}
\end{eqnarray}
Then we look, for $p$ fixed (following (\ref{b7})), at the sequence
\begin{eqnarray}\label{b60}
\cdots M_{p,q-1} \stackrel{\mathrm{d}} \longrightarrow M_{p,q} \stackrel{\mathrm{d}} \longrightarrow M_{p,q+1} \stackrel{\mathrm{d}} \longrightarrow \cdots
\end{eqnarray}
as the cohomology of this sequence gives the first page of the spectral sequence.
Denote the quotient in $M_{p,q}$ by $[ \quad ]_{p,q}$, so if $x \in \iota_{*} \Omega^{p}B \wedge \Omega^{q} A \otimes_{A} E$, then $[x]_{p,q} \in M_{p,q}$.
Then we have a map of left $B$ modules $$\Omega^{p}B \otimes_{B} M_{0,q} \longrightarrow M_{p,q}\ ,\quad \xi \otimes [y]_{0,q} \longmapsto [\iota_{*}\xi \wedge y]_{p,q}.$$
Here $y \in \Omega^{q} A \otimes_{A} E$ and the left action of $b \in B$ on $y$ is $\iota(b)y$.
\begin{propos}\label{aa66}
If $E$ is flat as a left $A$-module, then $N_{p,q} \otimes_{A} E \cong M_{p,q}$ with isomorphism $[z] \otimes e \longmapsto [z \otimes e]_{p,q}$.\end{propos}
\textbf{Proof:} We have, by definition, a short exact sequence
using notation from (\ref{cvhgsuv}), where $\mathrm{inc}$ is inclusion and $[\, \,]$ is quotient, $$0 \longrightarrow D_{p+1,q-1} \stackrel{\mathrm{inc}}\longrightarrow D_{p,q} \stackrel{[\, \,]}\longrightarrow N_{p,q} \longrightarrow 0.$$
As $E$ is flat, we get another short exact sequence, $$0 \longrightarrow D_{p+1,q-1} \otimes_{A} E \stackrel{\mathrm{inc} \otimes \mathrm{id}}\longrightarrow D_{p,q} \otimes_{A} E \stackrel{[\, \,] \otimes \mathrm{id}}\longrightarrow N_{p,q} \otimes_{A} E \longrightarrow 0$$ but by definition we also have $$0 \longrightarrow D_{p+1,q-1} \otimes_{A} E \stackrel{\mathrm{inc} \otimes \mathrm{id}}\longrightarrow D_{p,q} \otimes_{A} E \stackrel{[\, \,]_{p,q}}\longrightarrow M_{p,q} \longrightarrow 0.$$and the result follows from Lemma \ref{raneem}.\quad $\square$
\begin{propos}\label{g2}
If $E$ is a flat left $A$ module, and $\iota : B \longrightarrow A$ is a fibering in the sense of definition \ref{b61}, then $$\Omega^{p} B \otimes_{B} N_{0,q} \otimes_{A} E \cong M_{p,q} $$ via the map
$$ \xi \otimes [x] \otimes e \longmapsto [\iota_{*} \xi \wedge x \otimes e ]_{p,q}.$$
\end{propos}
\textbf{Proof:} Definition \ref{b61} gives an isomorphism
$$\Omega^{p} B \otimes_{B} N_{0,q} \longrightarrow N_{p,q}$$
by $ \xi \otimes [x] \longmapsto [\iota_{*} \xi \wedge x].$
Now use Proposition \ref{aa66}. \quad $\square$
We now return to the problem of calculating the cohomology of the sequence (\ref{b60}). Take
$\xi \otimes [x] \otimes e \in \Omega^{p} B \otimes_{B} N_{0,q} \otimes_{A}E$
(for $x \in \Omega^{q}A$)
which maps to $[\iota_{*} \xi \wedge x \otimes e] \in M_{p,q}$, and apply the differential $\nabla^{[p+q]}$ to it to get
\begin{eqnarray}
&&\mathrm{d}(\iota_{*} \xi \wedge x) \otimes e + (-1)^{p+q}\ \iota_{*} \xi \wedge x \wedge \nabla e \cr
&=& \iota_{*} \mathrm{d}\xi \wedge x \otimes e + (-1)^{p}\ \iota_{*} \xi \wedge \mathrm{d} x \otimes e + (-1)^{p+q}\ \iota_{*} \xi \wedge x \wedge \nabla e\ .
\end{eqnarray}
But $\mathrm{d}\xi \in \Omega^{p+1} B$, and $$M_{p,q+1} = \frac{\iota_{*} \Omega^{p}B \wedge \Omega^{q+1} A \otimes_{A}E}{\iota_{*} \Omega^{p+1}B \wedge \Omega^{q} A \otimes_{A}E}\ ,$$
so the first term vanishes on applying $[\quad]_{p,q+1}$. Then
\begin{eqnarray}\label{g1}
\mathrm{d}[\iota_{*} \xi \wedge x \otimes e]_{p,q} = (-1)^{p}[\iota_{*} \xi \wedge (\mathrm{d} x \otimes e +(-1)^{q}x \wedge \nabla e)]_{p,q+1}
\end{eqnarray}
Then, using Proposition \ref{g2}, we have an isomorphism
\begin{eqnarray}\label{b1}
\Omega^{p} B \otimes_{B} M_{0,q} \cong M_{p,q} \ ,\quad \xi \otimes [y]_{0,q} \longmapsto [\iota_{*} \xi \wedge y]_{p,q}\ ,
\end{eqnarray}
and using this isomorphism, $\mathrm{d}$ on $M_{p,q}$ can be written as (see \ref{g1})
\begin{eqnarray}\label{g4}
\mathrm{d}( \xi \otimes [y]_{0,q} ) = (-1)^{p} \xi \otimes [\nabla^{[q]} y]_{0,q+1}
\end{eqnarray}
where $y \in \Omega^{q} A \otimes_{A} E$. From (\ref{g4}) we see that we should study $[\nabla^{[q]}] : M_{0,q} \longrightarrow M_{0,q+1}$, defined by $[y]_{0,q} \longmapsto [\nabla^{[q]} y]_{0,q+1}$.
\begin{propos} \label{cvauiuy}
$[\nabla^{[q]} ] : M_{0,q} \longrightarrow M_{0,q+1}$ is a left $B$ module map. The module structure is $b . [\eta \otimes e] = [i(b) \eta \otimes e ]$, for $b \in B$ and $\eta \otimes e \in \Omega^{q} A \otimes_{A} E$.
\end{propos}
\textbf{Proof:} First,
\begin{eqnarray*}
[\nabla^{[q]}](b .[\eta \otimes e]_{0,q}) &=& [\mathrm{d}(i(b)\eta ) \otimes e+(-1)^{q}i(b)\eta \wedge \nabla e]_{0,q+1}\cr
&=& [\iota_{*}(\mathrm{d} b)\wedge \eta \otimes e+i(b).\mathrm{d}\eta \otimes e+(-1)^{q}i(b) \eta \wedge \nabla e]_{0,q+1}
\end{eqnarray*}
Now $$\iota_{*}(\mathrm{d} b)\wedge \eta \otimes e \in \iota_{*} \Omega^{1} B \wedge \Omega^{q}A \otimes_{A}E$$
so $[\iota_{*}(\mathrm{d} b)\wedge \eta \otimes e]_{0,q+1} = 0$ in $M_{0,q+1}$. Then
\begin{eqnarray*}
[\nabla^{[q]}](b . [\eta \otimes e]_{0,q})& =& [i(b).\mathrm{d}\eta \otimes e+(-1)^{q}i(b) \eta \wedge \nabla e]_{0,q+1} \cr
&=&b .[\mathrm{d}\eta \otimes e + (-1)^{q} \eta \wedge\nabla e]_{0,q+1}. \quad \square
\end{eqnarray*}
\begin{propos}\label{bob2}
If $\Omega^p B$ is flat as a right $B$ module, the cohomology of the cochain complex $$\cdots M_{p,q-1} \stackrel{\mathrm{d}}{\longrightarrow} M_{p,q} \stackrel{\mathrm{d}}{\longrightarrow} M_{p,q+1} \stackrel{\mathrm{d}}{\longrightarrow} \cdots$$ is given by $\Omega^{p}B \otimes_{B}\hat{H}_{q}$, where $\hat{H}_{q}$ is defined as the cohomology of the cochain complex
$$\cdots \stackrel{[\nabla^{[q-1]}]}{\longrightarrow} M_{0,q} \stackrel{[\nabla^{[q]}]}{\longrightarrow} M_{0,q+1} \stackrel{[\nabla^{[q+1]}]}{\longrightarrow} \cdots.$$
If we write $\left\langle \quad \right\rangle_{p,q}$ for the equivalence class in the cohomology of $M_{p,q}$, this isomorphism is given by, for $\xi \in \Omega^{p} B$ and $x \in \Omega^{q} A \otimes_{A} E$,
\begin{eqnarray}\label{u2}
\left\langle \iota_{*} \xi \wedge x \right\rangle_{p,q} \longrightarrow \xi \otimes \left\langle x \right\rangle_{0,q}\ .
\end{eqnarray}
\end{propos}
\textbf{Proof:} To calculate the cohomology, we need to find
${Z}_{p,q} = \mathrm{im} \, \mathrm{d} : M_{p,q-1} \to M_{p,q}$ and
${K}_{p,q} = \ker \, \mathrm{d} : M_{p,q} \to M_{p,q+1}$.
As we know from Proposition \ref{cvauiuy} that $\mathrm{d}=[\nabla^{[q]}]: M_{0,q} \longrightarrow M_{0,q+1}$ is a left $B$ module map, we have an exact sequence of left $B$ modules, where the first map is inclusion,
\begin{eqnarray}\label{where}
0 \longrightarrow K_{0,q} \stackrel{\mathrm{inc}}{\longrightarrow} M_{0,q} \stackrel{\mathrm{d}}{\longrightarrow} Z_{0,q+1} \longrightarrow 0\ .
\end{eqnarray}
Since $\Omega^{p}B$ is flat as a right $B$ module, we have another exact sequence,
\begin{eqnarray}\label{b2}
0 \longrightarrow \Omega^{p}B\otimes_{B} K_{0,q} \stackrel{\mathrm{id} \otimes \mathrm{inc}}{\longrightarrow} \Omega^{p}B\otimes_{B} M_{0,q} \stackrel{\mathrm{id} \otimes \mathrm{d}}{\longrightarrow} \Omega^{p}B\otimes_{B} Z_{0,q+1} \longrightarrow 0\ .
\end{eqnarray}
Now refer to the isomorphism given in (\ref{b1}), and then by (\ref{g4}) the last map $\mathrm{id} \otimes \mathrm{d}$ is $(-1)^{p} \mathrm{d}$ on $M_{p,q}$, so
${Z}_{p,q} = \Omega^{p} B \otimes_{B} Z_{0,q}$ and ${K}_{p,q} = \Omega^{p} B \otimes_{B} K_{0,q}$.
| 3,870 | 20,916 |
en
|
train
|
0.164.4
|
\begin{propos}\label{bob2}
If $\Omega^p B$ is flat as a right $B$ module, the cohomology of the cochain complex $$\cdots M_{p,q-1} \stackrel{\mathrm{d}}{\longrightarrow} M_{p,q} \stackrel{\mathrm{d}}{\longrightarrow} M_{p,q+1} \stackrel{\mathrm{d}}{\longrightarrow} \cdots$$ is given by $\Omega^{p}B \otimes_{B}\hat{H}_{q}$, where $\hat{H}_{q}$ is defined as the cohomology of the cochain complex
$$\cdots \stackrel{[\nabla^{[q-1]}]}{\longrightarrow} M_{0,q} \stackrel{[\nabla^{[q]}]}{\longrightarrow} M_{0,q+1} \stackrel{[\nabla^{[q+1]}]}{\longrightarrow} \cdots.$$
If we write $\left\langle \quad \right\rangle_{p,q}$ for the equivalence class in the cohomology of $M_{p,q}$, this isomorphism is given by, for $\xi \in \Omega^{p} B$ and $x \in \Omega^{q} A \otimes_{A} E$,
\begin{eqnarray}\label{u2}
\left\langle \iota_{*} \xi \wedge x \right\rangle_{p,q} \longrightarrow \xi \otimes \left\langle x \right\rangle_{0,q}\ .
\end{eqnarray}
\end{propos}
\textbf{Proof:} To calculate the cohomology, we need to find
${Z}_{p,q} = \mathrm{im} \, \mathrm{d} : M_{p,q-1} \to M_{p,q}$ and
${K}_{p,q} = \ker \, \mathrm{d} : M_{p,q} \to M_{p,q+1}$.
As we know from Proposition \ref{cvauiuy} that $\mathrm{d}=[\nabla^{[q]}]: M_{0,q} \longrightarrow M_{0,q+1}$ is a left $B$ module map, we have an exact sequence of left $B$ modules, where the first map is inclusion,
\begin{eqnarray}\label{where}
0 \longrightarrow K_{0,q} \stackrel{\mathrm{inc}}{\longrightarrow} M_{0,q} \stackrel{\mathrm{d}}{\longrightarrow} Z_{0,q+1} \longrightarrow 0\ .
\end{eqnarray}
Since $\Omega^{p}B$ is flat as a right $B$ module, we have another exact sequence,
\begin{eqnarray}\label{b2}
0 \longrightarrow \Omega^{p}B\otimes_{B} K_{0,q} \stackrel{\mathrm{id} \otimes \mathrm{inc}}{\longrightarrow} \Omega^{p}B\otimes_{B} M_{0,q} \stackrel{\mathrm{id} \otimes \mathrm{d}}{\longrightarrow} \Omega^{p}B\otimes_{B} Z_{0,q+1} \longrightarrow 0\ .
\end{eqnarray}
Now refer to the isomorphism given in (\ref{b1}), and then by (\ref{g4}) the last map $\mathrm{id} \otimes \mathrm{d}$ is $(-1)^{p} \mathrm{d}$ on $M_{p,q}$, so
${Z}_{p,q} = \Omega^{p} B \otimes_{B} Z_{0,q}$ and ${K}_{p,q} = \Omega^{p} B \otimes_{B} K_{0,q}$.
From the definition of $\hat{H}_{q}$ we have another short exact sequence,
\begin{eqnarray*}
0 \longrightarrow Z_{0,q} \stackrel{\mathrm{inc}}{\longrightarrow} K_{0,q} \longrightarrow \hat{H}_{q} \longrightarrow 0\ ,
\end{eqnarray*}
and applying $\Omega^{p}B \otimes_{B}$ gives, as $\Omega^{p}B$ is flat as a right $B$ module,
\begin{eqnarray}\label{g6}
0 \longrightarrow \Omega^{p} B \otimes_{B} Z_{0,q} \stackrel{\mathrm{id} \otimes \mathrm{inc}}{\longrightarrow} \Omega^{p} B \otimes_{B} K_{0,q} \longrightarrow \Omega^{p} B \otimes_{B} \hat{H}_{q} \longrightarrow 0\ .
\end{eqnarray}
We deduce that the cohomology of $M_{p,q}$ is isomorphic to $\Omega^{p}B \otimes_{B} \hat{H}_{q}$. \quad
$\square$
| 1,420 | 20,916 |
en
|
train
|
0.164.5
|
\subsection{The second page of the spectral sequence}
Now we move to the second page of the spectral sequence, in which we take the cohomology of the previous cohomology, i.e.\ the cohomology of
$$\mathrm{d} : \mathrm{cohomology} \, (M_{p,q} ) \longrightarrow \mathrm{cohomology} \, (M_{p+1,q}).$$
By the isomorphism discussed in Proposition \ref{bob2}, we can view this as
\begin{eqnarray}
\mathrm{d} : \Omega^{p}B \otimes_{B} \hat{H}_{q}\longrightarrow \Omega^{p+1}B \otimes_{B} \hat{H}_{q}\ .
\end{eqnarray}
\begin{propos}\label{b6}
The differential $\mathrm{d}$ gives a left covariant derivative $$\nabla_{q} : \hat{H}_{q} \longrightarrow \Omega^{1}B \otimes_{B} \hat{H}_{q}.$$ If $\left\langle \xi \otimes e\right\rangle_{0,q} \in \hat{H}_{q}$, this is given by using the isomorphism (\ref{u2}) as
$$\left\langle \xi \otimes e\right\rangle_{0,q} \longmapsto \eta \otimes \left\langle \omega \otimes f \right\rangle_{0,q}\ ,$$
where
$$\mathrm{d}\xi \otimes e + (-1)^{q}\xi \wedge \nabla e = \iota_{*} \eta \wedge \omega \otimes f
\in \iota_*\Omega^1 B\wedge\Omega^q A\mathop{\otimes}_A E\ .
$$
\end{propos}
\textbf{Proof}: Take $\left\langle x\right\rangle_{0,q} \in \hat{H}_{q}$, where $x \in K_{0,q} = \ker \, \mathrm{d} : M_{0,q} \to M_{0,q+1}$, and suppose $x = \xi \otimes e$, where $\xi \in \Omega^{q}A$ and $e \in E$ (summation implicit). As $x \in K_{0,q}$ we have
$$[\mathrm{d} x]_{0,q+1} = [\mathrm{d}\xi \otimes e + (-1)^{q}\xi \wedge \nabla e]_{0,q+1} = 0$$
in $M_{0,q+1}$, so
$$\mathrm{d}\xi \otimes e + (-1)^{q}\xi \wedge \nabla e \in \iota_{*} \Omega^{1}B \wedge \Omega^{q} A \otimes_{A} E.$$ We write (summation implicit), for $\eta \in \Omega^{1} B$,
$\omega \in \Omega^{1} A$ and $f \in E$,
\begin{eqnarray}\label{b3}
\mathrm{d}\xi \otimes e +(-1)^{q} \xi \wedge \nabla e = \iota_{*}\eta \wedge \omega \otimes f\ .
\end{eqnarray}
Under the isomorphism (\ref{b1}), this corresponds to $\eta \otimes [\omega \otimes f]_{0,q} \in \Omega^{1}B \otimes_{B} M_{0,q}$.
As the curvature of $E$ vanishes, we have from applying $\nabla^{[q+1]}$ to (\ref{b3}),
\begin{eqnarray}\label{b4}
\iota_{*} \mathrm{d}\eta \wedge \omega \otimes f - \iota_{*} \eta \wedge \mathrm{d}\omega \otimes f + (-1)^{q+1} \iota_{*} \eta \wedge \omega \wedge \nabla f = 0 .
\end{eqnarray}
We take this as an element of $M_{1,q+1}$, so we apply $[\quad ] _{1,q+1}$ to (\ref{b4}). Then as the denominator of $M_{1,q+1}$ is $\iota_{*} \Omega^{2}B \wedge \Omega^{q} A \otimes_{A} E$, we see that the first term of (\ref{b4}) vanishes on taking the quotient, giving
$$- [ \iota_{*} \eta \wedge (\mathrm{d}\omega \otimes f +(-1)^{q} \omega \wedge \nabla f )] _{1,q+1} = 0.$$ Under the isomorphism (\ref{b1}) this corresponds to
\begin{eqnarray}\label{b5}
- \eta \otimes_{B} [\mathrm{d}\omega \otimes f + (-1)^{q} \omega \wedge \nabla f ]_{0,q+1} = 0.
\end{eqnarray}
This means that $$\eta \otimes [ \omega \otimes f ]_{0,q} \in \Omega^{1}B \otimes_{B} M_{0,q}$$
is in the kernel of the map $\mathrm{id} \otimes \mathrm{d}$ in (\ref{b2}), and as (\ref{b2}) is an exact sequence we have $$\eta \otimes [\omega \otimes f]_{0,q} \in \Omega^{1} B \otimes_{B} K_{0,q},$$ so we can take the cohomology class to get $$\eta \otimes \left\langle \omega \otimes f \right\rangle_{0,q} \in \Omega^{1}B \otimes_{B} \hat{H}_{q}.$$
This completes showing that $\nabla_{q}$ exists, but we also need to show that it is a left covariant derivative. For $b \in B$, we calculate $\nabla_{q}(b. \xi \otimes e )$ to get $$\mathrm{d}(b.\xi) \otimes e + (-1)^{q} b. \xi \wedge \nabla e = \mathrm{d} b \wedge \xi \otimes e + b. (\mathrm{d}\xi \otimes e + (-1)^{q} \xi \wedge \nabla e),$$
so we get $$\nabla_{q}\left\langle b. \xi \otimes e\right\rangle _{0,q} = \mathrm{d} b \otimes \left\langle \xi \otimes e\right\rangle_{0,q} + b. \nabla_{q}\left\langle \xi \otimes e \right\rangle _{0,q}\ . \quad \square$$
\begin{propos}\label{b9}
The curvature $R_q$ of the covariant derivative $\nabla_{q}$ in Proposition \ref{b6} is zero.
\end{propos}
\textbf{Proof:} Using the notation of Proposition \ref{b6}, equation (\ref{b3}) gives $$\nabla_{q}\left\langle \xi \otimes e\right\rangle_{0,q} = \eta \otimes \left\langle \omega \otimes f\right\rangle_{0,q}.$$
If we apply $\nabla_{q}^{[1]}$ (see Definition \ref{key49}) to this, we get
\begin{eqnarray}\label{u4}
R_{q}\left\langle \xi \otimes e\right\rangle _{0,q} = \mathrm{d}\eta \otimes \left\langle \omega \otimes f\right\rangle_{0,q} - \eta \wedge \nabla_{q} \left\langle \omega \otimes f\right\rangle _{0,q}.
\end{eqnarray}
To find $\nabla_{q}\left\langle \omega \otimes f\right\rangle _{0,q}$, referring to the proof of Proposition \ref{b6}, formula (\ref{b5}), we have
$$\eta \otimes_{B}(\mathrm{d}\omega \otimes f + (-1)^{q} \omega \wedge \nabla f ) \in \Omega^{1}B \otimes_{B} (\iota_{*} \Omega^{1}B \wedge \Omega^{q}A \otimes_{A}E).$$
This comes from tensoring the exact sequence
$$0 \longrightarrow \iota_{*} \Omega^{1}B \wedge \Omega^{q}A \otimes_{A}E \longrightarrow \Omega^{q+1}A \otimes_{A} E \stackrel{[\, \,]_{0,q+1}}{\longrightarrow} M_{0,q+1}\longrightarrow 0$$ on the left by $\Omega^{1}B$, and using that $\Omega^{1}B$ is a flat right module. Now write (summation implicit),
\begin{eqnarray}\label{u5}
\eta \otimes (\mathrm{d}\omega \otimes f +(-1)^{q} \omega \wedge \nabla f) = \eta^{\prime} \otimes (\iota_{*} \kappa \wedge \zeta \otimes g)
\end{eqnarray}
for $\eta^{\prime}$, $\kappa \in \Omega^{1} B$, $\zeta \in \Omega^{q}A$ and $g \in E$.
Then, from Proposition \ref{b6},
$$\eta \wedge \nabla_{q}\left\langle \omega \otimes f\right\rangle _{0,q} = \eta^{\prime} \wedge \kappa \otimes \left\langle \zeta \otimes g\right\rangle _{0,q}$$ so from (\ref{u4}),
\begin{eqnarray}\label{arwa}
R_{q}\left\langle \xi \otimes e\right\rangle _{0,q} = \mathrm{d}\eta \otimes \left\langle \omega \otimes f\right\rangle_{0,q} - \eta^{\prime} \wedge \kappa \otimes \left\langle \zeta \otimes g\right\rangle_{0,q}\ .
\end{eqnarray}
Now (\ref{u5}) implies that $$\iota_{*}\eta \wedge (\mathrm{d} \omega \otimes f + (-1)^{q} \omega \wedge \nabla f) = \iota_{*} \eta^{\prime} \wedge \iota_{* } \kappa \wedge \zeta \otimes g\ ,$$
and substituting this into (\ref{b4}) gives
$$\iota_{*} \mathrm{d}\eta \wedge \omega \otimes f - \iota_{*} \eta^{\prime} \wedge \iota_{*} \kappa \wedge \zeta \otimes g = 0\ ,$$
so on taking equivalence classes in $M_{2,q}$ we find, using the isomorphism (\ref{b1}),
$$\mathrm{d}\eta \otimes [\omega \otimes f]_{0,q} - \eta^{\prime} \wedge \kappa \otimes [\zeta \otimes g]_{0,q} = 0\ ,$$
and this shows that $R_q=0$ by (\ref{arwa}). $\square$
| 3,735 | 20,916 |
en
|
train
|
0.164.6
|
\begin{theorem}
Given:\\
1) a map $\iota : B \longrightarrow A$ which is a differential fibration (see definition \ref{b61}),\\
2) a flat left $A$ module $E$, with a zero-curvature left covariant derivative $\nabla_{E} : E \to \Omega^{1}A \otimes_{A} E$,\\
3) each $\Omega^{p}B$ is flat as a right $B$ module,\\
then there is a spectral sequence converging to $ H^{*}(A, E, \nabla_{E})$ with second page $ H^{*}(B, \hat{H}_{q}, \nabla_{q})$ where $\hat{H}_{q}$ is defined as the cohomology of the cochain complex $$\cdots \stackrel{\mathrm{d}}{\longrightarrow} M_{0,q} \stackrel{\mathrm{d}}{\longrightarrow} M_{0,q+1} \stackrel{\mathrm{d}}{\longrightarrow} \cdots$$
where
\begin{eqnarray*}
M_{0,q} &=& \frac{\Omega^{q}A \otimes_{A} E}{\iota_{*} \Omega^{1} B \wedge \Omega^{q-1} A \otimes_{A}E}\ ,\cr
\mathrm{d}[x \otimes e]_{0,q} &=& [\mathrm{d} x \otimes e + (-1)^{q} x \wedge \nabla_{E} e]_{0,q+1}\ .
\end{eqnarray*}
The zero curvature left covariant derivative $\nabla_{q}: \hat{H}_{q} \to \Omega^{1}B \otimes_{B} \hat{H}_{q}$ is as defined in Proposition \ref{b6}.
\end{theorem}
\textbf{Proof:} The first part of the proof is given in Proposition \ref{bob2}. Now we need to calculate the cohomology of $$\mathrm{d} : \Omega^{p}B \otimes_{B} \hat{H}_{q} \longrightarrow \Omega^{p+1}B \otimes_{B}\hat{H}_{q}\ .$$ This is given for $\xi \otimes \left\langle \eta \otimes e\right\rangle _{0,q}$ (for $\xi \in \Omega^{p}B$, $\eta \in \Omega^{q} A$ and $e \in E$) as follows: this element corresponds to $\iota_{*}\xi \wedge \eta \otimes e$, and applying $\mathrm{d}$ to this gives $$\iota_{*} \mathrm{d}\xi \wedge \eta \otimes e + (-1)^{p} \iota_{*} \xi \wedge \mathrm{d}\eta \otimes e +(-1)^{p+q} \iota_{*} \xi \wedge \eta \wedge \nabla e.$$
But we have calculated the effect of $\mathrm{d}$ on $\hat{H}_{q}$ in Proposition \ref{b6}, so we get
$$\mathrm{d}(\xi \otimes \left\langle \eta \otimes e \right\rangle _{0,q} ) = \mathrm{d}\xi \otimes \left\langle \eta \otimes e \right\rangle _{0,q} +(-1)^{p}\xi \wedge \nabla_{q} \left\langle \eta \otimes e \right\rangle _{0,q}.$$
The covariant derivative $\nabla_{q} $ has zero curvature by Proposition \ref{b9}. \quad $\square$
| 1,036 | 20,916 |
en
|
train
|
0.164.7
|
\section{Example: A fibration with fiber the noncommutative torus}\label{se1}
As discussed in the Introduction, the idea for this example came from \cite{25,26}.
\subsection{The Heisenberg group} \label{vcadgsh}
The Heisenberg group $H$ is defined to be the following subgroup of $M_{3}(\mathbb{Z})$ under multiplication:
\[ \Big\{ \left(
\begin{array}{ccc}
1&n&k\\
0&1&m\\
0&0&1
\end{array}
\right)
: n , m , k \in \mathbb{Z} \Big\} \]
We can take generators $u, v , w$ for the group, where $w$ is central and there is one more relation $uv=wvu$. These generators correspond to the matrices
\[ u= \left(
\begin{array}{ccc}
1&1&0\\
0&1&0\\
0&0&1
\end{array}
\right) ,\quad
v= \left(
\begin{array}{ccc}
1&0&0\\
0&1&1\\
0&0&1
\end{array}\right)\ ,\quad
w= \left(
\begin{array}{ccc}
1&0&1\\
0&1&0\\
0&0&1
\end{array}\right)\
\]
There is an isomorphism $\theta : H \longrightarrow H $, for every matrix
\[\left(
\begin{array}{cc}
a&c\\
b&d
\end{array}\right)\
\in SL(2,\mathbb{Z}) ,
\]
given by $\theta(u)=u^{a}\,v^{b}$, $\theta(v)=u^{c}\,v^{d}$, $\theta(w)=w$.
The group algebra $\mathbb{C}H$ of $H$ can be made into a star algebra by setting $x^*=x^{-1}$ for all $x \in \{ u , v , w \}$.
\subsection{A differential calculus on the Heisenberg group}
There is a differential calculus on the group algebra $\mathbb{C}H$ of $H$.
It is bicovariant, as set down by Woronowicz in \cite{worondiff}.
For a generator $ x \in \{u , v , w\}$, we write $e^x = x^{-1}.\mathrm{d} x$, a left invariant element of $\Omega^{1}\mathbb{C}H$.
We suppose that $\Omega^{1}\mathbb{C}H$ is free as a left $\mathbb{C}H$ module, with generators $\{ e^{u} , e^{v} , e^{w} \}$. This means that every element of $\Omega^{1}\mathbb{C}H$ can be written uniquely as $a^{u} . e^{u} + a^{v} . e^{v} + a^{w} . e^{w}$, for $a^{u} , a^{v} , a^{w} \in \mathbb{C}H$. We have the following relations on $\Omega^{1}\mathbb{C}H$, for all $x \in \{ u , v , w \}$:\\
$x .e^x = e^x .x $ \\
$x .e^w = e^w . x $\\
$w .e^x = e^x .w $\\
$ u^{-n} .e^v. u^n = e^v - \frac{n}{2}\,e^w $ \\
$ v^{-n}. e^u. v^n = e^u + \frac{n}{2}\,e^w $\\
Further the map $\theta$ in subsection \ref{vcadgsh}
extends to a map of 1-forms given by
$ \theta(e^{w}) = e^{w} $ \\
$ \theta(e^{u}) = a .e^{u} + b. e^{v} + \frac{ab}{2}. e^{w} $ \\
$ \theta(e^{v}) = c .e^{u} + d. e^{v} + \frac{cd}{2}. e^{w} $ \\
Checking the braiding given by Woronowicz shows that, for $x,y \in \{u , v , w\}$,\\
$\mathrm{d} e^x=0$ \\
$e^x\wedge e^y=-e^y\wedge e^x$\ .\\
The star operation extends to the differential calculus, with $(e^x)^*=-e^x$.
\subsection{The differential fibration}{{}_{\scriptscriptstyle(2)}riangleright}bel{b10}
If we take $z$ to be the identity function :$S^{1} \rightarrow \mathbb{C}$, the map sending $z^n$ to $w^n$ gives an algebra map $\iota:C(S^1){}_{\scriptscriptstyle(2)}o \mathbb{C}H$. It is also a star algebra map, with the usual star structure $z^*=z^{-1}$ on $C(S^1)$.
The differential structure of the `fiber algebra' $F$ is
\begin{eqnarray}{{}_{\scriptscriptstyle(2)}riangleright}bel{fib1}
\Omega^n F = \frac{\Omega^n \mathbb{C}H}{\iota_*\Omega^1 C(S^1) \wedge \Omega^{n-1}\mathbb{C}H}\ ,
\end{eqnarray}
i.e.\ we put $ \mathrm{d} w=0$ in $\Omega^n F$ (i.e. put $e^{w} = 0$). This is because in (\ref{fib1}) we divide by everything of the form $e^{w} \wedge \xi $. To see that this gives a fibration, we note that a linear basis for the left invariant n-forms is as follows:\\
$\Omega^{1}A$: \quad $e^{u}, e^{v}, e^{w}$\\
$\Omega^{2}A$: \quad $e^{u} \wedge e^{v}$, $e^{w} \wedge e^{u}$ , $e^{w} \wedge e^{v}$\\
$\Omega^{3}A$: \quad $e^{v} \wedge e^{u} \wedge e^{w}$\\
Then the $N_{n,m}$ (see (\ref{cvhgsuv})) are, where $\langle...\rangle$ denotes the module generated by, and all others are zero:\\
$N_{0,0} = 1$, \quad $N_{1,0} = \left\langle e^{w} \right\rangle $, \quad $N_{m,0} = 0$ , $m > 1$ \\
$N_{0,1} = \frac{\left\langle e^{u}, e^{v}, e^{w}\right\rangle }{\left\langle e^{w} \right\rangle } = \left\langle e^{u}, e^{v} \right\rangle $\\
$N_{0,2} = \frac{\left\langle e^{u} \wedge e^{v}, e^{w} \wedge e^{u}, e^{w} \wedge e^{v} \right\rangle }{\left\langle e^{w} \wedge e^{u}, e^{w} \wedge e^{v} \right\rangle } = \left\langle e^{u} \wedge e^{v} \right\rangle $\\
$N_{0,3} = \frac{\left\langle e^{w} \wedge e^{u} \wedge e^{v}\right\rangle }{\left\langle e^{w} \wedge e^{u} \wedge e^{v} \right\rangle } = 0$\\
$N_{0,n} = 0 $ \quad $n \geq 4$\\
$N_{1,1} = \frac{e^{w} \wedge \left\langle e^{w}, e^{u}, e^{v} \right\rangle }{\left\langle 0 \right\rangle } = \left\langle e^{w} \wedge e^{u} , e^{w} \wedge e^{v} \right\rangle $\\
$N_{1,2} = \frac{e^{w} \wedge \left\langle e^{w} \wedge e^{u}, e^{w} \wedge e^{v}, e^{u} \wedge e^{v} \right\rangle }{\left\langle 0 \right\rangle } = \left\langle e^{w} \wedge e^{u} \wedge e^{v} \right\rangle$ \\
Then the following map is one-to-one and onto,
$$\Omega^{1}C(S^{1}) \otimes_{C(S^{1})} N_{0,n} \longrightarrow N_{1,n}$$
giving a differential fibration in the sense of Definition \ref{b61}.
As was done in \cite{25}, we note that this map does have a fiber in quite a classical sense.
The algebra $C(S^1)$ is commutative, and if we take $q\in S^1$, the fiber algebra
corresponding to $q$ is given by substituting $w\mapsto q$ in the algebra relations. We get unitary generators $u, v$ and a relation $u\, v = q\, v\, u$ for a complex number $q$ of norm $1$. But this is exactly the noncommutative torus $\mathbb{T}^{2}_{q}$. The map $\theta$ on the total algebra
$\mathbb{C}H$ is the identity on the base algebra $C(S^1)$, so it acts on each fiber.
\end{document}
| 2,778 | 20,916 |
en
|
train
|
0.165.0
|
\begin{document}
\draft
\title{CHARGE~RENORMALIZATION~IN~A~NEW~KIND~OF~NON-LOCAL
QUANTUM~ELECTRODYNAMICS}
\author{S.~S.~Sannikov}
\address{Physico-Technical Institute\\
1 Academichna St., 310108 Kharkov, {\bf UKRAINE}}
\author{A.~A.~Stanislavsky}
\address{Institute of Radio Astronomy of the
Ukrainian National Academy of Sciences\\
4 Chervonopraporna St., 310002 Kharkov, {\bf UKRAINE}\\
E-mail: [email protected]}
\date{\today}
\maketitle
\begin{abstract}
The goal of this message is to calculate radiative corrections
to the Sommerfeld fine structure constant in the
framework of a new QED in which particles are described by bilocal
fields. The bare constant is 1/136 where 136 is a
dimension of the dynamical group of the bihamiltonian system
underlying the suggested elementary particle theory. Our
calculations in the second order of perturbation theory give the
renormalized Sommerfeld constant 1/137.0345. We believe the
difference (137.0359 - 137.0345) between corresponding
experimental and theoretical values may be understood as
corrections of the fourth order.
\end{abstract}
\pacs{11.10.Gh, 11.10.Lm}
\section{Introduction}
The aim of this paper is to show how to calculate the main
radiative corrections in quantum electrodynamics improved on the
bases of the general elementary particle theory suggested in \cite{1}.
The keystone of the theory is the assumption that the true
mechanism of production of elementary particles is not interactions
between them (or between their hypothetical constituents),
but is a certain quantum-dynamical system determining the special
physics at supersmall distances where the space-time is discontinuum,
i.\ e.\ it is the quite non-connected manifold there. The transition
in such a system leads to the creation of fundamental particle fields
which are bilocal wave functions $\psi(X,Y)$ in our theory (the
Heisenberg-Schr\"{o}dinger-Dirac theory postulates the
existence of local fields $\psi(X)$, but in that theory there are
ultraviolet divergences). The initial principles of our approach
to the elementary particle problem have been stated in the Russian
periodicals \cite{1,2,3,4,5}.
The dynamical system mentioned above has been described in
\cite{2} and named as a relativistic bi-Hamiltonian system. Owing
to the discontinuity of space in small, the quantum theory of the
system is non-unitary; the non-standard (non-Fock) representations
of the Heisenberg algebra $h^{(*)}_{16}$ described in \cite{4}
(extraction of square root of Dirac-Grassmann spinors leads
to such algebras \cite{3}) and non-unitary (infinite-dimensional)
representations of the rotation group $SO(3)$ and Lorentz group
$SO(3,1)$, induced by them and characterized by the arbitrary complex
spin found in \cite{5} earlier, form the mathematical foundation of
this theory. Thus these representations stand for a new physical
reality. The elementary particle theory based on them is more
like the atomic spectrum theory rather than any composite model.
In the framework of this theory (quantum electrodynamics with
bilocal fields) we consider here only one question --- the charge
renormalization which was not solved up till now.
\section{Bilocal fields and their interactions}
The field bilocality $\psi(X,Y)$ is the direct consequence
of the semispinor structure of the particle fields $\psi^\Sigma
\sim\langle\dot f,O^\Sigma f\rangle$ ($O^\Sigma$ are elements
of the Heisenberg algebra $h^{(*)}_{16}$) discovered by means of
extracting the square root of Grassmann spinors, see \cite{3} (this
structure is quite analogous to the spinor structure of current
$j_\mu\sim\bar\psi\gamma_\mu\psi$ where $\gamma_\mu$ are
elements of the Clifford algebra discovered by means of extracting
the Dirac square root of vectors).
The bilocal field $\psi^\Sigma(X,Y)$ defined by the transition
amplitude $\langle\dot f(X-Y),O^\Sigma f(X+Y)\rangle$ where
$O^\Sigma f(x)$ and $\dot f(\dot x)$ are the initial (excited) and
final (ground) states of the relativistic bi-Hamiltonian system
respectively (the explicit form of these states found in \cite{1})
is written down as
\begin{equation}
\psi(X,Y)=\frac{1}{(2\pi)^{3/2}}\int e^{ipX+iqY}\theta(p_0+q_0)\,
\theta(p_0-q_0)\,\delta(p^2+q^2)\,\delta(pq)\,\delta(p^2-m^2)\,
\psi(p,q)\,d^4p\,\frac{d^4q}{2\pi}\ .
\label{eq1}
\end{equation}
Here $X_\mu$ are coordinates in Minkowski space ($p_\mu$ is a
4-momentum of a particle) and $Y_\mu$ are internal coordinates
(which are not fixed in the experiment and therefore we call them
hidden) describing the space-time structure of particles ($q_\mu$
is a 4-momentum of tachyon; it is interesting to note that
analogous objects have already been introduced by Yukawa \cite{6}).
It follows from (\ref{eq1}) that if $\vert X\vert\gg\vert Y
\vert$ then the bilocal field $\psi(X,Y)$ transforms into the
usual local field $\psi(X)=\psi(X,0)$ (hence, in the suggested
scheme the local fields appear as asymptotic fields; it is a
principal point of a new correspondence principle). It also
follows from (\ref{eq1}) that $\psi(X,Y)$ may be represented in the
form of $\psi(X,Y)=F(Y,-i\frac{\partial}{\partial X})\psi(X)$
where $\psi(X)$ is a local field and $F$ is the so-called
smearing operator which has the form in the case of massive
particles ($p_\mu$ is a 4-momentum of such a particle)
\begin{equation}
F(Y,p)=\frac{1}{2\pi}\int e^{iqY}\delta(p^2+q^2)\,
\delta(pq)\,d^4q\ .
\label{eq2}
\end{equation}
Another form of the smearing operator takes place in the
case of massless particles (it follows from the explicit form
of the leptonic transition amplitude; it is necessary to note
that operator (\ref{eq2}) does not transform into (\ref{eq3}) when
$p^2=0$; in the case we have the stochastic integral
\begin{displaymath}
\frac{1}{2}\int_{-1}^1 e^{i\alpha pY}\,d\alpha\,),
\end{displaymath}
namely:
\begin{equation}
F_0(Y,k)=e^{iYk}\ .
\label{eq3}
\end{equation}
It is a translation ($k_\mu$ is a 4-momentum of such a particle).
Interactions between bilocal fields are described by differential
equations in Minkowski space. We are interested in the Dirac field
$\psi(X,Y)$ interacting with the electromagnetic field $A_\mu(X,Y')$
(a general mechanism driving interactions is described in \cite{1}).
In this case the equations are written in the form of
\begin{equation}
\left(\gamma_\mu\frac{\partial}{\partial X_\mu}+m\right)\psi(X)=
-iJ(X)
\label{eq4}
\end{equation}
where $J(X)=e\gamma_\mu\int\psi(X,Y)\,A_\mu(X,Y')\,d\mu(Y,Y')$
is the interaction ``current''. It transforms into the usual
local connection between local fields $e\gamma_\mu\psi(X)A_\mu(X)$
if $\vert X\vert\gg\vert Y\vert$ (it is the new correspondence
principle from which follows the explicit form of measure:
\begin{displaymath}
d\mu(Y,Y')=\frac{\kappa^8}{(2\pi)^4}\,e^{iYY'\kappa^2}\,d^4Y\,
d^4Y'\,.
\end{displaymath}
Here $\kappa$ is a new fundamental constant equal to $\kappa=
5\cdot 10^{13}cm^{-1}$, see \cite{1}; it will be convenient for
our further calculations to put $c=\hbar=\kappa=1$).
Proceeding from (\ref{eq4}) we may construct the
$S$ - matrix: $S=T\exp(i\int\pounds_i(X)\,d^4X$) where $\pounds_i(X)
=\frac{1}{2}[\bar\psi(X)J(X)+\bar J(X)\psi(X)]$ is an interaction
Lagrangian. In the perturbation theory the interaction picture
may be described by the well-known Feynman diagrams in vertices of
which the electron-photon formfactor arises
\begin{equation}
\rho(p,k)=\int F(Y,p)\,F_0(Y',k)\,d\mu(Y,Y')=
\frac{1}{2\pi}\int e^{iqk}\delta(p^2+q^2)\,
\delta(pq)\,d^4q\ .
\label{eq5}
\end{equation}
\section{The main formula}\label{kd}
First of all we state the result of our calculations of
radiative corrections to the Sommerfeld fine structure constant
$\alpha=e^2/4\pi$. In the suggested theory the renormalized constant
$\tilde\alpha$ connects with the ``bare'' constant $\alpha$ by the
formula
\begin{equation}
\tilde\alpha=\left(\frac{Z_1}{Z_2}\right)^{2}\frac{Z_4}{Z_3}\,\alpha
\label{eq6}
\end{equation}
where $Z_1,Z_2,Z_3,Z_4$ are the renormalization constants of the
fermion Green function, vertex function, Lagrangian of classical
electromagnetic field and three-tail, respectively. Here all these
quantities are calculated in the second order of perturbation theory.
In the suggested theory the ``bare'' constant $\alpha$ is
equal to 1/136 (Eddington formula) where 136 is the dimension of the
dynamical group (the group of automorphisms $Sp^{(*)}(8,{\bf C})$
for the Heisenberg algebra $h_{16}^{(*)}$) in our relativistic
bi-Hamiltonian system, see \cite{2}.
We see formula (\ref{eq6}) essentially differs from the local
theory formula $\tilde\alpha=Z_3^{-1}\alpha$ \cite{7} being a
consequence of the Ward identity $Z_1=Z_2$ (in this theorem
for regularized constants, see \cite{7}, the regularized fermion
self-energy operator $\Sigma(p)$ is assumed to be an analytic
function at point $p^2=0$; but it does not take place in the
suggested theory: it follows from \cite{8} that $\Sigma(p)\sim
\ln p^2$ when $p^2\to 0$) and also the Furry theorem (which does not
take place in the suggested theory too due to the presence of
hidden parameters $Y_\mu$ for bare particles and the absence of them
for bare antiparticles, see further).
\section{Calculation of $Z_1/Z_2$}
In our theory the Ward identity
\begin{displaymath}
\frac{\partial\Sigma(p)}{\partial p_\mu}+\Lambda_\mu(p,0)=0
\end{displaymath}
($\Lambda_\mu$ is the vertex function) is replaced by a more
general identity
\begin{equation}
\frac{\partial\Sigma(p)}{\partial p_\mu}+\Lambda_\mu(p,0)=
\Sigma_\mu(p)
\label{eq7}
\end{equation}
where $\Sigma_\mu(p)$ is the following operator
\begin{displaymath}
\Sigma_\mu(p)=\frac{e^2}{i(2\pi)^4}\int\frac{\gamma_\nu(\hat p-
\hat k+m)\gamma_\nu}{[(p-k)^2-m^2]\,k^2}\,\left[\frac{\partial}
{\partial p_\mu}\rho(p,k)\right]\,d^4k=
\end{displaymath}
\begin{displaymath}
=\frac{ie^2}{(2\pi)^4}
\int^1_0 dz\int_0^\infty\frac{d\sigma}{\sigma^2}\exp[i\frac{p^2}
{2\sigma}-i\sigma(m^2z-p^2z(1-z))]\left[p_\mu(2m-\hat p(1-z))+
\frac{z}{3}(p_\mu\hat p-\gamma_\mu p^2)\right].
\end{displaymath}
To express the quantity $(Z_1/Z_2-1)$ of interest to us in terms
of $\Sigma_\mu(p)$, it is necessary to take the operator on the mass
shell $\bar p=m$ by means of the formula
\begin{displaymath}
\left(\frac{Z_1}{Z_2}\right)\gamma_\mu=\Sigma_\mu(m)=
-\gamma_\mu\frac{e^2}{4\pi^2}m^2\int^1_0 z\,(1+z)\,K_1(m^2z)\,dz
\end{displaymath}
where $K_1$ is the MacDonald function. From here we get
\begin{equation}
\frac{Z_1}{Z_2}=\cases{1-\frac{3\alpha}{2\pi}\ \ \ ,\ m\ll 1;\cr
1-\frac{\alpha}{2m^2}\ ,\ m\gg 1.\cr}
\label{eq8}
\end{equation}
| 3,557 | 8,559 |
en
|
train
|
0.165.1
|
\section{The main formula}\label{kd}
First of all we state the result of our calculations of
radiative corrections to the Sommerfeld fine structure constant
$\alpha=e^2/4\pi$. In the suggested theory the renormalized constant
$\tilde\alpha$ connects with the ``bare'' constant $\alpha$ by the
formula
\begin{equation}
\tilde\alpha=\left(\frac{Z_1}{Z_2}\right)^{2}\frac{Z_4}{Z_3}\,\alpha
\label{eq6}
\end{equation}
where $Z_1,Z_2,Z_3,Z_4$ are the renormalization constants of the
fermion Green function, vertex function, Lagrangian of classical
electromagnetic field and three-tail, respectively. Here all these
quantities are calculated in the second order of perturbation theory.
In the suggested theory the ``bare'' constant $\alpha$ is
equal to 1/136 (Eddington formula) where 136 is the dimension of the
dynamical group (the group of automorphisms $Sp^{(*)}(8,{\bf C})$
for the Heisenberg algebra $h_{16}^{(*)}$) in our relativistic
bi-Hamiltonian system, see \cite{2}.
We see formula (\ref{eq6}) essentially differs from the local
theory formula $\tilde\alpha=Z_3^{-1}\alpha$ \cite{7} being a
consequence of the Ward identity $Z_1=Z_2$ (in this theorem
for regularized constants, see \cite{7}, the regularized fermion
self-energy operator $\Sigma(p)$ is assumed to be an analytic
function at point $p^2=0$; but it does not take place in the
suggested theory: it follows from \cite{8} that $\Sigma(p)\sim
\ln p^2$ when $p^2\to 0$) and also the Furry theorem (which does not
take place in the suggested theory too due to the presence of
hidden parameters $Y_\mu$ for bare particles and the absence of them
for bare antiparticles, see further).
\section{Calculation of $Z_1/Z_2$}
In our theory the Ward identity
\begin{displaymath}
\frac{\partial\Sigma(p)}{\partial p_\mu}+\Lambda_\mu(p,0)=0
\end{displaymath}
($\Lambda_\mu$ is the vertex function) is replaced by a more
general identity
\begin{equation}
\frac{\partial\Sigma(p)}{\partial p_\mu}+\Lambda_\mu(p,0)=
\Sigma_\mu(p)
\label{eq7}
\end{equation}
where $\Sigma_\mu(p)$ is the following operator
\begin{displaymath}
\Sigma_\mu(p)=\frac{e^2}{i(2\pi)^4}\int\frac{\gamma_\nu(\hat p-
\hat k+m)\gamma_\nu}{[(p-k)^2-m^2]\,k^2}\,\left[\frac{\partial}
{\partial p_\mu}\rho(p,k)\right]\,d^4k=
\end{displaymath}
\begin{displaymath}
=\frac{ie^2}{(2\pi)^4}
\int^1_0 dz\int_0^\infty\frac{d\sigma}{\sigma^2}\exp[i\frac{p^2}
{2\sigma}-i\sigma(m^2z-p^2z(1-z))]\left[p_\mu(2m-\hat p(1-z))+
\frac{z}{3}(p_\mu\hat p-\gamma_\mu p^2)\right].
\end{displaymath}
To express the quantity $(Z_1/Z_2-1)$ of interest to us in terms
of $\Sigma_\mu(p)$, it is necessary to take the operator on the mass
shell $\bar p=m$ by means of the formula
\begin{displaymath}
\left(\frac{Z_1}{Z_2}\right)\gamma_\mu=\Sigma_\mu(m)=
-\gamma_\mu\frac{e^2}{4\pi^2}m^2\int^1_0 z\,(1+z)\,K_1(m^2z)\,dz
\end{displaymath}
where $K_1$ is the MacDonald function. From here we get
\begin{equation}
\frac{Z_1}{Z_2}=\cases{1-\frac{3\alpha}{2\pi}\ \ \ ,\ m\ll 1;\cr
1-\frac{\alpha}{2m^2}\ ,\ m\gg 1.\cr}
\label{eq8}
\end{equation}
\section{Calculation of $Z_4/Z_3$}
Similarly, another Ward identity
\begin{displaymath}
\frac{\partial\Pi_{\mu\nu}(k)}{\partial k_\sigma}+
\Delta_{\mu\nu\sigma}(k,0)=0
\end{displaymath}
($\Pi_{\mu\nu}$ is the polarization tensor), see \cite{9}, is
replaced by a more general identity
\begin{equation}
\frac{\partial\Pi_{\mu\nu}(k)}{\partial k_\sigma}+
\Delta_{\mu\nu\sigma}(k,0)=\Pi_{\mu\nu\sigma}(k)
\label{eq9}
\end{equation}
where $\Pi_{\mu\nu\sigma}(k)$ is the following expression
\begin{displaymath}
\Pi_{\mu\nu\sigma}^{(1/2)}(k)=\frac{ie^2}{(2\pi)^4}2\int
\frac{2p_\mu p_\nu+2p_\mu k_\nu -\delta_{\mu\nu}(p^2+pk)}
{p^2\,(p+k)^2}\,\left[\frac{\partial}{\partial k_\sigma}
\rho\Bigl((p+k)^2\Bigr)\right]\,d^4p
\end{displaymath}
in the case of Weyl's dissociation, and
\begin{displaymath}
\Pi_{\mu\nu\sigma}^{(0)}(k)=-\frac{ie^2}{(2\pi)^4}4\int
\frac{p_\mu p_\nu+p_\mu k_\nu}
{p^2\,(p+k)^2}\,\left[\frac{\partial}{\partial k_\sigma}
\rho\Bigl((p+k)^2\Bigr)\right]\,d^4p
\end{displaymath}
in the case of Klein-Gordon's dissociation.
Speaking about the electromagnetic wave dissociation we should
explain two points. Firstly calculating $\Pi_{\mu\nu}$ we use quite
another formfactor not (\ref{eq5}), but
\begin{equation}
\rho(p^2)=\int F(Y,p)\,F(Y',p)\,d\mu(Y,Y')=
\frac{\sin p^2}{p^2}
\label{eq10}
\end{equation}
because the Lagrangian $\hat A_\mu\hat{\bar\psi}\gamma_\mu\hat\psi$
all fields of which are quantized does not give any contribution to
the charge renormalization, see \cite{8}. Another Lagrangian, namely
$A_\mu\hat{\bar\psi}\gamma_\mu\hat\psi$ ($A_\mu$ is a classical
field), gives such a contribution. If the wave function of photons
$A_\mu(X,Y)$ has the internal variables $Y_\mu$, then the classical
one (Maxwell field $A_\mu(X)$), as an essential alloy of indefinite
number of photons (light molecule), does not have such variables.
Therefore in the case only internal variables of intermediate
particles (not antiparticles) are paired. This operation leads to
the formfactor (\ref{eq10}).
It is important to note that the bare particles as objects
being created in the transition $f\to\dot f$ have the additional
variables $Y$. The bare antiparticles arised in consequence of
interactions do not have such variables ($T$-asymmetry of 100 per
cent or complete fermion-antifermion asymmetry of the theory,
see \cite{1}). Under these circumstances the well-known Furry
theorem is invalid.
Secondly, the polarization tensor $\Pi_{\mu\nu}$ having a
finite value $\Pi_{\mu\nu}(k)=(k_\mu k_\nu-\delta_{\mu\nu}k^2)
\Pi(k^2)+\delta_{\mu\nu}d(k^2)$ where
\begin{displaymath}
d(k,m)=-\frac{e^2}{4\pi^2}\int^1_{-1}\frac{d\alpha}{2}\int^1_0
dz\int_0^\infty\frac{\sigma\,d\sigma}{(\sigma+\alpha)^2}\,
\left[m^2-\frac{i}{\sigma+\alpha}-k^2\frac{\sigma z}{\sigma+
\alpha}\left(1-\frac{\sigma z}{\sigma+\alpha}\right)\right]\times
\end{displaymath}
\begin{displaymath}
\times\exp\left[-i\sigma m^2+ik^2\sigma z\left(1-
\frac{\sigma z}{\sigma+\alpha}\right)\right]
\end{displaymath}
for both the Dirac and Kemmer-Duffin (or Klein-Gordon)
polarizations (the expression for $\Pi$ is not given here) must be
a gauge-invariant quantity. Therefore we require $d(k)=0$ at least
in the region $k^2=0$. The last condition leads to the equation
\begin{displaymath}
\int_0^\infty\frac{\sin x}{x+m^2}\,dx+m^2\int_0^\infty
\frac{\cos x}{x+m^2}\,dx=\frac{\pi}{2}
\end{displaymath}
which has the only solution $m=0$.
Hence a classical electromagnetic wave may dissociate on
massless particles only. Essentially, in the suggested theory there
are two and only two charged particles with zero bare mass: positron
(in our scheme it is the fundamental fermion with spin 1/2;
electron is antifermion) and $\pi$-meson (quantum of degeneration
fields with spin 0). Therefore we consider only two these cases.
Since $\Pi_{\mu\nu\sigma}$ leads the Lagrangian to the form
of $\Pi_{\mu\nu\sigma}(k)\,A_\mu(k)\,A_\nu(k)\,A_\sigma(0)$ and in
consequence of the Lorentz-gauge $k_\mu A_\mu(k)=k_\nu A_\nu(k)=0$
we should hold only the term $\delta_{\mu\nu}k_\sigma$ in
$\Pi_{\mu\nu\sigma}(k)$. Therefore we write $\Pi_{\mu\nu\sigma}(k)
=\delta_{\mu\nu}k_\sigma I(k)$. Our calculations give
\begin{displaymath}
I^{(1/2)}(k)=\frac{e^2}{4\pi^2}\int^1_{-1}\frac{\alpha\,d\alpha}
{2}\int^1_0 dz\int_0^\infty\frac{\sigma\,d\sigma}{(\sigma+
\alpha)^3}\,\left(\frac{1}{2}-\frac{2\sigma z}{\sigma+\alpha}\right)\,
\exp\left[ik^2\sigma z\left(1-\frac{\sigma z}{\sigma+\alpha}\right)
\right],
\end{displaymath}
\begin{displaymath}
I^{(0)}(k)=-\frac{e^2}{4\pi^2}\int^1_{-1}\frac{\alpha\,d\alpha}
{2}\int^1_0 z\,dz\int_0^\infty\frac{\sigma^2\,d\sigma}{(\sigma+
\alpha)^4}\,\exp\left[ik^2\sigma z\left(1-\frac{\sigma z}
{\sigma+\alpha}\right)\right].
\end{displaymath}
On the mass shell $k^2=0$ we get
\begin{displaymath}
I^{(1/2)}(0)=-\frac{e^2}{48\pi^2}\,,\qquad
I^{(0)}(0)=-\frac{e^2}{24\pi^2}\,.
\end{displaymath}
The quantity ($Z_4/Z_3-1$) of interest to us is determined
by the sum $I^{(1/2)}(0)+I^{(0)}(0)$ and we have
\begin{equation}
\frac{Z_4}{Z_3}=1-\frac{\alpha}{12\pi}-\frac{\alpha}{6\pi}=
1-\frac{\alpha}{4\pi}\ .
\label{eq11}
\end{equation}
\section{The principal result}
Expressions (\ref{eq8}) and (\ref{eq11}) together give
\begin{displaymath}
\left(\frac{Z_2}{Z_1}\right)^{2}\frac{Z_3}{Z_4}=
\left(1+\frac{3\alpha}{\pi}\right)\left(1+\frac{\alpha}{4\pi}\right)=
1+\frac{13\alpha}{4\pi}\ .
\end{displaymath}
From (\ref{eq6}) it follows now
\begin{equation}
\tilde\alpha^{-1}=\alpha^{-1}+\frac{13}{4\pi}\ .
\label{eq12}
\end{equation}
Since in the suggested theory $\alpha^{-1}$ = 136, the
renormalized constant $\tilde\alpha^{-1}$ is $\tilde\alpha^{-1}$
= 136+1.0345 = 137.0345. The modern experimental value of this
constant is 137.0359 \cite{10}. We believe the difference 0.0014
(indeed, 0.00085 only) may be explained by the fourth order
radiative corrections.
| 3,364 | 8,559 |
en
|
train
|
0.165.2
|
\section{The principal result}
Expressions (\ref{eq8}) and (\ref{eq11}) together give
\begin{displaymath}
\left(\frac{Z_2}{Z_1}\right)^{2}\frac{Z_3}{Z_4}=
\left(1+\frac{3\alpha}{\pi}\right)\left(1+\frac{\alpha}{4\pi}\right)=
1+\frac{13\alpha}{4\pi}\ .
\end{displaymath}
From (\ref{eq6}) it follows now
\begin{equation}
\tilde\alpha^{-1}=\alpha^{-1}+\frac{13}{4\pi}\ .
\label{eq12}
\end{equation}
Since in the suggested theory $\alpha^{-1}$ = 136, the
renormalized constant $\tilde\alpha^{-1}$ is $\tilde\alpha^{-1}$
= 136+1.0345 = 137.0345. The modern experimental value of this
constant is 137.0359 \cite{10}. We believe the difference 0.0014
(indeed, 0.00085 only) may be explained by the fourth order
radiative corrections.
\section{Fermion anomalous magnetic moment}
According to the suggested theory, calculations of the vertex
operator in the third order of the perturbation theory lead to
the following formula of the fermion anomalous magnetic moment
\begin{equation}
\Delta\mu=\frac{\alpha}{\pi}\ m^2\int^1_0 z\,(1-z)\,K_1(m^2z)\,dz\ .
\label{eq13}
\end{equation}
a) In the case $m\ll 1$ the formula (\ref{eq13}) gives
Schwinger's result $\frac{\alpha}{2\pi}$ with a correction
\begin{displaymath}
\Delta\mu\simeq\frac{\alpha}{2\pi}\left[1+\frac{m^4}{12}
\left(C-\frac{13}{12}-\ln 2+\ln m^2\right)\right]\ .
\end{displaymath}
The electron has $m=\frac{m_e c}{\kappa h}=5\cdot 10^{-4}$ and the
correction $\frac{\alpha}{2\pi}\,\frac{m^4}{12}\left(C-\frac{13}
{12}-\ln 2+\ln m^2\right)\simeq -9.8\cdot 10^{-17}$ is far beyond
the experimental possibilities of today. The $\mu$-meson has $m_\mu=
0.1$ and the correction is equal $-5.6\cdot 10^{-8}$ within the
bounds of possibility. The correction should be added to the factor
$\left(\frac{g-2}{2}\right)_{\rm theory}=\frac{\alpha}{2\pi}+0.76
\left(\frac{\alpha}{\pi}\right)^2=$0.0011655102 calculated by means
of the local theory. Its experimental value is $\left(\frac{g-2}{2}
\right)_{\rm exper}=$0.001165923 . The difference $\left(\frac{g-2}
{2}\right)_{\rm exper}-\left(\frac{g-2}{2}\right)_{\rm theory}=$
0.000000413 (together with our correction the value is equal
0.000000493) is usually accounted for by influence of the strong
interaction the correct theory of which is known to be wanting as
yet (and all the calculations are not strictly defined). However
there is a correction close to it in magnitude because of
nonlocality (of both electromagnetic and strong interactions),
i.\ e.\ owing to the finite third fundamental constant $\kappa$.
b) For $m\gg 1$ the formula (\ref{eq13}) gives
\begin{displaymath}
\frac{g-2}{2}\simeq\frac{\alpha}{2m^2}\ .
\end{displaymath}
Let us apply it to the $\tau$-meson having $m_\tau=$1.78 and
obtain $\frac{g-2}{2}=$0.001151584 .
\begin{references}
\bibitem{1}
S.~S.~Sannikov and I.~I.~Uvarov, Problems of Nuclear Physics and
Space Rays, No.~31, 76(Kharkov University Press, Kharkov, 1989);
S.~S.~Sannikov, Kiev Report No. ITP-91-72R, 1992.
\bibitem{2}
S.~S.~Sannikov and I.~I.~Uvarov, Izvestiya Vysshikh Uchebnykh
Zavedenii, seriya Fizika, No.~10, 5(1990)(in translation Soviet
Physics Journal).
\bibitem{3}
S.~S.~Sannikov, Dokl.~Akad.~Nauk~SSSR {\bf 172}, 37(1967);
\bibitem{4}
S.~S.~Sannikov and I.~I.~Uvarov, Problems of Nuclear Physics and
Space Rays, No.~32, 31(Kharkov University Press, Kharkov, 1989);
\bibitem{5}
S.~S.~Sannikov, J.Nucl.Phys. {\bf 2}, 570(1965);
Teor.Math.Phys. {\bf 34}, 34(1978);
\bibitem{6}
H.~Yukawa, Phys.Rev. {\bf 77}, 219(1950).
\bibitem{7}
N.~N.~Bogolubov and D.~V.~Shirkov, {\it Introduction to the Theory of
Fields}, 3rd ed. (Wiley, 1980).
\bibitem{8}
S.~S.~Sannikov and A.~A.~Stanislavsky, Izvestiya Vysshikh Uchebnykh
Zavedenii, seriya Fizika, No.~6, 76(1994)(translation in Russian
Physics Journal).
\bibitem{9}
L.~H.~Ryder {\it Quantum Field Theory} (Cambridge University Press,
Cambridge, 1985).
\bibitem{10}
Review of Particles Properties, Phys.Lett. {\bf B239}, (1990).
\end{references}
\end{document}
| 1,638 | 8,559 |
en
|
train
|
0.166.0
|
\begin{document}
\title{
On Some Problems \\
Related to a Simplex and a Ball}
\author{Mikhail Nevskii\footnote{Department of Mathematics,
P.G.~Demidov Yaroslavl State University, Sovetskaya str., 14, Yaroslavl, 150003, Russia
orcid.org/0000-0002-6392-7618
[email protected]}
}
\date{May 5, 2019}
\maketitle
\begin{abstract}
Let $C$ be a convex body and let $S$ be a nondegenerate
simplex in ${\mathbb R}^n$.
Denote by $\xi(C;S)$ the minimal
$\tau>0$ such that $C$ is a subset of the simplex $\tau S$.
By $\alpha(C;S)$ we mean the minimal $\tau>0$ such that
$C$ is contained in a translate of $\tau S$. Earlier the author
has proved the equalities
$\xi(C;S)=(n+1)\max\limits_{1\leq j\leq n+1}
\max\limits_{x\in C}(-\lambda_j(x))+1$ \ (if $C\not\subset S$), \
$\alpha(C;S)=
\sum\limits_{j=1}^{n+1} \max\limits_{x\in C} (-\lambda_j(x))+1.$
Here $\lambda_j$ are linear functions called the
basic Lagrange polynomials corresponding to $S$.
In his previous papers, the author has investigated
these formulae if
$C=[0,1]^n$.
The present paper is related to the case
when
$C$ coincides with the unit Euclidean ball $B_n=\{x: \|x\|\leq 1\},$ where
$\|x\|=\left(\sum\limits_{i=1}^n x_i^2 \right)^{1/2}.$ We establish
various relations for
$\xi(B_n;S)$ and $\alpha(B_n;S)$, as well as we give their
geometric interpretation.
\noindent Keywords:
$n$-dimensional simplex, $n$-dimensional ball, homothety, absorption index
\end{abstract}
\section{Preliminaries}\label{nev_s1}
Everywhere further $n\in{\mathbb N}.$ An element
$x\in{\mathbb R}^n$ is written in the form
$x=(x_1,\ldots,x_n).$
By definition,
$$\|x\|=\sqrt{(x,x)}=\left(\sum\limits_{i=1}^n x_i^2\right)^{1/2},$$
$$B\left(x^{(0)};\varrho\right):=\{x\in{\mathbb R}^n: \|x-x^{(0)}\|\leq \varrho \}
\quad \left(x^{(0)}\in {\mathbb R}^n,
\varrho>0\right),$$ \
$$B_n:=B(0;1), \quad
Q_n:=[0,1]^n, \quad
Q_n^\prime:=[-1,1]^n.$$
Let $C$ be a convex body in ${\mathbb R}^n$.
Denote by $\tau C$ the image of $C$ under the homothety with center of homothety
in the center of gravity of $C$ and ratio of homothety
$\tau.$
For an~$n$-dimensional nondegenerate simplex $S$,
consider the value
$\xi(C;S):=\min \{\sigma\geq 1: C\subset \sigma S\}.$
We call this number the {\it absorption index
of $S$ with respect to $C$.}
Define $\alpha(C;S)$ as the minimal $\tau>0$ such that convex body
$C$ is contained in a translate of the simplex $\tau S$.
By ${\rm ver}(G)$ we mean the set of vertices
of convex polytope
$G$.
Let
$x^{(j)}=\left(x_1^{(j)},\ldots,x_n^{(j)}\right),$
$1\leq j\leq n+1,$ be the vertices of simplex $S$.
The matrix
$${\bf A} :=
\left( \begin{array}{cccc}
x_1^{(1)}&\ldots&x_n^{(1)}&1\\
x_1^{(2)}&\ldots&x_n^{(2)}&1\\
\vdots&\vdots&\vdots&\vdots\\
x_1^{(n+1)}&\ldots&x_n^{(n+1)}&1\\
\end{array}
\right)$$
is nondegenerate.
By definition, put
${\bf A}^{-1}$ $=(l_{ij})$.
Linear polynomials
$\lambda_j(x)=
l_{1j}x_1+\ldots+
l_{nj}x_n+l_{n+1,j}$
whose coefficients make up the columns of
${\bf A}^{-1}$
have the property
$\lambda_j\left(x^{(k)}\right)$ $=$
$\delta_j^k$, where $\delta_j^k$ is the Kronecker $\delta$-symbol.
We call $\lambda_j$ the
{\it basic Lagrange polynomials corresponding to
$S$.}
The numbers $\lambda_j(x)$
are barycentric coordinates of a point
$x\in{\mathbb R}^n$ with respect to
$S$. Simplex $S$ is given by the system of linear inequalities
$\lambda_j(x)\geq 0$. For more details about $\lambda_j$,
see [3; Chapter\,1].
The equality $\xi(C;S)=1$ is equivalent to the inclusion
$C\subset S.$ If $C\not\subset S$, then
\begin{equation}\label{ksi_cs_equality}
\xi(C;S)=(n+1)\max_{1\leq j\leq n+1}
\max_{x\in C}(-\lambda_j(x))+1.
\end{equation}
(the proof was given in [2]; see also [3;\S\,1.3]).
The relation
\begin{equation}\label{relation_cs}
\max\limits_{x\in C} \left(-\lambda_1(x)\right)=
\ldots=
\max\limits_{x\in C} \left(-\lambda_{n+1}(x)\right)
\end{equation}
holds true if and only if
the simplex $\xi(C;S)S$
is circumscribed around
convex body $C.$
In the case $C=Q_n$ equality (\ref{ksi_cs_equality}) can be reduced to the form
$$\xi(Q_n;S)=(n+1)\max_{1\leq j\leq n+1}
\max_{x\in {\rm ver}(Q_n)}(-\lambda_j(x))+1 $$
and (\ref{relation_cs}) is equivalent to the relation
\begin{equation}\label{relation_qs}
\max\limits_{x\in {\rm ver}(Q_n)} \left(-\lambda_1(x)\right)=
\ldots=
\max\limits_{x\in {\rm ver}(Q_n)} \left(-\lambda_{n+1}(x)\right).
\end{equation}
For any $C$ and $S$, we have $\xi(C;S)\geq\alpha(C;S)$. The equality
$\xi(C;S)=\alpha(C;S)$ holds only in the case when
the simplex
$\xi(C;S)S$ is circumscribed around
convex body $C.$
This is equivalent to
(\ref{relation_cs}) and also to
(\ref{relation_qs}) when $C=Q_n$.
It was proved in [4] (see also
[3; \S\,1.4])
that
\begin{equation}\label{alpha_cs_equality}
\alpha(C;S)=
\sum_{j=1}^{n+1} \max_{x\in C} (-\lambda_j(x))+1.
\end{equation}
If $C=Q_n$, then this formula
can be written in rather more geometric way:
\begin{equation}\label{alpha_d_i_formula}
\alpha(Q_n;S)
=\sum_{i=1}^n\frac{1}{d_i(S)}.
\end{equation}
Here $d_i(S)$ is {\it the $i$th axial diameter of simplex $S$,} i.\,e.,
the length of a longest segment in $S$
parallel to the $i$th
coordinate axis. Equality (\ref{alpha_d_i_formula})
was obtained in [11].
When $S\subset Q_n,$ we have $d_i(S)\leq 1.$ Therefore, for these simplices,
(\ref{alpha_d_i_formula}) gives
\begin{equation}\label{ksi_alpha_n_ineq}
\xi(Q_n;S)\geq\alpha(Q_n;S)
=\sum_{i=1}^n\frac{1}{d_i(S)}\geq n.
\end{equation}
Earlier the author established the equality
\begin{equation}\label{d_i_l_ij_formula}
\frac{1}{d_i(S)}=\frac{1}{2}\sum_{j=1}^{n+1} |l_{ij}|
\end{equation}
(see [2]).
Being combined together, (\ref{alpha_d_i_formula}) and (\ref{d_i_l_ij_formula}) yield
\begin{equation}\label{alpha_qs_formula}
\alpha(Q_n;S)=\frac{1}{2}\sum_{i=1}^n\sum_{j=1}^{n+1} |l_{ij}|.
\end{equation}
Note that $\alpha(C;S)$ is invariant under parallel translation of the sets
and for $\tau>0$ we have
$\alpha(\tau C;S)=\tau\alpha(C;S).$ Since
$Q_n^\prime=[-1,1]^n$
is a translate of the cube $2Q_n$,
after replacing $Q_n$ with
$Q_n^\prime$ we obtain from (\ref{alpha_qs_formula})
an even simpler formula:
\begin{equation}\label{alpha_q_prime_s_formula}
\alpha(Q_n^\prime;S)=\sum_{i=1}^n\sum_{j=1}^{n+1} |l_{ij}|.
\end{equation}
Let us define the value
$$\xi_n:=\min \{ \xi(Q_n;S): \,
S \mbox{ is an $n$-dimensional simplex,} \,
S\subset Q_n, \, {\rm vol}(S)\ne 0\}.$$
Various estimates of $\xi_n$ were obtained first by the author and then
by the author and A.\,Yu.~Ukhalov
(e.\,g., see papers [1], [2], [5], [6], [7], [8], [12]
and book
[3]).
Always $n\leq \xi_n<n+1$.
Nowadays the precise values of $\xi_n$
are known for $n=2,5,9$ and also for the infinite set of odd $n$'s
for any of which there exists an Hadamard matrix of order $n+1$.
If $n\ne 2$, then every known value of
$\xi_n$ is equal to $n$, whereas $\xi_2=1+\frac{3\sqrt{5}}{5}=2.34\ldots$
It still remains unknown
whether there exists
an even $n$ with the property
$\xi_n=n$.
There are some other open problems concerning the numbers
$\xi_n$.
In this article, we will discuss
the analogues of the above characteristics
for a simplex and an Euclidean ball.
Replacing a cube with a ball makes many
questions much simpler. However, the geometric interpretation of general results
has a certain interest also in this particular case.
Besides, we will note some new applications of the basic Lagrange polynomials.
Numerical characteristics connecting
simplices and subsets of ${\mathbb R}^n$
have applications for obtaining various estimates
in polynomial interpolation of functions defined
on mul\-ti\-dimen\-sional domains. This approach and the corresponding analytic
methods
were described in detail in
[3]. Lately it has become possible to study these questions
also by computer methods (see, e.\,g.,
[5], [6], [8], [12]).
| 3,161 | 14,143 |
en
|
train
|
0.166.1
|
\section{The value $\alpha(B_n;S)$}\label{nev_s2}
The {\it inradius of an $n$-dimensional simplex $S$} is the
maximum of the radii of balls contained within $S$.
The center of this unique maximum ball is called the {\it incenter of $S$.}
The boundary of the maximum ball is a sphere that has a single common point
with each $(n-1)$-dimensional face of $S$. By the {\it circumradius of S}
we mean the minimum of the radii of balls containing $S$.
The boundary of this unique minimal ball does not necessarily contain all the
vertices of $S$. Namely, this happens only when the center of the minimal ball
lies inside the simplex.
The inradius $r$ and the circumradius $R$ of a simplex $S$
satisfy the so-called {\it Euler inequality}
\begin{equation}\label{euler_ineq}
R\geq nr.
\end{equation}
Equality in
(\ref{euler_ineq}) takes place if and only if
$S$ is a regular simplex.
Concerning the proofs of the Euler inequality, its history and generalizations,
see, e.\,g., [10], [13], [14].
In connection with
(\ref{euler_ineq}), let us remark an analogue to the following property being
true for parallelotopes
(see [11], [3; \S\,1.8]).
{\it Let $S$ be a nondegenerate simplex and let
$D,$ $D^*$~be parallelotopes in ${\mathbb R}^n.$ Suppose
$D^*$ is a homothetic copy of $D$
with ratio $\tau>1.$
If
$D\subset S \subset D^*,$
then $\tau\geq n.$
}
This proposition holds true also for balls.
In fact, the Euler inequality is equivalent to
the following statement.
{\it Suppose $B$ is a ball with radius $r_1$ and
$B^*$ is a ball with radius $r_2$. If
$B\subset S\subset B^*$, then $r_1\leq nr_2.$
Equality takes place if and only if $S$ is a regular simplex
inscribed into $B^*$ and
$B$ is the ball inscribed into $S$.} Another
equivalent form of these propositions is given by
Theorem 2
(see the note after the proof of this theorem).
Let $x^{(1)},$ $\ldots,$
$x^{(n+1)}$ be the vertices and let $\lambda_1,$ $\ldots,$
$\lambda_{n+1}$ be the basic Lagrange polynomials of a nondegenerate simplex
$S\subset {\mathbb R}^n$ (see Section 1).
In what follows $\Gamma_j$ is the $(n-1)$-dimensional hyperplane given
by the equation $\lambda_j(x)=0$, by
$\Sigma_j$ we mean the $(n-1)$-dimensional face of $S$ contained
in $\Gamma_j$,
symbol $h_j$ denotes the height of $S$ conducted from the vertex $x^{(j)}$
onto~$\Gamma_j$,
and $r$ denotes the inradius of $S$. Define
$\sigma_j$ as $(n-1)$-measure of $\Sigma_j$ and put
$\sigma:=\sum\limits_{j=1}^{n+1} \sigma_j$.
Consider the vector $a_j:=\{l_{1j},\ldots,l_{nj}\}$. This vector is orthogonal
to $\Gamma_j$ and directed into the subspace containing
$x^{(j)}$. Obviously,
$$\lambda_j(x)=
l_{1j}x_1+\ldots+
l_{nj}x_n+l_{n+1,j}=(a_j,x)+l_{n+1,j}=(a_j,x)+\lambda_j(0).$$
{\bf Theorem 1.}
{\it The following equalities are true:
\begin{equation}\label{alpha_bs_sum_l_ij_equality}
\alpha(B_n;S)=
\sum_{j=1}^{n+1}\left(\sum_{i=1}^n l_{ij}^2\right)^{1/2},
\end{equation}
\begin{equation}\label{alpha_bs_h_j_equality}
\alpha(B_n;S)=\sum_{j=1}^{n+1}\frac{1}{h_j},
\end{equation}
\begin{equation}\label{alpha_bs_1_r_equality}
\alpha(B_n;S)= \frac{1}{r},
\end{equation}
\begin{equation}\label{alpha_bs_sigma_nV}
\alpha(B_n;S)=\frac{\sigma}{n{\rm vol}(S)}.
\end{equation}
}
{\it Proof.} Let us obtain these pairwise-equivalent equalities from the top up to
the bottom.
First we note that
\begin{equation}\label{alpha_bs_equality}
\alpha(B_n;S)=
\sum_{j=1}^{n+1} \max_{x\in B} (-\lambda_j(x))+1.
\end{equation}
Formula (\ref{alpha_bs_equality})
is the particular case of (\ref{alpha_cs_equality}) in the situation $C=B_n$.
By the Cauchy inequality,
\begin{equation}\label{cauchy_ineq}
-\|a_j\|\|x\|\leq (a_j,x)\leq
\|a_j\|\|x\|,
\end{equation}
$$-\|a_j\|\|x\|-\lambda_j(0)\leq -\lambda_j(x)\leq
\|a_j\|\|x\|-\lambda_j(0).$$
Both the upper and the lower bounds in
(\ref{cauchy_ineq}) are reachable. This gives
$$\max_{x\in B_n} (-\lambda_j(x))=
\max_{\|x\|\leq 1} (-\lambda_j(x))=
\|a_j\|-\lambda_j(0).$$
Therefore,
$$\alpha(B_n;S)=
\sum_{j=1}^{n+1} \max_{x\in B_n} (-\lambda_j(x))+1=
\sum_{j=1}^{n+1}\|a_j\|-\sum_{j=1}^{n+1}\lambda_j(0)+1=
\sum_{j=1}^{n+1}\left(\sum_{i=1}^n l_{ij}^2\right)^{1/2}.
$$
We made use of the equality
$\sum\limits_{j=1}^{n+1}\lambda_j(0)=1.$
Since $\lambda_j\left(x^{(j)}\right)=1$, we have
$$h_j={\rm dist}\left(x^{(j)};\Gamma_j\right)=
\frac{\left|\lambda_j\left(x^{(j)}\right)\right|}{\|a_j\|}=
\frac{1}{\|a_j\|}=\frac{1}{\left(\sum\limits_{i=1}^n l_{ij}^2\right)^{1/2}}.$$
Consequently,
$$\alpha(B_n;S)=
\sum_{j=1}^{n+1}\left(\sum_{i=1}^n l_{ij}^2\right)^{1/2}
=\sum_{j=1}^{n+1}\frac{1}{h_j}.$$
We have obtained both (\ref{alpha_bs_sum_l_ij_equality})
and (\ref{alpha_bs_h_j_equality}).
Let us prove
(\ref{alpha_bs_1_r_equality}).
The ball $B_n$ is a subset of a translate
of the simplex $\alpha(B_n;S)S$. This means that a translate of the ball
$\frac{1}{\alpha(B_n;S)}B_n$
is contained in $S$. Since the maximum of the radii of balls contained
in $S$ is equal to $r$, we have
$\frac{1}{\alpha(B_n;S)}\leq r,$ i.\,e.,
$\alpha(B_n;S)\geq \frac{1}{r}$.
To obtain the inverse inequality, denote by
$B^\prime$ a ball of radius $r$ inscribed into $S$. Then the ball
$B_n=\frac{1}{r}B^\prime$
is a subset of some translate of
$\frac{1}{r}S$.
Using the definition of $\alpha(B_n;S)$ we can write
$\alpha(B_n;S)\leq \frac{1}{r}$.
So, we have $\alpha(B_n;S)=\frac{1}{r}$.
Finally, in order to establish
(\ref{alpha_bs_sigma_nV}), it is sufficient to
utilize (\ref{alpha_bs_1_r_equality})
and the formula ${\rm vol}(S)=\frac{1}{n}\sigma r$. The latter equality
one can obtain from an ordinary formula for the volume of a simplex
after subdividing $S$ into $n+1$ simplices in such a way that the $j$th of these simplices
has a vertex in the center of the inscribed ball and is supported on $\Sigma_j$.
$\Box$
{\bf Corollary 1.}
{\it We have
$$\frac{1}{r}=\sum_{j=1}^{n+1}\frac{1}{h_j}.$$
}
For proving, it is sufficient to apply
(\ref{alpha_bs_h_j_equality}) and
(\ref{alpha_bs_1_r_equality}).
It seems interesting
that this geometric relation (which evidently can also be obtained
in a direct way) turns out to be equivalent to the general formula for
$\alpha(C;S)$ in the particular
case when a convex body $C$ coincides with the Euclidean unit ball.
{\bf Corollary 2.}
{\it The inradius $r$ and the incenter $z$ of a simplex $S$ can be calculated
by the following formulae:
\begin{equation}\label{r_formula}
r=\frac{1}{ \sum\limits_{j=1}^{n+1}\left(\sum\limits_{i=1}^n l_{ij}^2\right)^{1/2}},
\end{equation}
\begin{equation}\label{z_formula}
z=\frac{1}{ \sum\limits_{j=1}^{n+1}\left(\sum\limits_{i=1}^n l_{ij}^2\right)^{1/2}}
\sum\limits_{j=1}^{n+1}\left(\sum\limits_{i=1}^n l_{ij}^2\right)^{1/2} x^{(j)}.
\end{equation}
The tangent point of the ball $B(z;r)$ and facet
$\Sigma_k$ has the form
\begin{equation}\label{y_k_formula}
y^{(k)}=\frac{1}{ \sum\limits_{j=1}^{n+1}\left(\sum\limits_{i=1}^n l_{ij}^2\right)^{1/2}}
\left[\sum\limits_{j=1}^{n+1}\left(\sum\limits_{i=1}^n l_{ij}^2\right)^{1/2} x^{(j)}
-\frac{1}{\left(\sum\limits_{i=1}^n l_{ik}^2\right)^{1/2}} \left(l_{1k},\ldots,l_{nk}\right)
\right].
\end{equation}
}
{\it Proof.} Equality
(\ref{r_formula}) follows immediately from
(\ref{alpha_bs_sum_l_ij_equality}) and
(\ref{alpha_bs_1_r_equality}). To obtain
(\ref{z_formula}), let us remark that
$$r=
{\rm dist}(z;\Gamma_j)=
\frac{|\lambda_j(z)|}{\|a_j\|}.$$
Since $z$ lies inside $S$, each barycentric coordinate of this point
$\lambda_j(z)$ is positive, i.\,e.,
$\lambda_j(z)=r\|a_j\|.$
Consequently,
$$z=\sum_{j=1}^{n+1}\lambda_j(z)x^{(j)}=
r\sum_{j=1}^{n+1} \|a_j\| x^{(j)}.$$
This coincides with (\ref{z_formula}).
Finally, since
vector $a_k=\{l_{1k},\ldots,l_{nk}\}$
is orthogonal to
$\Sigma_k$ and is directed
from this facet inside the simplex, a unique common point of
$B(z;r)$ and $\Sigma_k$ has the form
$$y^{(k)}=z-\frac{r}{\|a_k\|}a_k=r\left( \sum_{j=1}^{n+1} \|a_j\| x^{(j)}-\frac{1}{\|a_k\|}
a_k\right).$$
The latter is equivalent to (\ref{y_k_formula}).
$\Box$
It is interesting to compare
(\ref{alpha_bs_sum_l_ij_equality}) with the formula
(\ref{alpha_q_prime_s_formula}) for $\alpha(Q_n^\prime;S)$. Since $B_n$ is
a subset of the cube
$Q_n^\prime=[-1,1]^n$, we have $\alpha(B_n;S)\leq \alpha(Q_n^\prime;S)$.
Analytically, this also follows from the estimate
$$\left(\sum_{i=1}^n l_{ij}^2\right)^{1/2}\leq
\sum_{i=1}^n |l_{ij}|.$$
For arbitrary $x^{(0)}$ and $\varrho>0$, the number
$\alpha\left(B(x^{(0)};\varrho);S\right)$ can be calculated
with the use of Theorem 1
and the equality
$\alpha(B(x^{(0)};\varrho);S)$ $=$ $\varrho\alpha(B_n;S)$.
If $S\subset Q_n$, then all the axial diameters
$d_i(S)$ do not exceed $1$ and
(\ref{alpha_d_i_formula}) immediately gives $\alpha(Q_n;S)\geq n$. Moreover,
the equality $\alpha(Q_n;S)=n$ holds when and only when each
$d_i(S)=1$.
The following proposition
expresses the analogues of these properties for simplices contained
in a ball.
{\bf Theorem 2.}
{\it If $S\subset B_n$, then $\alpha(B_n;S)\geq n.$ The equality
$\alpha(B_n;S)=n$ holds true if and only if
$S$ is a regular simplex inscribed into $B_n$.
}
{\it Proof.}
By the definition of $\alpha(B_n;S)$, the ball $B_n$
is contained in a translate of the simplex
$\alpha(B_n;S)S$. Hence, some translate
$B^\prime$ of the ball
$\frac{1}{\alpha(B_n;S)}B_n$ is a subset of
$S$. So, we have the inclusions $B^\prime\subset S\subset B_n$. Since the radius of
$B^\prime$ is equal to
$\frac{1}{\alpha(B_n;S)}$, the inradius
$r$ and the circumradius $R$ of $S$ satisfy the inequalities
$\frac{1}{\alpha(B_n;S)}\leq r,$ $R\leq 1$.
Making use of the Euler inequality
$R\geq nr$, we can write
\begin{equation}\label{theor2_ineqs}
\frac{1}{\alpha(B_n;S)}\leq r\leq \frac{R}{n}\leq \frac{1}{n}.
\end{equation}
Therefore, $\alpha(B_n;S)\geq n.$
| 3,959 | 14,143 |
en
|
train
|
0.166.2
|
{\bf Corollary 2.}
{\it The inradius $r$ and the incenter $z$ of a simplex $S$ can be calculated
by the following formulae:
\begin{equation}\label{r_formula}
r=\frac{1}{ \sum\limits_{j=1}^{n+1}\left(\sum\limits_{i=1}^n l_{ij}^2\right)^{1/2}},
\end{equation}
\begin{equation}\label{z_formula}
z=\frac{1}{ \sum\limits_{j=1}^{n+1}\left(\sum\limits_{i=1}^n l_{ij}^2\right)^{1/2}}
\sum\limits_{j=1}^{n+1}\left(\sum\limits_{i=1}^n l_{ij}^2\right)^{1/2} x^{(j)}.
\end{equation}
The tangent point of the ball $B(z;r)$ and facet
$\Sigma_k$ has the form
\begin{equation}\label{y_k_formula}
y^{(k)}=\frac{1}{ \sum\limits_{j=1}^{n+1}\left(\sum\limits_{i=1}^n l_{ij}^2\right)^{1/2}}
\left[\sum\limits_{j=1}^{n+1}\left(\sum\limits_{i=1}^n l_{ij}^2\right)^{1/2} x^{(j)}
-\frac{1}{\left(\sum\limits_{i=1}^n l_{ik}^2\right)^{1/2}} \left(l_{1k},\ldots,l_{nk}\right)
\right].
\end{equation}
}
{\it Proof.} Equality
(\ref{r_formula}) follows immediately from
(\ref{alpha_bs_sum_l_ij_equality}) and
(\ref{alpha_bs_1_r_equality}). To obtain
(\ref{z_formula}), let us remark that
$$r=
{\rm dist}(z;\Gamma_j)=
\frac{|\lambda_j(z)|}{\|a_j\|}.$$
Since $z$ lies inside $S$, each barycentric coordinate of this point
$\lambda_j(z)$ is positive, i.\,e.,
$\lambda_j(z)=r\|a_j\|.$
Consequently,
$$z=\sum_{j=1}^{n+1}\lambda_j(z)x^{(j)}=
r\sum_{j=1}^{n+1} \|a_j\| x^{(j)}.$$
This coincides with (\ref{z_formula}).
Finally, since
vector $a_k=\{l_{1k},\ldots,l_{nk}\}$
is orthogonal to
$\Sigma_k$ and is directed
from this facet inside the simplex, a unique common point of
$B(z;r)$ and $\Sigma_k$ has the form
$$y^{(k)}=z-\frac{r}{\|a_k\|}a_k=r\left( \sum_{j=1}^{n+1} \|a_j\| x^{(j)}-\frac{1}{\|a_k\|}
a_k\right).$$
The latter is equivalent to (\ref{y_k_formula}).
$\Box$
It is interesting to compare
(\ref{alpha_bs_sum_l_ij_equality}) with the formula
(\ref{alpha_q_prime_s_formula}) for $\alpha(Q_n^\prime;S)$. Since $B_n$ is
a subset of the cube
$Q_n^\prime=[-1,1]^n$, we have $\alpha(B_n;S)\leq \alpha(Q_n^\prime;S)$.
Analytically, this also follows from the estimate
$$\left(\sum_{i=1}^n l_{ij}^2\right)^{1/2}\leq
\sum_{i=1}^n |l_{ij}|.$$
For arbitrary $x^{(0)}$ and $\varrho>0$, the number
$\alpha\left(B(x^{(0)};\varrho);S\right)$ can be calculated
with the use of Theorem 1
and the equality
$\alpha(B(x^{(0)};\varrho);S)$ $=$ $\varrho\alpha(B_n;S)$.
If $S\subset Q_n$, then all the axial diameters
$d_i(S)$ do not exceed $1$ and
(\ref{alpha_d_i_formula}) immediately gives $\alpha(Q_n;S)\geq n$. Moreover,
the equality $\alpha(Q_n;S)=n$ holds when and only when each
$d_i(S)=1$.
The following proposition
expresses the analogues of these properties for simplices contained
in a ball.
{\bf Theorem 2.}
{\it If $S\subset B_n$, then $\alpha(B_n;S)\geq n.$ The equality
$\alpha(B_n;S)=n$ holds true if and only if
$S$ is a regular simplex inscribed into $B_n$.
}
{\it Proof.}
By the definition of $\alpha(B_n;S)$, the ball $B_n$
is contained in a translate of the simplex
$\alpha(B_n;S)S$. Hence, some translate
$B^\prime$ of the ball
$\frac{1}{\alpha(B_n;S)}B_n$ is a subset of
$S$. So, we have the inclusions $B^\prime\subset S\subset B_n$. Since the radius of
$B^\prime$ is equal to
$\frac{1}{\alpha(B_n;S)}$, the inradius
$r$ and the circumradius $R$ of $S$ satisfy the inequalities
$\frac{1}{\alpha(B_n;S)}\leq r,$ $R\leq 1$.
Making use of the Euler inequality
$R\geq nr$, we can write
\begin{equation}\label{theor2_ineqs}
\frac{1}{\alpha(B_n;S)}\leq r\leq \frac{R}{n}\leq \frac{1}{n}.
\end{equation}
Therefore, $\alpha(B_n;S)\geq n.$
The equality $\alpha(B_n;S)=n$ means that the left-hand value in
(\ref{theor2_ineqs}) coincides with the right-hand one. Thus,
all the inequalities in this chain turn into equalities. We obtain
$R=1,$ $r=\frac{1}{n}$.
Since in this case the Euler inequality
(\ref{euler_ineq})
also becomes an equality,
$S$ is a regular simplex inscribed into $B_n$.
Conversely, if $S$ is a regular simplex inscribed
into $B_n$, then
$r=\frac{1}{n}$, i.\,e., $\alpha(B_n;S)=\frac{1}{r}=n$.
$\Box$
We see that Theorem 2 follows from the Euler inequality
(\ref{euler_ineq}). In fact, these statements are equivalent.
Indeed, suppose $S$ is an arbitrary $n$-dimensional simplex,
$r$ is the inradius and $R$ is the circumradius of $S$.
Let us denote by $B$ the ball containing $S$ and having radius $R$.
Then some translate $S^\prime$ of the simplex $\frac{1}{R}S$
is contained in $B_n$.
By Theorem~1,
$\alpha(B_n;S^\prime)$ is the inverse to the inradius
of $S^\prime$, i.\,e., is equal to $\frac{R}{r}.$
Now assume that Theorem~2 is true.
Let us apply this theorem to the simplex
$S^\prime\subset B_n$. This gives
$\alpha(B_n;S^\prime)=\frac{R}{r}\geq n$
and we have (\ref{euler_ineq}). Finally, if $R=nr,$ then
$\alpha(B_n;S^\prime)=n$. From Theorem 2 we obtain that both
$S^\prime$ and
$S$ are regular simplices.
It follows from
(\ref{ksi_alpha_n_ineq}) that the minimum value of
$\alpha(Q_n;S)$ for $S\subset Q_n$ also is equal to
$n$.
This minimal value corresponds to those and only those
$S\subset Q_n$ for which every axial diameter
$d_i(S)$ is equal to $1$.
The noted property is fulfilled for
the maximum volume simplices in $Q_n$
(see [3]), but not only for these simplices,
if $n>2$.
| 2,112 | 14,143 |
en
|
train
|
0.166.3
|
\section{The value $\xi(B_n;S)$}\label{nev_s3}
In this section, we will obtain the computational
formula for
the absorption index
of a simplex $S$ with respect to an Euclidean ball.
We use the previous denotations.
{\bf Theorem 3.}
{\it Suppose $S$ is a nondegenerate simplex in
${\mathbb R}^n$, $x^{(0)}\in {\mathbb R}^n$,
$\varrho>0$.
If $B\left(x^{(0)};\varrho\right)
\not\subset S$, we have
\begin{equation}\label{ksi_b_x0_ro_s_l_ij_equality}
\xi\left(B\left(x^{(0)};\varrho\right);S\right)=
(n+1)\max_{1\leq j\leq n+1}
\left[\varrho\left(\sum_{i=1}^n l_{ij}^2\right)^{1/2}-
\sum_{i=1}^n l_{ij}x_i^{(0)}-l_{n+1,j}\right]+1.
\end{equation}
In particular, if
$B_n\not\subset S$, then
\begin{equation}\label{ksi_bs_l_ij_equality}
\xi(B_n;S)=
(n+1)\max_{1\leq j\leq n+1}\left[\left(\sum_{i=1}^n l_{ij}^2\right)^{1/2}
-l_{n+1,j}\right]+1.
\end{equation}
}
{\it Proof.}
Let us apply the general formula
(\ref{ksi_cs_equality})
in the case $C=B\left(x^{(0)};\varrho\right)$.
The Cauchy inequality yields
\begin{equation}\label{cauchy_for_ksi_ineq}
-\|a_j\|\|x-x^{(0)}\|\leq (a_j,x-x^{(0)})\leq
\|a_j\|\|x-x^{(0)}\|.
\end{equation}
If $\|x-x^{(0)}\|\leq \varrho$, we see that
$$
-\varrho\|a_j\|\leq
(a_j,x)-(a_j,x^{(0)})
\leq \varrho\|a_j\|,
$$
$$-\lambda_j(x)=-(a_j,x)-l_{n+1,j}\leq
\varrho\|a_j\|-(a_j,x^{(0)})-l_{n+1,j}.$$
Since both the upper and the lower bounds in
(\ref{cauchy_for_ksi_ineq}) are reachable,
$$\max_{\|x-x^{(0)}\|\leq \varrho} (-\lambda_j(x))=
\varrho\left(\sum_{i=1}^n l_{ij}^2\right)^{1/2}-
\sum_{i=1}^n l_{ij}x_i^{(0)}-l_{n+1,j}.$$
It follows that
$$\xi\left(B\left(x^{(0)};\varrho\right);S\right)=
(n+1)\max_{1\leq j\leq n+1, \|x-x^{(0)}\|\leq \varrho} (-\lambda_j(x))+1=$$
$$=(n+1)\max_{1\leq j\leq n+1}
\left[\varrho\left(\sum_{i=1}^n l_{ij}^2\right)^{1/2}-
\sum_{i=1}^n l_{ij}x_i^{(0)}-l_{n+1,j}\right]+1,$$
and we obtain
(\ref{ksi_b_x0_ro_s_l_ij_equality}). Equality
(\ref{ksi_bs_l_ij_equality}) appears from
(\ref{ksi_b_x0_ro_s_l_ij_equality}) for $x^{(0)}=0, \varrho=1$.
$\Box$
| 1,005 | 14,143 |
en
|
train
|
0.166.4
|
\section{The equality $\beta_n=n$. Commentaries}\label{nev_s4}
{\bf Theorem 4.}
{\it
If $S\subset B_n$, then $\xi(B_n;S)\geq n.$ The equality
$\xi(B_n;S)=n$ takes place if and only if
$S$ is a regular simplex inscribed into $B_n$.
}
{\it Proof.} The statement immediately follows from Theorem 2
and the inequality
$\xi(B_n;S)\geq \alpha(B_n;S)$. We give
here also a direct proof without applying the Euler inequality
that was used to obtain the estimate
$\alpha(B_n;S)\geq n$.
First let $S$ be a regular simplex inscribed into $ B_n $.
Then $\alpha(B_n;S)=n$ and the inradius of
$S$ is equal to $\frac{1}{n}$.
Since the simplex $\xi(B_n;S)S$ is circumscribed around $B_n$, we have
the equalities
$\xi(S;B_n)=\alpha(S;B_n)=n$ and also
relation (\ref{relation_cs}) with $C=B_n$.
It follows from (\ref{ksi_cs_equality}) that for any
$j=1,\ldots,n+1$
$$\max_{x\in B_n} (-\lambda_j(x))=\frac{n-1}{n+1},$$
where $\lambda_j$ are the basic Lagrange polynomials
related to $S$.
Now suppose simplex $S$ is contained in $B_n$ but is not regular or is not
inscribed into the ball.
Denote the Lagrange polynomials of this simplex by
$\mu_j$. There exist a regular simplex
$S^*$ inscribed into $B_n$ and an integer $k$ such that
$S$ is contained in the strip $0\leq\lambda_k(x)\leq 1$, the $k$th $(n-1)$-dimensional
faces of $S$ and $S^*$ are parallel, and $S$ has not any common points with
at least one of the boundary hyperplanes of this strip.
Here $\lambda_j$ are the basic Lagrange polynomials of $S^*$.
The vertex $x^{(k)}$ of the simplex $S^*$ does not lie in its
$k$th facet.
Assume $u$ is a point of the boundary of $B_n$
most distant from $x^{(k)}$. Then
$u$ is the maximum point of polynomial $-\lambda_k(x)$, i.\,e.,
$-\lambda_k(u)=\frac{n-1}{n+1}$.
Consider the straight line connecting
$x^{(k)}$ and $u$.
Denote by $y,z$ and $t$ the intersection points
of this line and the pairwise parallel hyperplanes
$\mu_k(x)=1,$ $\mu_k(x)=0$ and $\lambda_k(x)=0$ respectively.
We have
\begin{equation}\label{ineqs_one_strong}
\|x^{(k)}-t\|\geq \|y-z\|, \quad \|t-u\|\leq \|z-u\|.
\end{equation}
At least one of these inequalities is fulfilled
in the strict form.
The linearity of the basic Lagrange polynomials means that
$$\frac{\mu_k(z)-\mu_k(u)}{\mu_k(y)-\mu_k(z)}=
\frac{\|z-u\|}{\|y-z\|}, \quad
\frac{\lambda_k(t)-\lambda_k(u)}{\lambda_k\left(x^{(k)}\right)-\lambda_k(t)}=
\frac{\|t-u\|}{\left\|x^{(k)}-t\right\|}.$$
Since $\mu_k(y)=1,$ $\mu_k(z)=0,$ $\lambda_k\left(x^{(k)}\right)=1,$ and $\lambda_k(t)=0$,
we get
$$-\mu_k(u)= \frac{\|z-u\|}{\|y-z\|} >
\frac{\|t-u\|}{\left\|x^{(k)}-t\right\|}=-\lambda_k(u)=\frac{n-1}{n+1}.$$
We made use of (\ref{ineqs_one_strong}) and took into account that
at least one of the inequalities is strict.
The application of (\ref{ksi_cs_equality}) yields
$$\xi(B_n;S)=(n+1)\max_{1\leq j\leq n+1}
\max_{x\in B_n}(-\mu_j(x))+1\geq (n+1)(-\mu_k(u))+1>n.$$
Thus, if $S$ is not a regular simplex inscribed into
$B_n$, then
$\xi(B_n;S)>n$.
We see that each simplex $S\subset B_n$ satisfies the estimate
$\xi(B_n;S)\geq n$. The equality takes place if and only if
$S$ is a regular simplex inscribed into $B_n$.
$\Box$
By analogy with the value
$\xi_n=\min\{\xi(Q_n;S): S\subset Q_n\}$
defined through the unit cube, let us
introduce the similar numerical characteristic given by the unit ball:
$$\beta_n:=\min \{ \xi(B_n;S): \,
S \mbox{ is an $n$-dimensional simplex,} \,
S\subset B_n, \, {\rm vol}(S)\ne 0\}.$$
Many problems concerning $\xi_n$ yet have not been solved.
For example, $\xi_2 = 1 + \frac{3\sqrt{5}}{5}$ still remains
the only accurate value of
$\xi_n $ for even $n$; moreover,
this value was discovered in a rather difficult way
(see [3; Chapter\,2]).
Compared to $\xi_n$
the problem on numbers $\beta_n $ turns out to be trivial.
{\bf Corollary 3.}
{\it For any $n$, we have $\beta_n=n$.
A simplex $S\subset B_n$ is extremal with respect to $\beta_n$
if and only if it is a regular simplex inscribed into $B_n$.}
{\it Proof.} It is sufficient to apply Theorem 4.
$\Box$
The technique developed for a ball makes it possible
to illustrate some results
having been earlier got for a cube.
Here we note a proof of the following known statement
which differs from the proofs given in
[3; \S\,3.2] and [12].
{\bf Corollary 4.}
{\it If
there exists an Hadamard matrix
of order $n+1$, then $\xi_n=n.$
}
{\it Proof.} It is known (see, e.\,g., [9])
that for these and only these~$n$ we can inscribe into $Q_n$ a regular simplex $S$
so that all the vertices of $S$ will coincide with vertices of
the cube. Let us denote by $B$ the ball with radius $\frac{\sqrt{n}}{2}$
having its center at the center
of the cube.
Clearly, $Q_n$ is inscribed into $B$, therefore,
the simplex is inscribed into the ball as well.
Since $S$ is regular, by Theorem 4 and by similarity reasons,
we have
$\xi(B;S)=n.$
The inclusion $Q_n\subset B$ means that
$\xi(Q_n;S)\leq \xi(B;S),$ i.\,e.~$\xi(Q_n;S)\leq n$.
From (\ref{ksi_alpha_n_ineq}) it follows that the inverse inequality
$\xi(Q_n;S)\geq n$ is also true. Hence,
$\xi(Q_n;S)=n$. Simultaneously
(\ref{ksi_alpha_n_ineq}) gives
$\xi_n=\xi(Q_n;S)=n$.
$\Box$
This argument is based on the following fact:
if $S$ is a regular simplex with the vertices in vertices of
$Q_n$, then the simplex $nS$ absorbs not only the cube $Q_n$ but also
the ball $B$ circumscribed around the cube.
The corresponding absorption index $n$ is the minimum possible
both for the cube and the ball. In addition, we mention the following
property.
{\bf Corollary 5.}
{\it Assume that $S\subset Q_n\subset nS$
and simplex $S$ is not regular. Then
$B\not\subset nS$.
}
{\it Proof.} The inclusion $B\subset nS$ implies that $\xi(B;S)=n$. This
way $S$ is a regular simplex inscribed into the ball
$B$. But since this is not so,
$B$ is not a subset of $nS$.
$\Box$
Simplices satisfying the condition of Corollary 5
exist at least for
$n=3, 5,$ and $9$ (see [12]).
The relations (\ref{ksi_alpha_n_ineq}) mean that always
$\xi_n\geq n$. Since $\xi_2=1+\frac{3\sqrt{5}}{5}>2$,
there exist $n$'s such that
$\xi_n>n$.
Besides the cases when $n+1$ is an Hadamard number,
the equality $\xi_n=n$ is established for $n=5$ and $n=9$
(the extremal simplices in
${\mathbb R}^5$ and ${\mathbb R}^9$ are given in
[12]).
For all such dimensions holds true
$\xi_n=\beta_n$, i.\,e., with respect to the minimum
absorption index of an internal simplex,
both the convex bodies, an $n$-dimensional cube and
an $n$-dimensional ball, have the same behavior.
The equality $\xi_n=n$ is equivalent to the existence
of simplices satisfying the inclusions $S\subset Q_n\subset nS$.
Some properties of such simplices (e.\,g., the fact that the center of gravity of
$S$ coincides with the center of the cube; see [7])
are similar to the properties of regular simplices
inscribed into the ball.
However,
the problem of describing the set of all dimensions
in which such simplices exist seems to be very difficult
and is nowadays far from a solution.
\centerline{\bf\Large References}
\begin{itemize}
\item[1.]
Nevskij,~M.\,V., On a certain relation for the minimal norm of an interpolational
projection,
{\it Model. Anal. Inform. Sist.}, 2009, vol.~16, no.~1, pp.~24--43 (in~Russian).
\item[2.]
Nevskii,~M.\,V. On a property of $n$-dimensional simplices,
{\it Math. Notes}, 2010, vol.~87, no.~4, pp.~543--555.
\item[3.]
Nevskii,~M.\,V.,
{\it Geometricheskie ocenki v polinomialnoi interpolyacii}
(Geometric Estimates in Polynomial
Interpolation), Yaroslavl': Yarosl. Gos. Univ., 2012 (in~Russian).
\item[4.]
Nevskii,~M.\,V., On the minimal positive homothetic image of a simplex containing a
convex body,
{\it Math. Notes}, 2013, vol.~93, no.~3--4, pp.~470--478.
\item[5.]
Nevskii,~M.\,V., and Ukhalov, A.\,Yu.,
On numerical charasteristics of a simplex and their estimates,
{\it Model. Anal. Inform. Sist.}, 2016, vol.~23, no.~5, pp.~603--619
(in~Russian).
English transl.: {\it Aut.
Control Comp. Sci.}, 2017, vol.~51, no.~7, pp.~757--769.
\item[6.]
Nevskii,~M.\,V., and Ukhalov, A.\,Yu.,
New estimates of numerical values related to a simplex,
{\it Model. Anal. Inform. Sist.}, 2017, vol.~24, no.~1, pp.~94--110
(in~Russian).
English transl.: {\it Aut.
Control Comp. Sci.}, 2017, vol.~51, no.~7, pp.~770--782.
\item[7.]
Nevskii,~M.\,V., and Ukhalov, A.\,Yu.,
On $n$-dimensional simplices satisfying inclusions
$S\subset [0,1]^n\subset nS$,
{\it Model. Anal. Inform. Sist.}, 2017, vol.~24, no.~5, pp.~578--595
(in~Russian).
English transl.: {\it Aut.
Control Comp. Sci.}, 2018, vol.~52, no.~7, pp.~667--679.
\item[8.]
Nevskii,~M.\,V., and Ukhalov, A.\,Yu.,
On minimal absorption index for an $n$-dimensional simplex,
{\it Model. Anal. Inform. Sist.}, 2018, vol.~25, no.~1, pp.~140--150
(in~Russian).
English transl.: {\it Aut.
Control Comp. Sci.}, 2018, vol.~52, no.~7, pp.~680--687.
\item[9.]
Hudelson,~M., Klee, V., and Larman,~D.,
Largest $j$-simplices in $d$-cubes: some relatives of the
Hadamard maximum determinant problem,
{\it Linear Algebra Appl.}, 1996, vol.~241--243, pp.~519--598
\item[10.]
Klamkin~M.\,S., and Tsifinis~G.\,A.,
Circumradius--inradius inequality for a simplex,
{\it Mathematics Magazine}, 1979, vol.~52, no.~1, pp.~20--22.
\item[11.]
Nevskii,~M.,
Properties of axial diameters of a simplex,
{\it Discr. Comput. Geom.}, 2011, vol.~46, no.~2, pp.~301--312.
\item[12.]
Nevskii,~M., and Ukhalov A.,
Perfect simplices in ${\mathbb R}^5$,
{\it
Beitr. Algebra Geom.},
2018, vol.~59, no.~3, pp.~501--521.
\item[13.]
Yang~S., and Wang~J.,
Improvements of $n$-dimensional Euler inequality,
{\it Journal of~Geometry}, 1994, vol.~51, pp.~190--195
\item[14.]
Vince~A., A simplex contained in a sphere,
{\it Journal of~Geometry}, 2008, vol.~89, no.~1--2,
pp.~169--178.
\end{itemize}
\end{document}
| 3,906 | 14,143 |
en
|
train
|
0.167.0
|
\begin{document}
\begin{abstract}
We study convergence of 3D lattice sums via expanding spheres. It is well-known that, in contrast to summation via expanding cubes, the expanding spheres method may lead to formally divergent series (this will be so e.g. for the classical NaCl-Madelung constant). In the present paper we prove that these series remain convergent in Cesaro sense. For the case of second order Cesaro summation, we present an elementary proof of convergence and the proof for first order Cesaro summation is more involved and is based on the Riemann localization for multi-dimensional Fourier series.
\end{abstract}
\subjclass[2010]{11L03, 42B08, 35B10, 35R11}
\keywords{lattice sums, Madelung constants, Cesaro summation, Fourier series, Riemann localization}
\thanks{The first author has been partially supported by the LMS URB grant 1920-04. The second author is partially supported by the EPSRC grant EP/P024920/1}
\maketitle
\tableofcontents
\section{Introduction}\label{s0}
Lattice sums of the form
\begin{equation}\label{0.lattice}
\sum_{(n,k,m)\in\mathbb Z^3}\frac{e^{i(nx_1+kx_2+mx_3)}}{(a^2+n^2+k^2+m^2)^s}
\end{equation}
and their various extensions naturally appear in many branches of modern analysis including analytic number theory (e.g. for studying the number of lattice points in spheres or balls), analysis of PDEs (e.g. for constructing Green functions for various differential operators in periodic domains, finding best constants in interpolation inequalities, etc.), harmonic analysis as well as in applications, e.g. for computing the electrostatic potential of a single ion in a crystal (the so-called Madelung constants), see \cite{Flap,MFS,BDZ,Bor13,mar,mar2000,Ram21,ZI} and references therein. For instance, the classical Madelung constant for the NaCl crystal is given by
\begin{equation}\label{0.M}
M=\sideset{}{'}\sum_{(i,j,k)\in \mathbb Z^3}\frac{(-1)^{i+j+k}}{(i^2+j^2+k^2)^{1/2}},
\end{equation}
where the index ${'}$ means that the sum does not contain the term which corresponds to $(i,j,k)=0$.
\par
The common feature of series \eqref{0.lattice} and \eqref{0.M} is that the decay rate of the terms is not strong enough to provide absolute convergence, so they are often only conditionally convergent and their convergence/divergence strongly depends on the method of summation. The typical methods of summation are summation by expanding cubes/rectangles or summation by expanding spheres, see sections \S\ref{s1} and \S\ref{s2} for definitions and \cite{Bor13} for more details. For instance, when summation by expanding spheres is used, the formula for the Madelung constant has an especially elegant form
\begin{equation}\label{2.Ms}
M=\sum_{n=1}^\infty (-1)^n\frac{r_3(n)}{\sqrt{n}},
\end{equation}
where $r_3(n)$ is the number of integer point in a sphere of radius $\sqrt{n}$. Exactly this formula is commonly used in physical literature although it has been known for more than 70 years that series \eqref{2.Ms} is {\it divergent}, see \cite{Emer}. Thus, one should either switch from expanding spheres to expanding cubes/rectangles for summation of \eqref{0.M} (which is suggested to do e.g. in \cite{Bor13} and where such a convergence problem does not appear) or to use more advanced methods for summation of \eqref{2.Ms}, for instance Abel or Cesaro summation. Surprisingly, the possibility to justify \eqref{2.Ms} in such a way is not properly studied (although there are detailed results concerning Cesaro summation for different methods, e.g. for the so called summation by diamonds, see \cite{Bor13}) and the main aim of the present notes is to cover this gap.
\par
Namely, we will study the following generalized Madelung constants:
\begin{equation}\label{2.Mg}
M_{a,s}=\sideset{}{'}\sum_{(i,j,k)\in \mathbb Z^3}\frac{(-1)^{i+j+k}}{(a^2+i^2+j^2+k^2)^s}=\sum_{n=1}^\infty(-1)^n\frac{r_3(n)}{(a^2+n)^s},
\end{equation}
where $a\in\R$ and $s>0$ and the sum in the RHS is understood in the sense of Cesaro (Cesaro-Riesz) summation of order $\kappa$, see Definition \ref{Def2.Cesaro} below. Our presentation of the main result consists of two parts.
\par
First, we present a very elementary proof of convergence for second order Cesaro summation which is based only on counting the number of lattice points in spherical layers by volume comparison arguments. This gives the following result.
\begin{theorem}\label{Th0.c2} Let $a\in\R$ and $s>0$. Then
\begin{equation}
M_{a,s}=\lim_{N\to\infty}\sum_{n=1}^N(-1)^n\left(1-\frac nN\right)^2 \frac{r_3(n)}{(a^2+n)^s}.
\end{equation}
In particular, the limit in the RHS exists.
\end{theorem}
Second, we establish the convergence for the first order Cesaro summation.
\begin{theorem}\label{Th0.c1} Let $a\in\R$ and $s>0$. Then
\begin{equation}
M_{a,s}=\lim_{N\to\infty}\sum_{n=1}^N(-1)^n\left(1-\frac nN\right) \frac{r_3(n)}{(a^2+n)^s}.
\end{equation}
In particular, the limit in the RHS exists.
\end{theorem}
In contrast to Theorem \ref{Th0.c2}, the proof of this result is more involved and is based on an interesting connection between the convergence of lattice sums and Riemann localization for multiple Fourier series, see section \S\ref{s22} for more details. Note that Theorem \ref{Th0.c2} is a formal corollary of Theorem \ref{Th0.c1}, but we prefer to keep both of them not only since the proof of Theorem \ref{Th0.c2} is essentially simple, but also since it possesses extensions to other methods of summation, see the discussion in section \S\ref{s3}. Also note that the above convergence results have mainly theoretical interest since much more effective formulas
for Madelung constants are available for practical computations, see \cite{Bor13} and references therein.
\par
The paper is organized as follows. Some preliminary results concerning lattice sums and summation by rectangles are collected in \S\ref{s1}. The proofs of Theorems \ref{Th0.c2} and \ref{Th0.c1} are given in sections \S\ref{s21} and \S\ref{s22} respectively. Some discussion around the obtained results, their possible generalizations and numerical simulations are presented in section \S\ref{s3}.
\section{Preliminaries}\label{s1}
In this section, we recall standard results about lattice sums and prepare some technical tools which will be used in the sequel. We start with the simple lemma which is however crucial for what follows.
\begin{lemma}\label{Lem1.block} Let the function $f:\R^3\to\R$ be 3 times continuously differentiable in the cube $Q_{2I,2J,2K}$, where $Q_{I,J,K}:=[I,I+1]\times[J,J+1]\times[K,K+1]$. Then
\begin{multline}\label{1.E}
\min_{x\in Q_{2I,2J,2K}}\{-\partial_{x_1}\partial_{x_2}\partial_{x_3}f(x)\}\le\\\le E_{I,J,K}(f):=\sum_{i=2I}^{2I+1}\sum_{j=2J}^{2J+1}
\sum_{k=2K}^{2K+1}(-1)^{i+j+k}f(i,j,k)\le\\\le \max_{x\in Q_{2I,2J,2K}}\{-\partial_{x_1}\partial_{x_2}\partial_{x_3}f(x)\}.
\end{multline}
\end{lemma}
\begin{proof} Indeed, it is not difficult to check using the Newton--Leibniz formula that
$$
E_{I,J,K}(f)=-\int_0^1\int_0^1\int_0^1 \partial_{x_1}\partial_{x_2}\partial_{x_3}f(2I+s_1,2J+s_2,2K+s_3)\,ds_1\,ds_2\,ds_3
$$
and this formula gives the desired result.
\end{proof}
\noindent A typical example of the function $f$ is the following one
\begin{equation}\label{1.pol}
f_{a,s}(x)=(a^2+|x|^2)^s,\ \ |x|^2=x_1^2+x_2^2+x_3^2.
\end{equation}
In this case,
$$
\partial_{x_1}\partial_{x_2}\partial_{x_3}f= 8s(s-1)(s-2)x_1x_2x_3(a^2+|x|^2)^{s-3}
$$
and, therefore,
\begin{equation}\label{1.bet}
|E_{I,J,K}(f)|\le C(a^2+I^2+J^2+K^2)^{s-\frac32}.
\end{equation}
One more important property of the function \eqref{1.pol} is that the term $E_{I,J,K}$ is sign-definite in the octant $I,J,K\ge0$.
\par
At the next step, we state a straightforward extension of the integral comparison principle to the case of multi-dimensional series. We recall that, in one dimensional case, for a positive monotone decreasing function $f:[A,B]\to\R$, $A,B\in\mathbb Z$, $B>A$, we have
$$
f(B)+\int_A^{B}f(x)\,dx\le \sum_{n=A}^Bf(n)\le f(A)+\int_{A}^{B}f(x)\,dx
$$
which, in turn, is an immediate corollary of the estimate
$$
f(n+1)\le\int_n^{n+1}f(x)\,dx\le f(n).
$$
\begin{lemma}\label{Lem1.int} Let the continuous function $f:\R^3\setminus\{0\}\to\R_+$ be such that
\begin{equation}\label{1.good}
C_2\max_{x\in Q_{i,j,k}} f(x)\le \min_{x\in Q_{i,j,k}}f(x)\le C_1\max_{x\in Q_{i,j,k}} f(x),
\end{equation}
$(i,j,k)\in\mathbb Z^3$ and the constants $C_1$ and $C_2$ are positive and are independent of $Q_{i,j,k}\not\owns 0$.
Let also $\Omega\subset\R^3$ be a domain which does not contain $0$ and
\begin{equation}\label{1.lat}
\Omega_{lat}:=\{(i,j,k)\in\mathbb Z^3:\,\exists Q_{I,J,K}\subset\Omega,\ \ (i,j,k)\in Q_{I,J,K},\ 0\notin Q_{I,J,K}\}.
\end{equation}
Then,
\begin{equation}\label{1.comp}
\sum_{(i,j,k)\in\Omega_{lat}}f(i,j,k)\le C\int_\Omega f(x)\,dx,
\end{equation}
where the constant $C$ is independent of $\Omegaega$ and $f$. If assumption \eqref{1.good}
is satisfied for all $(I,J,K)$, the condition $0\notin\Omegaega$ and $0\notin Q_{I,J,K}$ can be removed.
\begin{comment}
Then, for every $N,M,K\in\mathbb N$, we have
\begin{multline}
\sum_{(i,j,k)\in\Pi_{N,M,K}' }f(i,j,k)\le \int_{x\in\Pi_{M,N,K}'}f(x)\,dx+f(1,1,1)+\\+\int_{(x_1,x_2)\in\Pi_{N,M}'}f(x_1,x_2,0)\,dx_1\,dx_2+
\int_{(x_2,x_3)\in\Pi_{M,K}'}f(0,x_2,x_3)\,dx_2\,dx_3+\\+
\int_{(x_1,x_3)\in\Pi_{M,K}'}f(x_1,0,x_3)\,dx_1\,dx_3
+f(1,1,0)+f(0,1,1)+f(1,0,1)+\\+f(1,0,0)+f(0,0,1)+f(0,1,0)+\\+\int_1^Nf(x_1,0,0)\,dx_1+
\int_1^Mf(0,x_2,0)\,dx_2+\int_1^Kf(0,0,x_3)\,dx_3
\end{multline}
\end{comment}
\end{lemma}
\begin{proof} Indeed, assumption \eqref{1.good} guarantees that
\begin{equation}\label{1.mult}
C_2\int_{Q_{I,J,K}}f(x)\,dx\le f(i,j,k)\le C_1\int_{Q_{I,J,K}}f(x)\,dx
\end{equation}
for all $Q_{I,J,K}$ which do not contain zero and all $(i,j,k)\in Q_{I,J,K}\cap\mathbb Z^3$. Since any point $(i,j,k)\in\mathbb Z^3$ can belong to no more than $8$ different cubes $Q_{I,J,K}$, \eqref{1.mult} implies \eqref{1.comp} (with the constant $C=8C_1$) and finishes the proof of the lemma.
\end{proof}
We will mainly use this lemma for functions $f_{a,s}(x)$ defined by \eqref{1.pol}. It is not difficult to see that these functions satisfy assumption \eqref{1.good}. For instance, this follows from the obvious estimate
$$
|\nabla f_{a,s}(x)|\le \frac{C_{s}}{\sqrt{a^2+|x|^2}}f_{a,s}(x)
$$
and the mean value theorem. Moreover, if $a\ne0$, condition \eqref{1.good} holds for $Q_{i,j,k}\owns 0$ as well. As a corollary, we get the following estimate for summation "by spheres":
\begin{multline}\label{1.as}
\sideset{}{'}\sum_{(i,j,k)\in B_n\cap\mathbb Z^3} f_{a,s}(i,j,k)\le C_s\int_{x\in B_n\setminus B_1}(a^2+|x|^2)^{s}\,dx\le\\\le 4\pi C_s\int_1^{\sqrt n} R^2(a^2+R^2)^s\,dR\le 4\pi C_s\int_1^{\sqrt n} R(a^2+R^2)^{s+1/2}\,dR=\\=\frac {4\pi C_s}{2s+3}\left((a^2+n)^{s+3/2}-(a^2+1)^{s+3/2}\right),
\end{multline}
where $B_n:=\{x\in\R^3\,:\,|x|^2\le n\}$ and $\sum'$ means that $(i,j,k)=0$ is ex\-clu\-ded. Of course, in the case $s=-\frac32$, the RHS of \eqref{1.as} reads as $2\pi C_s\ln\frac{a^2+n}{a^2+1}$. In particular, if $s>\frac32$, passing to the limit $n\to\infty$ in \eqref{1.as}, we see that
\begin{equation}\label{1.simple}
\sideset{}{'}\sum_{(i,j,k)\in\mathbb Z^3}\frac1{(a^2+i^2+j^2+k^2)^s}=
\sideset{}{'}\sum_{(i,j,k)\in\mathbb Z^3}f_{a,-s}(i,j,k)\le \frac {C_s}{(a^2+1)^{s-\frac32}}.
\end{equation}
Thus, the series in the LHS is absolutely convergent if $s>\frac32$ and its sum tends to zero as $a\to\infty$. It is also well-known that condition $s>\frac32$ is sharp and the series is
divergent if $s\le \frac32$.
\par
We also mention that Lemmas \ref{Lem1.block} and \ref{Lem1.int} are stated for 3-dimensional case just for simplicity. Obviously, their analogues hold for any dimension. We will use this observation later.
\par
We now turn to the alternating version of lattice sums \eqref{1.simple}
\begin{equation}\label{1.main}
M_{a,s}:=\sideset{}{'}\sum_{(i,j,k)\in\mathbb Z^3}\frac{(-1)^{i+j+k}}{(a^2+i^2+j^2+k^2)^s}
\end{equation}
which is the main object of study in these notes. We recall that, due to \eqref{1.simple}, this series is absolutely convergent for $s>\frac32$, so the sum is independent of the method of summation. In contrast to this, in the case $0<s\le\frac32$, the convergence is not absolute and depends strongly to the method of summation, see \cite{Bor13} and references therein for more details. Note also that $M_{a,s}$ is analytic in $s$ and, similarly to the classical Riemann zeta function, can be extended to a holomorphic function on $\mathbb C$ with a pole at $s=0$, but this is beyond the scope of our paper, see e.g. \cite{Bor13} for more details. Thus, we are assuming from now on that $0<s\le\frac32$. We start with the most studied case of summation by expanding rectangles/parallelograms.
\begin{definition} Let $\Pi_{I,J,K}:=[-I,I]\times[-J,J]\times[-K,K]$, $I,J,K\in\mathbb N$, and
$$
S_{\Pi_{I,J,K}}(a,s):=\sideset{}{'}\sum_{(i,j,k)\in\Pi_{I,J,K}\cap\mathbb Z^3}\frac{(-1)^{i+j+k}}{(a^2+i^2+j^2+k^2)^s}.
$$
We say that \eqref{1.main} is summable by expanding rectangles if the following triple limit exists and is finite
$$
M_{a,s}=\lim_{(I,J,K)\to\infty} S_{\Pi_{I,J,K}}(a,s).
$$
\end{definition}
To study the sum \eqref{1.main}, we combine the terms belonging to cubes $Q_{2i,2j,2k}$ and introduce the partial sums
\begin{equation}
E_{\Pi_{I,J,K}}(a,s):=\sideset{}{'}\sum_{(2i,2j,2k)\in\Pi_{I,J,K}\cap2\mathbb Z^3}E_{i,j,k}(a,s),
\end{equation}
where $E_{i,j,k}(a,s):=E_{i,j,k}(f_{a,-s})$ is defined in \eqref{1.E}.
\begin{theorem} Let $0<s\le \frac32$. Then,
\begin{equation}\label{1.equiv}
\bigg|S_{\Pi_{I,J,K}}(a,s)-E_{\Pi_{I,J,K}}(a,s)\bigg|\le \frac {C_s}{\left(a^2+\min\{I^2,J^2,K^2\}\right)^{s}},
\end{equation}
where the constant $C_s$ is independent of $a$ and $I,J,K$.
\end{theorem}
\begin{proof} We first mention that, according to Lemma \ref{Lem1.block} and estimate \eqref{1.simple}, we see that
\begin{equation}\label{1.e-conv}
|E_{\Pi_{I,J,K}}(a,s)|\le \frac{C_s}{(a^2+1)^s}
\end{equation}
uniformly with respect to $(I,J,K)$.
\par
The difference between $S_{\Pi_{I,J,K}}$ and $E_{\Pi_{I,J,K}}$ consists of the alternating sum of $f_{a,-s}(i,j,k)$ where $(i,j,k)$ belong to the boundary of $\Pi_{I,J,K}$. Let us write an explicit formula for the case when all $I,J,K$ are even (other cases are considered analogously):
\begin{multline}\label{1.huge}
S_{\Pi_{2I,2J,2K}}(a,s)-E_{\Pi_{2I,2J,2K}}(a,s)=\!\!\!\sideset{}{'}\sum_{\substack{-2J\le j\le2J\\-2K\le k\le2K}}(-1)^{j+k}f_{a,-s}(2I,j,k)+\\+\sideset{}{'}\sum_{\substack{-2I\le i\le2I\\-2K\le k\le2K}}(-1)^{i+k}f_{a,-s}(i,2J,k)+\sideset{}{'}\sum_{\substack{-2I\le i\le2I\\-2J\le j\le2J}}(-1)^{i+j}f_{a,-s}(i,j,2K)-\\-
\sideset{}{'}\sum_{-2I\le i\le2I}(-1)^{i}f_{a,-s}(i,2J,2K)-\sideset{}{'}\sum_{-2J\le j\le2J}(-1)^{j}f_{a,-s}(2I,j,2K)-\\-
\sideset{}{'}\sum_{-2K\le k\le2K}(-1)^{k}f_{a,-s}(2I,2J,k)+f_{a,-s}(2I,2J,2K).
\end{multline}
In the RHS of this formula we see the analogues of lattice sum \eqref{1.main} in lower dimensions one or two and, thus, it allows to reduce the dimension. Indeed, assume that the analogues of estimate \eqref{1.equiv} are already established in one and two dimensions. Then, using the lower dimensional analogue of \eqref{1.e-conv} together with the fact that
$$
f_{a,-s}(2I,j,k)=f_{\sqrt{a^2+4I^2},-s}(j,k),
$$
where we have 2D analogue of the function $f_{a,-s}$ in the RHS, we arrive at
\begin{multline}\label{1.huge1}
\bigg|S_{\Pi_{2I,2J,2K}}(a,s)-E_{\Pi_{2I,2J,2K}}(a,s)\bigg|\le\\\le
\frac{C_s}{(a^2+4I^2+1)^s}+\frac{C_s}{(a^2+\min\{J^2,K^2\})^s}+
\frac{C_s}{(a^2+4J^2+1)^s}+\\+\frac{C_s}{(a^2+\min\{I^2,K^2\})^s}+
\frac{C_s}{(a^2+4K^2+1)^s}+\frac{C_s}{(a^2+\min\{I^2,J^2\})^s}+\\+
\frac{C_s}{(a^2+4I^2+4K^2)^s}+\frac{C_s}{(a^2+4I^2+4J^2)^s}+
\frac{C_s}{(a^2+4J^2+4K^2)^s}+\\+
\frac{C_s}{(a^2+4I^2+4J^2+4K^2)^s}\le \frac{C_s'}{(a^2+\min\{I^2,J^2,K^2\})^s}.
\end{multline}
Since in 1D case the desired estimate is obvious, we complete the proof of the theorem by induction.
\end{proof}
\begin{corollary}\label{Cor1.main} Let $s>0$. Then series \eqref{1.main} is convergent by expanding rectangles and
\begin{equation}\label{1.rep}
M_{a,s}=\sideset{}{'}\sum_{(i,j,k)\in\mathbb Z^3}E_{i,j,k}(a,s).
\end{equation}
In particular, the series in RHS of \eqref{1.rep} is absolutely convergent, so the method of summation for it is not important.
\end{corollary}
Indeed, this fact is an immediate corollary of estimates \eqref{1.equiv}, \eqref{1.bet} and \eqref{1.simple}.
\section{Summation by expanding spheres}\label{s2}
We now turn to summation by expanding spheres. In other words, we want to write the formula \eqref{1.main} in the form
\begin{equation}\label{2.sphere}
M_{a,s}=\lim_{N\to\infty}\sideset{}{'}\sum_{i^2+j^2+k^2\le N}\frac{(-1)^{i+j+k}}{(a^2+i^2+j^2+k^2)^s}.
\end{equation}
Moreover, since $(i+j+k)^2=i^2+j^2+k^2+2(ij+jk+ik)$, we have $(-1)^{i+j+k}=(-1)^{i^2+j^2+k^2}$, so formula \eqref{2.sphere} can be rewritten in the following elegant form
\begin{equation}\label{2.sp}
M_{a,s}=\sum_{n=1}^\infty (-1)^n\frac{r_3(n)}{(a^2+n)^s},
\end{equation}
where $r_3(n)$ is the number of integer points on a sphere of radius $\sqrt{n}$ centered at zero, see e.g. \cite{Ram21} and reference therein for more details about this function. However, the convergence of series \eqref{2.sp} is more delicate. In particular, it is well-known that this series is divergent for $s\le\frac12$, see \cite{Emer,Bor13}. For the convenience of the reader, we give the proof of this fact below.
\begin{lemma}\label{Lem2.div} Let $c>0$ be small enough. Then, there are infinitely many values of $n\in\mathbb N$ such that
\begin{equation}\label{2.bad}
r_3(n)\ge c\sqrt{n}
\end{equation}
and, particularly, series \eqref{2.sp} is divergent for all $s\le\frac12$.
\end{lemma}
\begin{proof} Indeed, by comparison of volumes, we see that the number $M_N$ of integer points in a spherical layer $N\le i^2+j^2+k^2\le 2N$ can be estimated from below as
$$
M_N=\sum_{n=N}^{2N}r_3(n)\ge \frac43\pi\left((\sqrt{2N}-\sqrt3)^{3}-(\sqrt{N}+\sqrt3)^{3}\right)\ge cN^{3/2}
$$
for sufficiently small $c>0$. Thus, for every sufficiently big $N\in\mathbb N$, there exists $n\in[N,2N]$ such that $r_3(n)\ge c\sqrt{n}$ and estimate \eqref{2.bad} is verified. The divergence of \eqref{2.sp} for $s\le \frac12$ is an immediate corollary of this estimate since the $n$th term $(-1)^n\frac{r_3(n)}{(a^2+n)^s}$ does not tend to zero under this condition and the lemma is proved.
\end{proof}
\begin{remark} The condition that $c>0$ is small can be removed using more sophisticated methods. Moreover, it is known that the inequality
$$
r_3(n)\ge c\sqrt{n}\ln\ln n
$$
holds for infinitely many values of $n\in\mathbb N$ (for properly chosen $c>0$). On the other hand,
for every $\varepsilon>0$, there exists $C_\varepsilon>0$ such that
$$
r_3(n)\le C_\varepsilon n^{\frac12+\varepsilon},
$$
see \cite{Ram21} and references therein. Thus, we cannot establish divergence of \eqref{2.sp} via the $n$th term test if $s>\frac12$. Since this series is alternating, one may expect convergence for $s>\frac12$. However, the behavior of $r_3(n)$ as $n\to\infty$ is very irregular and, to the best of our knowledge, this convergence is still an open problem for $\frac12<s\le\frac{25}{34}$, see \cite{Bor13} for the convergence in the case $s>\frac{25}{34}$ and related results.
\end{remark}
Thus, one should use weaker concepts of convergence in order to justify equality \eqref{2.sp}. The main aim of these notes is to establish the convergence in the sense of Cesaro.
\begin{definition}\label{Def2.Cesaro} Let $\kappa>0$. We say that the series \eqref{2.sp} is $\kappa$-Cesaro (Cesaro-Riesz) summable if the sequence
$$
C^\kappa_N(a,s):=\sum_{n=1}^N\left(1-\frac nN\right)^\kappa(-1)^n\frac{r_3(n)}{(a^2+n)^s}
$$
is convergent. Then we write
$$
(C,\kappa)-\sum_{n=1}^\infty (-1)^n\frac{r_3(n)}{(a^2+n)^s}:=\lim_{N\to\infty}C_N^\kappa(a,s).
$$
Obviously, $\kappa=0$ corresponds to the usual summation and if a series is $\kappa$-Cesaro summable, then it is also $\kappa_1$-Cesaro summable for any $\kappa_1>\kappa$, see e.g.~\cite{Ha}.
\end{definition}
\subsection{Second order Cesaro summation}\label{s21} The aim of this subsection is to present a very elementary proof of the fact that the series \eqref{2.sp} is second order Cesaro summable. Namely, the following theorem holds.
\begin{theorem}\label{Th2.2c} Let $s>0$. Then the series \eqref{2.sp} is second order Cesaro summable and
\begin{equation}\label{2.2good}
M_{a,s}=(C,2)-\sum_{n=1}^\infty (-1)^n\frac{r_3(n)}{(a^2+n)^s},
\end{equation}
where $M_{a,s}$ is the same as in \eqref{1.main} and \eqref{1.rep}.
\end{theorem}
\begin{proof}
For every $N\in\mathbb N$, let us introduce the sets
\begin{equation*}
D_N:=\bigcup\limits_{\substack{(I,J,K)\in2\mathbb Z^3\\Q_{I,J,K}\subset B_N}}Q_{I,J,K},\ \ \ D_N':=B_N\setminus D_N
\end{equation*}
and split the sum $C^2_N(a,s)$ as follows
\begin{multline}
C^2_N(a,s)=\sideset{}{'}\sum_{(i,j,k)\in B_N\cap\mathbb Z^3}\left(1-\frac{i^2+j^2+k^2}{N}\right)^2\frac{(-1)^{i+j+k}}{(a^2+i^2+j^2+k^2)^s}=\\=\sideset{}{'}\sum_{(i,j,k)\in D_N\cap\mathbb Z^3}\left(1-\frac{i^2+j^2+k^2}{N}\right)^2\frac{(-1)^{i+j+k}}{(a^2+i^2+j^2+k^2)^s}+\\+
\!\!\!\sideset{}{'}\sum_{(i,j,k)\in D_N'\cap\mathbb Z^3}\!\!\!\left(1\!-\!\frac{i^2+j^2+k^2}{N}\right)^2\!\!\!\frac{(-1)^{i+j+k}}{(a^2+i^2+j^2+k^2)^s}\!:=
A_N(a,s)+R_N(a,s).
\end{multline}
Let us start with estimating the sum $R_N(a,s)$. To this end we use the elementary fact that
$$
\sqrt{N}-\sqrt3\le \sqrt{i^2+j^2+k^2}\le \sqrt{N}
$$
for all $(i,j,k)\in D_N'$ ($\sqrt{3}$ is the length of the diagonal of the cube $Q_{I,J,K}$). Therefore,
\begin{equation}\label{2.R}
|R_N(a,s)|\le \left(1-\frac{(\sqrt{N}-\sqrt3)^2}{N}\right)^2\frac{\#\left(D'_N\cap\mathbb Z^3\right)}{\left(a^2+(\sqrt {N}-\sqrt3)^2\right)^s}.
\end{equation}
Using again the fact that all integer points of $D'_N$ belong to the spherical layer
$\sqrt{N}-\sqrt{3}\le |x|\le\sqrt{N}$ together with the volume comparison arguments, we conclude
that
$$
\#\left(D'_N\cap\mathbb Z^3\right)\le \frac43\pi\left((\sqrt{N}+\sqrt3)^3-(\sqrt{N}-\sqrt3)^3\right)\le c_0 N
$$
for some positive $c_0$. Therefore,
\begin{equation}
|R_N(a,s)|\le \frac{C}{N}\frac{c_0N}{\left(a^2+(\sqrt {N}-\sqrt3)^2\right)^s}=\frac{C}{\left(a^2+(\sqrt {N}-\sqrt3)^2\right)^s}\to0
\end{equation}
as $N\to\infty$. Thus, the term $R_N$ is not essential and we only need to estimate the sum $A_N$. To this end, we rewrite it as follows
\begin{multline}\label{2.huge2}
A_N(a,s)=\left(1-\frac{a^2}N\right)^2\sideset{}{'}\sum_{(i,j,k)\in D_N\cap\mathbb Z^3}\frac{(-1)^{i+j+k}}{(a^2+i^2+j^2+k^2)^s}+\\+\frac2N\left(1-\frac {a^2}N\right)\sideset{}{'}\sum_{(i,j,k)\in D_N\cap\mathbb Z^3}\frac{(-1)^{i+j+k}}{(a^2+i^2+j^2+k^2)^{s-1}}+\\+
\frac1{N^2}\sideset{}{'}\sum_{(i,j,k)\in D_N\cap\mathbb Z^3}\frac{(-1)^{i+j+k}}{(a^2+i^2+j^2+k^2)^{s-2}}=\\=
\left(1-\frac{a^2}N\right)^2\sideset{}{'}\sum_{(i,j,k)\in \frac12D_{N}\cap\mathbb Z^3}E_{i,j,k}(a,s)+\\+\frac2N\left(1-\frac {a^2}N\right)\sideset{}{'}\sum_{(i,j,k)\in \frac12D_{N}\cap\mathbb Z^3}E_{i,j,k}(a,s-1)+\\+
\frac1{N^2}\sideset{}{'}\sum_{(i,j,k)\in \frac12D_{N}\cap\mathbb Z^3}E_{i,j,k}(a,s-2).
\end{multline}
From Corollary \ref{Cor1.main}, we know that the first sum in the RHS of \eqref{2.huge2} converges to $M_{a,s}$ as $N\to\infty$. Using estimates \eqref{1.bet} and \eqref{1.as}, we also conclude that
\begin{equation}
\bigg|\sideset{}{'}\sum_{(i,j,k)\in \frac12D_N\cap\mathbb Z^3}E_{i,j,k}(a,s-1)\bigg|\le CN^{1-s}
\end{equation}
and
\begin{equation}
\bigg|\sideset{}{'}\sum_{(i,j,k)\in \frac12D_N\cap\mathbb Z^3}E_{i,j,k}(a,s-2)\bigg|\le CN^{2-s}.
\end{equation}
Thus, two other terms in the RHS of \eqref{2.huge2} tend to zero as $N\to\infty$ and the theorem is proved.
\end{proof}
\subsection{First order Cesaro summation}\label{s22} We may try to treat this case analogously to the proof of Theorem \ref{Th2.2c}. However, in this case, we will have the multiplier $(1-\frac{(\sqrt{N}-\sqrt3)^2}{N})$ without the extra square and this leads to the extra technical assumption $s>\frac12$. In particular, this method does not allow us to establish the convergence for the case of classical NaCl-Madelung constant ($a=0$, $s=\frac12$). In this subsection, we present an alternative method based on the Riemann localization principle for multiple Fourier series which allows us to remove the technical condition $s>\frac12$. The key idea of our method is to introduce the function
\begin{equation}\label{2.F}
M_{a,s}(x):=\sideset{}{'}\sum_{(n,k,l)\in\mathbb Z^3}\frac{e^{i(nx_1+kx_2+lx_3)}}{(a^2+n^2+k^2+l^2)^s}.
\end{equation}
The series is clearly convergent, say, in $\mathcal D'(\mathbb T^3)$ and defines (up to a constant) a fundamental solution for the fractional Laplacian $(a^2-\Delta)^s$ on a torus $\mathbb T^3$ defined on functions with zero mean. Then, at least formally,
$$
M_{a,s}=M_{a,s}(\pi,\pi,\pi)
$$
and justification of this is related to the convergence problem for multi-dimensional Fourier series.
\par
Let $G_{a,s}(x)$ be the fundamental solution for $(a^2-\Delta)^s$ in the whole space $\R^3$, i.e.
$$
G_{a,s}(x)=-\frac{1}{2^{\frac12+s}\pi^{\frac32}\Gamma(s)}\frac1{|x|^{3-2s}}\Psi(a|x|),\ \ \Psi(z):=z^{\frac32-s}K_{\frac32-s}(z),
$$
where $K_\nu(z)$ is a modified Bessel function of the second kind and $\Gamma(s)$ is the Euler gamma function, see e.g. \cite{SL,Watson}. In particular, passing to the limit $a\to0$ and using that $\Psi(0)=2^{\frac12-s}\Gamma(\frac32-s)$, we get the fundamental solution for the case $a=0$:
$$
G_{0,s}(x)=-\frac{\Gamma(\frac32-s)}{2^{2s}\pi^{\frac32}\Gamma(s)}\,\frac1{|x|^{3-2s}}.
$$
Then, as known, the periodization of this function will be the fundamental solution on a torus:
\begin{equation}\label{2.Poisson}
M_{a,s}(x)=C_0+\frac1{(2\pi)^3}\sum_{(n,k,l)\in\mathbb Z^3}G_{a,s}\left(x-2\pi(n,k,l)\right),
\end{equation}
where the constant $C_0$ is chosen in such a way that $M_{a,s}(x)$ has a zero mean on the torus, see \cite{Flap,Trans} and references therein. Recall that, for $a>0$, the function $G_{a,s}(x)$ decays exponentially as $|x|\to\infty$, so the convergence of \eqref{2.Poisson} is immediate (and identity \eqref{2.Poisson} is nothing more than the Poisson Summation Formula applied to \eqref{2.F}). However, when $a=0$, the convergence of \eqref{2.Poisson} is more delicate since $G_{0,s}(x)\sim |x|^{2s-3}$ and the decay rate is not strong enough to get the absolute convergence. Thus, some regularization should be done and the method of summation also becomes important, see \cite{Bor13,CR,mar2000} and reference therein. Recall also that we need to consider the case $s\le\frac12$ only (since for $s>\frac12$, we have convergence of the first order Cesaro sums by elementary methods).
\begin{lemma}\label{Lem2.Green} Let $0<s<1$. Then
\begin{multline}\label{2.Poisson0}
M_{0,s}(x)=C_0'+\frac1{(2\pi)^3}
G_{0,s}(x)+\\+\frac1{(2\pi)^3}\sideset{}{'}\sum_{(n,k,l)\in\mathbb Z^3}\bigg(G_{0,s}\left(x-2\pi(n,k,l)\right)-G_{0,s}(2\pi(n,k,l))\bigg),
\end{multline}
where the convergence is understood in the sense of convergence by expanding rectangles and $C'_0$ is chosen in such a way that the mean value of the expression in the RHS is zero.
\end{lemma}
\begin{proof}[Sketch of the proof] Although this result seems well-known, we sketch below the proof of convergence of the RHS (the equality with the LHS can be established after that in a standard way, e.g. passing to the limit $a\to0$ in \eqref{2.Poisson}).
\par
To estimate the terms in the RHS, we use the following version of a mean value theorem for second differences:
\begin{multline}
f(p+x)+f(p-x)-2f(p)=[f(p+x)-f(p)]-[f(p)-f(p-x)]\\=x\int_0^1(f'(p+\kappa x)-f'(p-\kappa x))\,d\kappa=\\=
2x^2\int_0^1\int_0^1\kappa f''(p+\kappa(1-2\kappa_1)x)\,d\kappa\,d\kappa_1
\end{multline}
Applying this formula to the function $G_{0,s}(x)$, we get
\begin{multline*}
\bigg|\sum_{\varepsilon_i=\pm1,\, i=1,2,3 }\bigg(G_{0,s}(2\pi n+\varepsilon_1x_1,2\pi k+\varepsilon_2 x_2,2\pi l+\varepsilon_3x_3)-G_{0,s}(2\pi(n,k,l))\bigg)\bigg|\\\le
C\sum_{i=1}^3\|\partial^2_{x_i}G_{0,s}\|_{C(2\pi(n,k,l)+\mathbb T^3)}\le \frac{C_1}{(n^2+k^2+l^2)^{\frac32-2(s-1)}}.
\end{multline*}
Thus, we see that, if we combine together in the RHS of \eqref{2.Poisson0} the terms corresponding to 8 nodes $(\pm n,\pm k,\pm l)$ (for every fixed $(n,k,l)$), the obtained series will become absolutely convergent (here we use the assumption $s<1$).
\par
It remains to note that the parallelepipeds $\Pi_{N,M,K}$ enjoy the property: $(n,m,k)\in\Pi_{N,M,K}$ implies that all 8 points $(\pm n,\pm m,\pm k)\in \Pi_{N,M,K}$. This implies the convergence by expanding rectangles and finishes the proof of the lemma.
\end{proof}
\begin{corollary}\label{Cor2.Grsm} Let $0<s<\frac32$ and $a>0$ or $a=0$ and $0<s<1$. Then, the function $M_{a,s}(x)$ is $C^\infty(\mathbb T^3\setminus\{0\})$ and $G_{a,s}(x)\sim \frac C{|x|^{3-2s}}$ near zero. In particular, $M_{a,s}\in L^{1+\varepsilon}(\mathbb T^3)$ for some positive $\varepsilon=\varepsilon(s)$.
\end{corollary}
\begin{proof} Indeed, the infinite differentiability follows from \eqref{2.Poisson} and \eqref{2.Poisson0} since differentiation of $G_{a,s}(x)$ in $x$ can only improve the rate of convergence. In addition, $M_{a,s}(x)-\frac1{(2\pi)^3}G_{a,s}(x)$ is smooth on the whole $\mathbb T^3$, so $M_{a,s}$ belongs to the same Lebesgue space $L^p$ as the function $|x|^{2s-3}$.
\end{proof}
\begin{remark} The technical assumption $s<1$ can be removed using the fact that $(-\Delta)^{s_1}(-\Delta)^{s_2}=(-\Delta)^{s_1+s_2}$ and, therefore
$$
G_{a,s_1+s_2}=G_{a,s_1}*G_{a,s_2}
$$
using the elementary properties of convolutions. Note that the result of Corollary \ref{Cor2.Grsm} can be obtained in a straightforward way using the standard PDEs technique, but we prefer to use the explicit formulas \eqref{2.Poisson} and \eqref{2.Poisson0} which look a bit more transparent. In addition, using the Poisson Summation Formula in a more sophisticated way (e.g. in the spirit of \cite{mar}, see also references therein), we can obtain much better (exponentially convergent) series for $M_{0,s}(x)$.
\end{remark}
We are now ready to state and prove the main result of this section.
\begin{theorem}\label{Th2.1c} Let $s>0$. Then
\begin{equation}\label{2.1cesaro}
M_{a,s}=M_{a,s}(\pi,\pi,\pi)=\lim_{N\to\infty}\sum_{n=1}^N\left(1-\frac nN\right)\frac{(-1)^nr_3(n)}{(a^2+n)^s}
\end{equation}
and, therefore, \eqref{2.sphere} is first order Cesaro summable by expanding spheres.
\end{theorem}
\begin{proof} As already mentioned above, it is sufficient to consider the case $0<s<1$ only. We also recall that \eqref{2.F} is nothing more than the formal Fourier expansion of the function $M_{a,s}(x)$; therefore, to verify the second equality in \eqref{2.1cesaro}, we need to check the convergence of the Fourier expansion of $M_{a,s}(x)$ at $x=(\pi,\pi,\pi)$ under first order Cesaro summation by expanding spheres. To do this, we use the analogue of the Riemann localization property for multi-dimensional Fourier series. Namely, as proved in \cite{stein}, this localization is satisfied for first order Cesaro summation by expanding spheres in the class of functions $f$ such that
$$
\int_{\mathbb T^3}|f(x)|\ln_+|f(x)|\,dx<\infty
$$
(this is exactly the critical case $\kappa=\frac{d-1}2=1$ for $d=3$).
Thus, since this condition is satisfied for $M_{a,s}(x)$ due to Corollary \ref{Cor2.Grsm}, the Fourier series for $M_{a,s}(x)$ and $M_{a,s}(x)-\frac1{(2\pi)^3}G_{a,s}(x)$ are convergent or divergent simultaneously. Since the second function is $C^\infty$ on the whole torus, we have the desired convergence, see also \cite{MFS} and references therein. Thus, the second equality in \eqref{2.1cesaro} is established. To verify the first equality, it is enough to mention that the series is second order Cesaro summable to $M_{a,s}$ due to Theorem \ref{Th2.2c}. This finishes the proof of the theorem.
\end{proof}
| 2,997 | 14,842 |
en
|
train
|
0.167.5
|
\section{Concluding remarks}\label{s3}
Note that formally Theorem \ref{Th2.1c} covers Theorem \ref{Th2.2c}. Nevertheless, we would like to present both methods. The one given in subsection \ref{s21} is not only very elementary and transparent, but also can be easily extended to summation by general expanding domains $N\Omega$ where $\Omega$ is a sufficiently regular bounded domain in $\R^3$ containing zero. Also the rate of convergence of second Cesaro sums can be easily controlled. Some numeric simulations for the case of the NaCl-Madelung constant ($a=0$, $s=\frac12$) are presented in the figure below
\begin{figure}[htbp]
\centering
\includegraphics[width=0.8\linewidth]{Cesaro2ndOrder.jpg}
\caption{A figure plotting the $N$th partial sums of \eqref{2.2good} with $a=0$ and $s=\frac12$ up to $N=5000$.}
\label{fig:coffee}
\end{figure}
\noindent and we clearly see the convergence to the Madelung constant
$$
M_{0,1/2}=-1.74756...
$$
The second method (used in the proof of Theorem \ref{Th2.1c}) is more delicate and strongly based on the Riemann localization for multiple Fourier series and classical results of \cite{stein}. This method is more restricted to expanding spheres and the rate of convergence is not clear. Some numeric simulation for the NaCl-Madelung constant is presented in the figure below
\begin{figure}[htbp]
\centering
\includegraphics[width=\linewidth]{Cesaro1stOrder.jpg}
\caption{A figure plotting the $N$th partial sums of \eqref{2.1cesaro} with $a=0$ and $s=\frac12$ up to $N=5000$.}
\label{fig:coffee1}
\end{figure}
\noindent and we see that the rate of convergence is essentially worse than for the case of second order Cesaro summation. As an advantage of this method, we mention the ability to extend it for more general class of exponential sums of the form \eqref{2.F}.
\par
Both methods are easily extendable to other dimensions $d\ne3$. Indeed, it is not difficult to see that the elementary method works for Cesaro summation of order $\kappa\ge d-2$ and the second one requires weaker assumption $\kappa\ge\frac{d-1}2$. Using the fact that the function $M_{a,s}(x)$ is more regular (belongs to some Sobolev space $W^{\varepsilon,p}(\mathbb T^3)$), together with the fact that Riemann localization holds for slightly subcritical values of $\kappa$ if this extra regularity is known (see e.g. \cite{MFS}), one can prove convergence for some $\kappa=\kappa(s)<\frac{d-1}2$ although the sharp values for $\kappa(s)$ seem to be unknown.
\begin{thebibliography}{9}
\bibitem{Flap}
N. Abatangelo and E. Valdinoci, {\it Getting Acquainted with the Fractional Laplacian}, in: Contemporary Research in Elliptic PDEs and Related Topics, Springer, (2019), 1--105.
\bibitem{MFS}
Sh. Alimov, R. Ashurov and A. Pulatov, {\it Multiple Fourier Series and Fourier Integrals}, in:
Commutative Harmonic Analysis IV, Springer, (1992), 1--95.
\bibitem{BDZ}
M. Bartuccelli,
J. Deane and S. Zelik, {\it Asymptotic expansions and extremals for the critical Sobolev and Gagliardo–Nirenberg inequalities on a torus}, Proc R. Soc. Edinburgh, Vol. 143, No. 3, (2013), 445--482.
\bibitem{Bor13}
J. Borwein, M. Glasser, R. McPhedran, J. Wan, and I. Zucker, {\it Lattice Sums Then and Now}, (Encyclopedia of Mathematics and its Applications), Cambridge: Cambridge University Press, 2013.
\bibitem{CR}
A. Chaba and R. Pathria, {\it
Evaluation of lattice sums using Poisson's summation formula. II},
J. Phys. A: Math. Gen.. Vol. 9. No. 9, (1976) 1411--1423.
\bibitem{Emer}
O. Emersleben, {\it \"Uber die Konvergenz der Reihen Epsteinscher Zetafunktionen}, Math. Nachr.,
Vol. 4, No. 1-6, (1950), 468--480.
\bibitem{SL}
D. Gurarie, {\it Symmetries and Laplacians},
in: Introduction to Harmonic Analysis, Group Representations and Applications, Vol. 174, North-Holland, 1992.
\bibitem{Ha}
G.H. Hardy, {\it Divergent series}, Clarendon Press, 1949.
\bibitem{mar}
S. Marshall, {\it A rapidly convergent modified Green's function for Laplace's equation in a rectangular region,} Proc. R. Soc. Lond. A. vol. 455 (1999), 1739--1766.
\bibitem{mar2000}
S. Marshall, {\it A periodic Green function for calculation of coulombic lattice potentials}, Journal of Physics: Condensed Matter, 12(21), (2000), 4575--4601.
\bibitem{Ram21}
M. Ortiz Ramirez, {\it Lattice points in d-dimensional spherical segments}, Monatsh Math, vol. 194, (2021), 167--179.
\bibitem{Trans}
L. Roncal and P. Stinga, {\it Transference of Fractional Laplacian Regularity}, in: Special Functions, Partial Differential Equations, and Harmonic Analysis, Springer (2014), 203--212.
\bibitem{stein}
E. Stein, {\it Localization and Summability of Multiple
Fourier Series},
Acta Math.
Vol. 100, No. 1-2, (1958), 93--146.
\bibitem{Watson}
G. Watson, {\it A Treatise on the Theory of Bessel Functions,} 2nd ed. Cambridge, England: Cambridge University Press, 1966.
\bibitem{ZI}
S. Zelik and A. Ilyin, {\it Green's function asymptotics and sharp interpolation inequalities}, Uspekhi Mat. Nauk, 69:2(416) (2014), 23–76;
\end{thebibliography}
\end{document}
| 1,828 | 14,842 |
en
|
train
|
0.168.0
|
\begin{document}
\thispagestyle{empty}
\begin{center}
\section*{Structures and Numerical Ranges of Power Partial Isometries}
\vspace*{3mm}
\begin {tabular}{lcl}
\hspace*{1cm}{\bf Hwa-Long Gau}$^{*1}$\hspace*{1cm}&and & \hspace*{1cm}{\bf Pei Yuan Wu}$^2$
\vspace*{3mm}\\
Department of Mathematics & & Department of Applied Mathematics\\
National Central University&& National Chiao Tung University\\
Chung-Li 32001, Taiwan&& Hsinchu 30010, Taiwan\\
Republic of China&&Republic of China
\end{tabular}
\end{center}
\centerline{\bf Abstract}
We derive a matrix model, under unitary similarity, of an $n$-by-$n$ matrix $A$ such that $A, A^2, \ldots, A^k$ ($k\ge 1$) are all partial isometries, which generalizes the known fact that if $A$ is a partial isometry, then it is unitarily similar to a matrix of the form ${\scriptsize\left[\begin{array}{cc} 0 & B\\ 0 & C\end{array}\right]}$ with $B^*B+C^*C=I$. Using this model, we show that if $A$ has ascent $k$ and $A, A^2, \ldots, A^{k-1}$ are partial isometries, then the numerical range $W(A)$ of $A$ is a circular disc centered at the origin if and only if $A$ is unitarily similar to a direct sum of Jordan blocks whose largest size is $k$. As an application, this yields that, for any $S_n$-matrix $A$, $W(A)$ (resp., $W(A\otimes A)$) is a circular disc centered at the origin if and only if $A$ is unitarily similar to the Jordan block $J_n$. Finally, examples are given to show that the conditions that $W(A)$ and $W(A\otimes A)$ are circular discs at 0 are independent of each other for a general matrix $A$.
\noindent
\emph{AMS classification}: 15A99, 15A60\\
\emph{Keywords}: Power partial isometry, numerical range, $S_n$-matrix.
${}^*$Corresponding author.
E-mail addresses: [email protected] (H.-L. Gau), [email protected] (P. Y. Wu)
${}^1$Research supported by the National Science Council of the Republic of China under NSC-102-2115-M-008-007.
${}^2$Research supported by the National Science Council of the Republic of China under NSC-102-2115-M-009-007 and by the MOE-ATU project.
\noindent
{\bf\large 1. Introduction}
An $n$-by-$n$ complex matrix $A$ is a \emph{partial isometry} if $\|Ax\|=\|x\|$ for any vector $x$ in the orthogonal complement $(\ker A)^{\perp}$ in $\mathbb{C}^n$ of the kernel of $A$, where $\|\cdot\|$ denotes the standard norm in $\mathbb{C}^n$. The study of such matrices or, more generally, such operators on a Hilbert space dates back to 1962 \cite{6}. Their general properties have since been summarized in \cite[Chapter 15]{5}.
In this paper, we study matrices $A$ such that, for some $k\ge 1$, the powers $A, A^2, \ldots, A^k$ are all partial isometries. In Section 2 below, we derive matrix models, under unitary similarity, of such a matrix (Theorems 2.2 and 2.4). They are generalizations of the known fact that $A$ is a partial isometry if and only if it is unitarily similar to a matrix of the form ${\scriptsize\left[\begin{array}{cc} 0 & B\\ 0 & C\end{array}\right]}$ with $B^*B+C^*C=I$ (Lemma 2.1).
Recall that the \emph{ascent} of a matrix, denoted by $a(A)$, is the minimal integer $k\ge 0$ for which $\ker A^k=\ker A^{k+1}$. It is easily seen that $a(A)$ is equal to the size of the largest Jordan block associated with the eigenvalue 0 in the Jordan form of $A$. We denote the $n$-by-$n$ \emph{Jordan block}
$$\left[
\begin{array}{cccc}
0 & 1 & & \\
& 0 & \ddots & \\
& & \ddots & 1 \\
& & & 0
\end{array}
\right]$$
by $J_n$. The \emph{numerical range} $W(A)$ of $A$ is the subset $\{\langle Ax, x\rangle : x\in \mathbb{C}^n, \|x\|=1\}$ of the complex plane $\mathbb{C}$, where $\langle\cdot, \cdot\rangle$ is the standard inner product in $\mathbb{C}^n$. It is known that $W(A)$ is a nonempty compact convex subset, and $W(J_n)=\{z\in\mathbb{C} : |z|\le\cos(\pi/(n+1))\}$ (cf. \cite[Proposition 1]{4}). For other properties of the numerical range, the readers may consult \cite[Chapter 22]{5} or \cite[Chapter 1]{9}.
Using the matrix model for power partial isometries, we show that if $a(A)=k\ge 2$ and $A, A^2, \ldots, A^{k-1}$ are all partial isometries, then the following are equivalent: (a) $W(A)$ is a circular disc centered at the origin, (b) $A$ is unitarily similar to a direct sum $J_{k_1}\oplus J_{k_2}\oplus\cdots\oplus J_{k_{\ell}}$ with $k=k_1\ge k_2\ge\cdots\ge k_{\ell}\ge 1$, and (c) $A$ has no unitary part and $A^j$ is a partial isometry for all $j\ge 1$ (Theorem 2.6). An example is given, which shows that the number ``$k-1$'' in the above assumption is sharp (Example 2.7).
In Section 3, we consider the class of $S_n$-matrices. Recall that an $n$-by-$n$ matrix $A$ is of {\em class} $S_n$ if $A$ is a contraction ($\|A\|\equiv\max\{\|Ax\|: x\in\mathbb{C}^n, \|x\|=1\}\le 1$), its eigenvalues are all in $\mathbb{D}$ ($\equiv\{z\in\mathbb{C} : |z|<1\}$), and it satisfies ${\rm rank\,}(I_n-A^*A)=1$. Such matrices are the finite-dimensional versions of the \emph{compression of the shift} $S(\phi)$, first studied by Sarason \cite{10}. They also feature prominently in the Sz.-Nagy--Foia\c{s} contraction theory \cite{11}. It turns out that a hitherto unnoticed property of such matrices is that if $A$ is of class $S_n$ and $k$ is its ascent, then $A, A^2, \ldots, A^k$ are all partial isometries. Thus the structure theorems in Section 2 are applicable to $A$ or even to $A\otimes A$, the tensor product of $A$ with itself. As a consequence, we obtain that, for an $S_n$-matrix $A$, the numerical range $W(A)$ (resp., $W(A\otimes A)$) is a circular disc centered at the origin if and only if $A$ is unitarily similar to the Jordan block $J_n$ (Theorem 3.3). The assertion concerning $W(A)$ is known before (cf. \cite[Lemma 5]{12}). Finally, we give examples to show that if $A$ is a general matrix, then the conditions for the circularity (at the origin) of $W(A)$ and $W(A\otimes A)$ are independent of each other (Examples 3.5 and 3.6).
We use $I_n$ and $0_n$ to denote the $n$-by-$n$ identity and zero matrices, respectively. An identity or zero matrix with unspecified size is simply denoted by $I$ or $0$. For an $n$-by-$n$ matrix $A$, ${\rm nullity\, } A$ is used for $\dim\ker A$, and ${\rm rank\,} A$ for its rank. The \emph{real part} of $A$ is ${\rm Re\, } A=(A+A^*)/2$. The \emph{geometric} and \emph{algebraic multiplicities} of an eigenvalue $\lambda$ of $A$ are ${\rm nullity\, }(A-\lambda I_n)$ and the multiplicity of the zero $\lambda$ in the characteristic polynomial $\det(zI_n-A)$ of $A$, respectively. An $n$-by-$n$ diagonal matrix with diagonal entries $a_1, \ldots, a_n$ is denoted by ${\rm diag\, }(a_1, \ldots, a_n)$.
\noindent
{\bf \large 2. Power Partial Isometries}
We start with the following characterizations of partial isometries.
{\bf Lemma 2.1.} \emph{The following conditions are equivalent for an $n$-by-$n$ matrix $A$}:
(a) \emph{$A$ is a partial isometry},
(b) \emph{$A^*A$ is an} (\emph{orthogonal}) \emph{projection}, \emph{and}
(c) \emph{$A$ is unitarily similar to a matrix of the form ${\scriptsize\left[\begin{array}{cc} 0 & B\\ 0 & C\end{array}\right]}$ with $B^*B+C^*C=I$}.
\noindent
\emph{In this case}, ${\scriptsize\left[\begin{array}{cc} 0 & B\\ 0 & C\end{array}\right]}$ \emph{acts on} $\mathbb{C}^n=\ker A\oplus(\ker A)^{\perp}$.
Its easy proof is left to the readers.
The next theorem gives the matrix model, under unitary similarity, of a matrix $A$ with $A, A^2, \ldots, A^k$ ($1\le k\le a(A)$) partial isometries.
{\bf Theorem 2.2.} \emph{Let $A$ be an $n$-by-$n$ matrix}, $\ell\ge 1$, \emph{and} $k=\min\{\ell, a(A)\}$. \emph{Then the following conditions are equivalent}:
(a) $A, A^2, \ldots, A^k$ \emph{are partial isometries},
(b) \emph{$A$ is unitarily similar to a matrix of the form}
$$A'\equiv\left[\begin{array}{ccccc} 0 & A_1 & & & \\ & 0 & \ddots & & \\ & & \ddots & A_{k-1} & \\ & & & 0 & B\\ & & & & C\end{array}\right] \ on \ \mathbb{C}^n=\mathbb{C}^{n_1}\oplus\cdots\oplus\mathbb{C}^{n_k}\oplus\mathbb{C}^{m},$$
\emph{where the} $A_j$'\emph{s satisfy $A_j^*A_j=I_{n_{j+1}}$ for $1\le j\le k-1$}, \emph{and $B$ and $C$ satisfy $B^*B+C^*C=I_m$}. \emph{In this case}, $n_j={\rm nullity\, } A$ \emph{if} $j=1$, ${\rm nullity\, } A^j-{\rm nullity\, } A^{j-1}$ \emph{if} $2\le j\le k$, \emph{and} $m={\rm rank\,} A^k$,
(c) \emph{$A$ is unitarily similar to a matrix of the form}
$$A''\equiv\left[\begin{array}{ccccc} 0 & I & & & \\ & 0 & \ddots & & \\ & & \ddots & I & \\ & & & 0 & B\\ & & & & C\end{array}\right]\oplus(J_{k-1}\oplus\cdots\oplus J_{k-1})\oplus\cdots\oplus(J_1\oplus\cdots\oplus J_1)$$
$$on \ \mathbb{C}^n=\underbrace{\mathbb{C}^{n_k}\oplus\cdots\oplus\mathbb{C}^{n_k}}_{k}\oplus\mathbb{C}^{m}
\oplus\underbrace{\mathbb{C}^{k-1}\oplus\cdots\oplus\mathbb{C}^{k-1}}_{n_{k-1}-n_k}\oplus\cdots\oplus
\underbrace{\mathbb{C}\oplus\cdots\oplus\mathbb{C}}_{n_1-n_2},$$
\emph{where the} $n_j$'\emph{s}, $1\le j\le k$, \emph{and $m$ are as in} (b), \emph{and $B$ and $C$ satisfy $B^*B+C^*C=I_m$}.
For the proof of Theorem 2.2, we need the following lemma.
{\bf Lemma 2.3.} \emph{Let $A=[A_{ij}]_{i,j=1}^n$ be a block matrix with $\|A\|\le 1$}, \emph{and let $\alpha$ be a nonempty subset of} $\{1, 2, \ldots, n\}$. \emph{If for some} $j_0$, $1\le j_0\le n$, \emph{we have} $\sum_{i\in\alpha}A_{i j_0}^*A_{i j_0}=I$, \emph{then $A_{i j_0}=0$ for all $i$ not in $\alpha$}.
{\em Proof}. Since $\|A\|\le 1$, we have $A^*A\le I$. Thus the same is true for the $(j_0, j_0)$-block of $A^*A$, that is, $\sum_{i=1}^n A_{i j_0}^*A_{i j_0}\le I$. Together with our assumption that $\sum_{i\in\alpha}A_{i j_0}^*A_{i j_0}=I$, this yields $\sum_{i\not\in\alpha}A_{i j_0}^*A_{i j_0}\le 0$. It follows immediately that $A_{i j_0}=0$ for all $i$ not in $\alpha$. \hspace{2mm} $\blacksquare$
{\em Proof of Theorem $2.2$}. To prove (a) $\Rightarrow$ (b), let $H_1=\ker A$, $H_j=\ker A^j\ominus\ker A^{j-1}$ for $2\le j\le\ell$, and $H_{\ell+1}=\mathbb{C}^n\ominus\ker A^{\ell}$. Note that if $\ell>a(A)$, then at most $H_1, \ldots, H_{k+1}$ are present. Hence $A$ is unitarily similar to the block matrix $A'\equiv[A_{ij}]_{i,j=1}^{k+1}$ on $\mathbb{C}^n=H_1\oplus\cdots\oplus H_{k+1}$. It is easily seen that $A_{ij}=0$ for any $(i,j)\neq(k+1,k+1)$ with $1\le j\le i\le k+1$. For the brevity of notation, let $A_j=A_{j, j+1}$, $1\le j\le k-1$, $B=A_{k, k+1}$, and $C=A_{k+1, k+1}$. We now check, by induction on $j$, that $A_j^*A_j=I_{n_{j+1}}$ for all $j$, and $A_{ij}=0$ for $1\le i\le j-2\le k-2$.
| 3,998 | 31,795 |
en
|
train
|
0.168.1
|
\noindent
\emph{In this case}, ${\scriptsize\left[\begin{array}{cc} 0 & B\\ 0 & C\end{array}\right]}$ \emph{acts on} $\mathbb{C}^n=\ker A\oplus(\ker A)^{\perp}$.
Its easy proof is left to the readers.
The next theorem gives the matrix model, under unitary similarity, of a matrix $A$ with $A, A^2, \ldots, A^k$ ($1\le k\le a(A)$) partial isometries.
{\bf Theorem 2.2.} \emph{Let $A$ be an $n$-by-$n$ matrix}, $\ell\ge 1$, \emph{and} $k=\min\{\ell, a(A)\}$. \emph{Then the following conditions are equivalent}:
(a) $A, A^2, \ldots, A^k$ \emph{are partial isometries},
(b) \emph{$A$ is unitarily similar to a matrix of the form}
$$A'\equiv\left[\begin{array}{ccccc} 0 & A_1 & & & \\ & 0 & \ddots & & \\ & & \ddots & A_{k-1} & \\ & & & 0 & B\\ & & & & C\end{array}\right] \ on \ \mathbb{C}^n=\mathbb{C}^{n_1}\oplus\cdots\oplus\mathbb{C}^{n_k}\oplus\mathbb{C}^{m},$$
\emph{where the} $A_j$'\emph{s satisfy $A_j^*A_j=I_{n_{j+1}}$ for $1\le j\le k-1$}, \emph{and $B$ and $C$ satisfy $B^*B+C^*C=I_m$}. \emph{In this case}, $n_j={\rm nullity\, } A$ \emph{if} $j=1$, ${\rm nullity\, } A^j-{\rm nullity\, } A^{j-1}$ \emph{if} $2\le j\le k$, \emph{and} $m={\rm rank\,} A^k$,
(c) \emph{$A$ is unitarily similar to a matrix of the form}
$$A''\equiv\left[\begin{array}{ccccc} 0 & I & & & \\ & 0 & \ddots & & \\ & & \ddots & I & \\ & & & 0 & B\\ & & & & C\end{array}\right]\oplus(J_{k-1}\oplus\cdots\oplus J_{k-1})\oplus\cdots\oplus(J_1\oplus\cdots\oplus J_1)$$
$$on \ \mathbb{C}^n=\underbrace{\mathbb{C}^{n_k}\oplus\cdots\oplus\mathbb{C}^{n_k}}_{k}\oplus\mathbb{C}^{m}
\oplus\underbrace{\mathbb{C}^{k-1}\oplus\cdots\oplus\mathbb{C}^{k-1}}_{n_{k-1}-n_k}\oplus\cdots\oplus
\underbrace{\mathbb{C}\oplus\cdots\oplus\mathbb{C}}_{n_1-n_2},$$
\emph{where the} $n_j$'\emph{s}, $1\le j\le k$, \emph{and $m$ are as in} (b), \emph{and $B$ and $C$ satisfy $B^*B+C^*C=I_m$}.
For the proof of Theorem 2.2, we need the following lemma.
{\bf Lemma 2.3.} \emph{Let $A=[A_{ij}]_{i,j=1}^n$ be a block matrix with $\|A\|\le 1$}, \emph{and let $\alpha$ be a nonempty subset of} $\{1, 2, \ldots, n\}$. \emph{If for some} $j_0$, $1\le j_0\le n$, \emph{we have} $\sum_{i\in\alpha}A_{i j_0}^*A_{i j_0}=I$, \emph{then $A_{i j_0}=0$ for all $i$ not in $\alpha$}.
{\em Proof}. Since $\|A\|\le 1$, we have $A^*A\le I$. Thus the same is true for the $(j_0, j_0)$-block of $A^*A$, that is, $\sum_{i=1}^n A_{i j_0}^*A_{i j_0}\le I$. Together with our assumption that $\sum_{i\in\alpha}A_{i j_0}^*A_{i j_0}=I$, this yields $\sum_{i\not\in\alpha}A_{i j_0}^*A_{i j_0}\le 0$. It follows immediately that $A_{i j_0}=0$ for all $i$ not in $\alpha$. \hspace{2mm} $\blacksquare$
{\em Proof of Theorem $2.2$}. To prove (a) $\Rightarrow$ (b), let $H_1=\ker A$, $H_j=\ker A^j\ominus\ker A^{j-1}$ for $2\le j\le\ell$, and $H_{\ell+1}=\mathbb{C}^n\ominus\ker A^{\ell}$. Note that if $\ell>a(A)$, then at most $H_1, \ldots, H_{k+1}$ are present. Hence $A$ is unitarily similar to the block matrix $A'\equiv[A_{ij}]_{i,j=1}^{k+1}$ on $\mathbb{C}^n=H_1\oplus\cdots\oplus H_{k+1}$. It is easily seen that $A_{ij}=0$ for any $(i,j)\neq(k+1,k+1)$ with $1\le j\le i\le k+1$. For the brevity of notation, let $A_j=A_{j, j+1}$, $1\le j\le k-1$, $B=A_{k, k+1}$, and $C=A_{k+1, k+1}$. We now check, by induction on $j$, that $A_j^*A_j=I_{n_{j+1}}$ for all $j$, and $A_{ij}=0$ for $1\le i\le j-2\le k-2$.
For $j=1$, since $A$ is a partial isometry, $A^*A$ is an (orthogonal) projection by Lemma 2.1. We obviously have $A^*A=0$ on $H_1=\ker A$ and $A^*A=I$ on $H_1^{\perp}=H_2\oplus\cdots\oplus H_{k+1}$. Thus $A'^*A'=0\oplus I\oplus\cdots\oplus I$ on $\mathbb{C}^n=H_1\oplus H_2\oplus\cdots\oplus H_{k+1}$. Since $A'^*A'$ is of the form
$$\left[
\begin{array}{ccccc}
0 & 0 & 0 & \cdots & 0 \\
0 & A_1^*A_1 & * & \cdots & * \\
0 & * & * & \cdots & * \\
\vdots & \vdots & \vdots & & \vdots \\
0 & * & * & \cdots & *
\end{array}
\right],$$
we conclude that $A_1^*A_1=I$.
Next assume that, for some $p$ ($2\le p<k$), $A_j^*A_j=I$ for all $j$, $1\le j\le p-1$, and all the blocks in $A'$ which are above $A_1, \ldots, A_{p-1}$ are zero. We now check that $A_{p}^*A_{p}=I$ and all blocks above $A_{p}$ are zero. Since $A^{p}$ is a partial isometry, ${A^p}^*A^{p}$ is an (orthogonal) projection with kernel equal to $H_1\oplus\cdots\oplus H_{p}$. Thus ${A'^p}^*A'^{p}=\underbrace{0\oplus\cdots\oplus 0}_{p}\oplus \underbrace{I\oplus\cdots\oplus I}_{k-p+1}$. But from
$$A'=\left[
\begin{array}{ccccccccc}
0 & A_1 & 0 & \cdots & 0 & * & \cdots & * & * \\
& \ddots & \ddots & \ddots & \vdots & \vdots & & \vdots & \vdots \\
& & \ddots & \ddots & 0 & \vdots & & \vdots & \vdots \\
& & & \ddots & A_{p-1} & * & & \vdots & \vdots \\
& & & & 0 & A_{p} & \ddots & \vdots & \vdots \\
& & & & & 0 & \ddots & * & \vdots \\
& & & & & & \ddots & A_{k-1} & * \\
& & & & & & & 0 & B \\
& & & & & & & & C \\
\end{array}
\right],$$
we have
$$A'^{p}=\begin{array}{ll} \ \ \ \overbrace{\ \hspace{15mm} \ }^{\displaystyle p} \ \ \ \overbrace{\ \hspace{71mm} \ }^{\displaystyle k-p+1} & \\ \left[
\begin{array}{cccccccc}
0 & \cdots & 0 & \prod_{j=1}^{p}A_j & * & \cdots & * & * \\
\cdot & & & 0 & \prod_{j=2}^{p+1}A_j & \ddots & \vdots & \vdots \\
\cdot & & & & \ddots & \ddots & * & \vdots \\
\cdot & & & & & \ddots & \prod_{j=k-p}^{k-1}A_j & * \\
\cdot & & & & & & 0 & * \\
\cdot & & & & & & \vdots & \vdots \\
\cdot & & & & & & 0 & BC^{p-1} \\
0 & \cdot & \cdot & \cdot & \cdot & \cdot & 0 & C^{p} \\
\end{array}
\right] & \hspace{-11mm}\begin{array}{l}\left.\begin{array}{l} {\ } \\ {\ } \\ {\ }\\ {\ }\end{array}\right\}k-p\\ \left.\begin{array}{l}{\ } \\ {\ } \\ {\ }\\ {\ }\end{array}\right\}p+1\end{array}\end{array}.$$
Thus the $(p+1, p+1)$-block of ${A'^p}^*A'^{p}$ is $(\prod_{j=1}^{p}A_j)^*(\prod_{j=1}^{p}A_j)=A_{p}^*A_{p}$, which is equal to $I$ from above. Lemma 2.3 then implies that all the blocks in $A'$ which are above $A_{p}$ are zero. Thus, by induction, the first $k$ block columns of $A'$ are of the asserted form.
| 2,797 | 31,795 |
en
|
train
|
0.168.2
|
For $j=1$, since $A$ is a partial isometry, $A^*A$ is an (orthogonal) projection by Lemma 2.1. We obviously have $A^*A=0$ on $H_1=\ker A$ and $A^*A=I$ on $H_1^{\perp}=H_2\oplus\cdots\oplus H_{k+1}$. Thus $A'^*A'=0\oplus I\oplus\cdots\oplus I$ on $\mathbb{C}^n=H_1\oplus H_2\oplus\cdots\oplus H_{k+1}$. Since $A'^*A'$ is of the form
$$\left[
\begin{array}{ccccc}
0 & 0 & 0 & \cdots & 0 \\
0 & A_1^*A_1 & * & \cdots & * \\
0 & * & * & \cdots & * \\
\vdots & \vdots & \vdots & & \vdots \\
0 & * & * & \cdots & *
\end{array}
\right],$$
we conclude that $A_1^*A_1=I$.
Next assume that, for some $p$ ($2\le p<k$), $A_j^*A_j=I$ for all $j$, $1\le j\le p-1$, and all the blocks in $A'$ which are above $A_1, \ldots, A_{p-1}$ are zero. We now check that $A_{p}^*A_{p}=I$ and all blocks above $A_{p}$ are zero. Since $A^{p}$ is a partial isometry, ${A^p}^*A^{p}$ is an (orthogonal) projection with kernel equal to $H_1\oplus\cdots\oplus H_{p}$. Thus ${A'^p}^*A'^{p}=\underbrace{0\oplus\cdots\oplus 0}_{p}\oplus \underbrace{I\oplus\cdots\oplus I}_{k-p+1}$. But from
$$A'=\left[
\begin{array}{ccccccccc}
0 & A_1 & 0 & \cdots & 0 & * & \cdots & * & * \\
& \ddots & \ddots & \ddots & \vdots & \vdots & & \vdots & \vdots \\
& & \ddots & \ddots & 0 & \vdots & & \vdots & \vdots \\
& & & \ddots & A_{p-1} & * & & \vdots & \vdots \\
& & & & 0 & A_{p} & \ddots & \vdots & \vdots \\
& & & & & 0 & \ddots & * & \vdots \\
& & & & & & \ddots & A_{k-1} & * \\
& & & & & & & 0 & B \\
& & & & & & & & C \\
\end{array}
\right],$$
we have
$$A'^{p}=\begin{array}{ll} \ \ \ \overbrace{\ \hspace{15mm} \ }^{\displaystyle p} \ \ \ \overbrace{\ \hspace{71mm} \ }^{\displaystyle k-p+1} & \\ \left[
\begin{array}{cccccccc}
0 & \cdots & 0 & \prod_{j=1}^{p}A_j & * & \cdots & * & * \\
\cdot & & & 0 & \prod_{j=2}^{p+1}A_j & \ddots & \vdots & \vdots \\
\cdot & & & & \ddots & \ddots & * & \vdots \\
\cdot & & & & & \ddots & \prod_{j=k-p}^{k-1}A_j & * \\
\cdot & & & & & & 0 & * \\
\cdot & & & & & & \vdots & \vdots \\
\cdot & & & & & & 0 & BC^{p-1} \\
0 & \cdot & \cdot & \cdot & \cdot & \cdot & 0 & C^{p} \\
\end{array}
\right] & \hspace{-11mm}\begin{array}{l}\left.\begin{array}{l} {\ } \\ {\ } \\ {\ }\\ {\ }\end{array}\right\}k-p\\ \left.\begin{array}{l}{\ } \\ {\ } \\ {\ }\\ {\ }\end{array}\right\}p+1\end{array}\end{array}.$$
Thus the $(p+1, p+1)$-block of ${A'^p}^*A'^{p}$ is $(\prod_{j=1}^{p}A_j)^*(\prod_{j=1}^{p}A_j)=A_{p}^*A_{p}$, which is equal to $I$ from above. Lemma 2.3 then implies that all the blocks in $A'$ which are above $A_{p}$ are zero. Thus, by induction, the first $k$ block columns of $A'$ are of the asserted form.
Finally, we check that $B^*B+C^*C=I_m$. If this is the case, then all the blocks in $A'$ above $B$ and $C$ are zero by Lemma 2.3 again and we will be done. As above, $A'^{k-1}$ is of the form
$$\left[\begin{array}{ccccc}
0 & \cdots & 0 & \prod_{j=1}^{k-1}A_j & D_1\\
0 & \cdots & 0 & 0 & D_2\\
\vdots & & \vdots & \vdots & \vdots\\
0 & \cdots & 0 & 0 & D_k\\
0 & \cdots & 0 & 0 & C^{k-1}
\end{array}\right],$$
and the (orthogonal) projection ${A'^{k-1}}^*A'^{k-1}$ equals $\underbrace{0\oplus\cdots\oplus 0}_{k-1}\oplus I\oplus I$ on $\mathbb{C}^n=H_1\oplus\cdots\oplus H_{k-1}\oplus H_k\oplus H_{k+1}$. Hence the $(k+1, k+1)$-block of ${A'^{k-1}}^*A'^{k-1}$ is
\begin{equation}\label{e1}
(\sum_{j=1}^k D_j^*D_j)+{C^{k-1}}^*C^{k-1},
\end{equation}
which is equal to $I$. Similarly,
$$A'^k=A'^{k-1}A'=\left[\begin{array}{cccc}
0 & \cdots & 0 & (\prod_{j=1}^{k-1}A_j)B+D_1C\\
0 & \cdots & 0 & D_2C\\
\vdots & & \vdots & \vdots\\
0 & \cdots & 0 & D_kC\\
0 & \cdots & 0 & C^{k}
\end{array}\right]$$
and the $(k+1, k+1)$-block of ${A'^k}^*A'^k$,
\begin{equation}\label{e2}
B^*(\prod_{j=1}^{k-1}A_j)^*(\prod_{j=1}^{k-1}A_j)B+
B^*(\prod_{j=1}^{k-1}A_j)^*D_1C+C^*D_1^*(\prod_{j=1}^{k-1}A_j)B+(\sum_{j=1}^kC^*D_j^*D_jC)+{C^k}^*C^k,
\end{equation}
is also equal to $I_m$. We deduce from ({\rm Re\, }f{e1}), ({\rm Re\, }f{e2}) and $A_j^*A_j=I$ for $1\le j\le k-1$ that
\begin{equation}\label{e3}
B^*B+B^*(\prod_{j=1}^{k-1}A_j)^*D_1C+C^*D_1^*(\prod_{j=1}^{k-1}A_j)B+{C}^*C=I_m.
\end{equation}
To complete the proof, we need only show that $(\prod_{j=1}^{k-1}A_j)^*D_1=0$. Indeed, since $(\prod_{j=1}^{k-1}A_j)^*(\prod_{j=1}^{k-1}A_j)=I_{n_k}$, there is an $n_1$-by-$n_1$ unitary matrix $U$ such that $U^*(\prod_{j=1}^{k-1}A_j)={\scriptsize\left[\begin{array}{c} I_{n_k} \\ 0\end{array}\right]}$. Then $V\equiv U\oplus \underbrace{I\oplus\cdots\oplus I}_k$ is unitary and
$$V^*A'^{k-1}V=\left[\begin{array}{ccccc}
0 & \cdots & 0 & U^*(\prod_{j=1}^{k-1}A_j) & U^*D_1\\
0 & \cdots & 0 & 0 & D_2\\
\vdots & & \vdots & \vdots & \vdots\\
0 & \cdots & 0 & 0 & D_k\\
0 & \cdots & 0 & 0 & C^{k-1}
\end{array}\right]=\left[\begin{array}{ccccc}
0 & \cdots & 0 & \left[\begin{array}{c} I_{n_k} \\ 0\end{array}\right] & \left[\begin{array}{c} 0 \\ D'_1\end{array}\right]\\
0 & \cdots & 0 & 0 & D_2\\
\vdots & & \vdots & \vdots & \vdots\\
0 & \cdots & 0 & 0 & D_k\\
0 & \cdots & 0 & 0 & C^{k-1}
\end{array}\right].$$
Hence
$$(\prod_{j=1}^{k-1}A_j)^*D_1=\left[I_{n_k} \ 0\right]U^*U\left[\begin{array}{c} 0 \\ D'_1\end{array}\right]=\left[I_{n_k} \ 0\right]\left[\begin{array}{c} 0 \\ D'_1\end{array}\right]=0$$
as asserted. We conclude from \eqref{e3} that $B^*B+C^*C=I_m$. Moreover, the sizes of the blocks in $A'$ are as asserted from our construction. This proves (a) $\Rightarrow$ (b).
Next we prove (b) $\Rightarrow$ (c). Let $A'$ be as in (b), and let $n_1, \ldots, n_k, m$ be the sizes of the diagonal blocks of $A'$. Since $A_j^*A_j=I_{n_{j+1}}$ for all $j$, $1\le j\le k-1$, we have $n_1\ge n_2\ge\cdots\ge n_k$. Also, from $A_{k-1}^*A_{k-1}=I_{n_k}$, we deduce that there is a unitary matrix $U_{k-1}$ of size $n_{k-1}$ such that $U_{k-1}^*A_{k-1}={\scriptsize\left[\begin{array}{c} I_{n_k} \\ 0\end{array}\right]}$. Similarly, since $(A_{k-2}U_{k-1})^*(A_{k-2}U_{k-1})=I_{n_{k-1}}$, there is a unitary $U_{k-2}$ of size $n_{k-2}$ such that $U_{k-2}^*(A_{k-2}U_{k-1})={\scriptsize\left[\begin{array}{c} I_{n_{k-1}} \\ 0\end{array}\right]}$. Proceeding inductively, we obtain a unitary $U_j$ of size $n_j$ satisfying $U^*_j(A_jU_{j+1})={\scriptsize\left[\begin{array}{c} I_{n_{j+1}} \\ 0\end{array}\right]}$ for each $j$, $1\le j\le k-3$. If $U=U_1\oplus\cdots\oplus U_{k-1}\oplus I_{n_k}\oplus I_m$, then
$$U^*A'U=\left[\begin{array}{cccccc} 0 & U_1^*A_1U_2 & & & & \\ & 0 & \ddots & & & \\ & & \ddots & U_{k-2}^*A_{k-2}U_{k-1} & & \\ & & & 0 & U_{k-1}^*A_{k-1} & \\ & & & & 0 & B\\ & & & & & C\end{array}\right]$$
$$=\left[\begin{array}{cccccc} 0 & \left[\begin{array}{c} I_{n_{2}} \\ 0\end{array}\right] & & & & \\ & 0 & \ddots & & & \\ & & \ddots & \left[\begin{array}{c} I_{n_{k-1}} \\ 0\end{array}\right] & & \\ & & & 0 & \left[\begin{array}{c} I_{n_{k}} \\ 0\end{array}\right] & \\ & & & & 0 & B\\ & & & & & C\end{array}\right].$$
Note that this last matrix is unitarily similar to the one asserted in (c).
| 3,432 | 31,795 |
en
|
train
|
0.168.3
|
Next we prove (b) $\Rightarrow$ (c). Let $A'$ be as in (b), and let $n_1, \ldots, n_k, m$ be the sizes of the diagonal blocks of $A'$. Since $A_j^*A_j=I_{n_{j+1}}$ for all $j$, $1\le j\le k-1$, we have $n_1\ge n_2\ge\cdots\ge n_k$. Also, from $A_{k-1}^*A_{k-1}=I_{n_k}$, we deduce that there is a unitary matrix $U_{k-1}$ of size $n_{k-1}$ such that $U_{k-1}^*A_{k-1}={\scriptsize\left[\begin{array}{c} I_{n_k} \\ 0\end{array}\right]}$. Similarly, since $(A_{k-2}U_{k-1})^*(A_{k-2}U_{k-1})=I_{n_{k-1}}$, there is a unitary $U_{k-2}$ of size $n_{k-2}$ such that $U_{k-2}^*(A_{k-2}U_{k-1})={\scriptsize\left[\begin{array}{c} I_{n_{k-1}} \\ 0\end{array}\right]}$. Proceeding inductively, we obtain a unitary $U_j$ of size $n_j$ satisfying $U^*_j(A_jU_{j+1})={\scriptsize\left[\begin{array}{c} I_{n_{j+1}} \\ 0\end{array}\right]}$ for each $j$, $1\le j\le k-3$. If $U=U_1\oplus\cdots\oplus U_{k-1}\oplus I_{n_k}\oplus I_m$, then
$$U^*A'U=\left[\begin{array}{cccccc} 0 & U_1^*A_1U_2 & & & & \\ & 0 & \ddots & & & \\ & & \ddots & U_{k-2}^*A_{k-2}U_{k-1} & & \\ & & & 0 & U_{k-1}^*A_{k-1} & \\ & & & & 0 & B\\ & & & & & C\end{array}\right]$$
$$=\left[\begin{array}{cccccc} 0 & \left[\begin{array}{c} I_{n_{2}} \\ 0\end{array}\right] & & & & \\ & 0 & \ddots & & & \\ & & \ddots & \left[\begin{array}{c} I_{n_{k-1}} \\ 0\end{array}\right] & & \\ & & & 0 & \left[\begin{array}{c} I_{n_{k}} \\ 0\end{array}\right] & \\ & & & & 0 & B\\ & & & & & C\end{array}\right].$$
Note that this last matrix is unitarily similar to the one asserted in (c).
To prove (c) $\Rightarrow$ (a), we may assume that
$$A''=\left[
\begin{array}{ccccc}
0 & I & & & \\
& 0 & \ddots & & \\
& & \ddots & I & \\
& & & 0 & B \\
& & & & C
\end{array}
\right]$$
with $B^*B+C^*C=I_m$. This is because powers of any Jordan block are all partial isometries and the direct sums of partial isometries are again partial isometries. Simple computations show that
$$A''^j=\begin{array}{ll} \ \ \ \overbrace{\ \hspace{15mm} \ }^{\displaystyle j} \ \overbrace{\ \hspace{36mm} \ }^{\displaystyle k-j+1} & \\ \left[
\begin{array}{cccccccc}
0 & \cdots & 0 & I & 0 & \cdots & 0 & 0 \\
& \cdot & & 0 & \ddots & \ddots & \vdots & \vdots \\
& & \cdot & & \ddots & \ddots & 0 & \vdots \\
& & & \cdot & & \ddots & I & 0 \\
& & & & \cdot & & 0 & B \\
& & & & & \cdot & \vdots & \vdots \\
& & & & & & 0 & BC^{j-1} \\
& & & & & & & C^{j} \\
\end{array}
\right] & \hspace{-11mm}\begin{array}{l}\left.\begin{array}{l} {\ } \\ {\ } \\ {\ }\\ {\ }\end{array}\right\}k-j\\ \left.\begin{array}{l}{\ } \\ {\ } \\ {\ }\\ {\ }\end{array}\right\}j+1\end{array}\end{array}$$
and ${A''^j}^*A''^j=\underbrace{0\oplus\cdots\oplus 0}_j\oplus \underbrace{I\oplus\cdots\oplus I}_{k-j}\oplus D$, where $D=(\sum_{s=0}^{j-1}{C^s}^*B^*BC^s)+{C^j}^*C^j$ for each $j$, $1\le j\le k$. From $B^*B+C^*C=I_m$, we deduce that
\begin{align*}
D &= B^*B+(\sum_{s=1}^{j-2}{C^s}^*B^*BC^s)+{C^{j-1}}^*(B^*B+C^*C)C^{j-1}\\
&= B^*B+(\sum_{s=1}^{j-2}{C^s}^*B^*BC^s)+{C^{j-1}}^*C^{j-1}\\
&= B^*B+(\sum_{s=1}^{j-3}{C^s}^*B^*BC^s)+{C^{j-2}}^*(B^*B+C^*C)C^{j-2}\\
&= \cdots\\
&= B^*B+C^*C\\
&= I_m.
\end{align*}
Hence ${A''^j}^*A''^j=0\oplus I$, which implies that $A''^j$ is a partial isometry by Lemma 2.1 for all $j$, $1\le j\le k$. This proves (c) $\Rightarrow$ (a). \hspace{2mm} $\blacksquare$
A consequence of Theorem 2.2 is the following.
{\bf Theorem 2.4.} \emph{Let $A$ be an $n$-by-$n$ matrix and $\ell>a(A)$}. \emph{Then the following conditions are equivalent}:
(a) $A, A^2, \ldots, A^{\ell}$ \emph{are partial isometries},
(b) \emph{$A$ is unitarily similar to a matrix of the form} $U\oplus J_{k_1}\oplus\cdots\oplus J_{k_m}$, \emph{where $U$ is unitary and} $a(A)=k_1\ge \cdots\ge k_m\ge 1$, \emph{and}
(c) \emph{$A^j$ is a partial isometry for all $j\ge 1$}.
The equivalence of (b) and (c) here is the finite-dimensional version of a result of Halmos and Wallen \cite[Theorem]{7}.
{\em Proof of Theorem $2.4$}. Since $\ell>k\equiv a(A)$, Theorem 2.2 (a) $\Rightarrow$ (b) says that $A$ is unitarily similar to a matrix of the form
$$A'\equiv\left[\begin{array}{ccccc} 0_{n_1} & A_1 & & & \\ & 0_{n_2} & \ddots & & \\ & & \ddots & A_{k-1} & \\ & & & 0_{n_k} & B\\ & & & & C\end{array}\right] \ \ \ \mbox{on} \ \ \mathbb{C}^n=\mathbb{C}^{n_1}\oplus\cdots\oplus\mathbb{C}^{n_k}\oplus\mathbb{C}^{m}$$
with the $A_j$'s, $B$ and $C$ satisfying the properties asserted therein. As $k$ is the ascent of $A$, ${\rm nullity\, } A^k$ equals the algebraic multiplicity of eigenvalue 0 of $A$. Since ${\rm nullity\, } A^k={\rm nullity\, } A'^k=\sum_{j=1}^kn_j$, it is seen from the structure of $A'$ that the eigenvalue 0 appears fully in the diagonal $0_{n_j}$'s. This shows that 0 cannot be an eigenvalue of $C$ or $C$ is invertible.
A simple computation yields that
$$A'^{k+1}=\left[\begin{array}{cccc}
0 & \cdots & 0 & (\prod_{j=1}^{k-1}A_j)BC\\
0 & \cdots & 0 & (\prod_{j=2}^{k-1}A_j)BC^2\\
\vdots & & \vdots & \vdots\\
0 & \cdots & 0 & A_{k-1}BC^{k-1}\\
0 & \cdots & 0 & BC^k\\
0 & \cdots & 0 & C^{k+1}
\end{array}\right]$$
and
\begin{equation}\label{e4}
{A'^{k+1}}^*A'^{k+1}=0_{n_1}\oplus\cdots\oplus 0_{n_k}\oplus D,
\end{equation}
where, after simplification by using $A_j^*A_j=I_{n_{j+1}}$ for $1\le j\le k-1$, $D=(\sum_{j=1}^k{C^j}^*B^*BC^j)+{C^{k+1}}^*C^{k+1}$. As $A'^{k+1}$ is a partial isometry, ${A'^{k+1}}^*A'^{k+1}$ is a projection by Lemma 2.1. Moreover, we also have
$${\rm nullity\, } {A'^{k+1}}^*A'^{k+1}={\rm nullity\, } A'^{k+1}={\rm nullity\, } A'^k=\sum_{j=1}^kn_j,$$
where the second equality holds because of $k=a(A')$. Thus we obtain from (\ref{e4}) that $D=I_m$. Therefore,
\begin{align*}
I_m &= D = (\sum_{j=1}^k{C^j}^*B^*BC^j)+{C^{k+1}}^*C^{k+1}\\
&= (\sum_{j=1}^{k-1}{C^j}^*B^*BC^j)+{C^{k}}^*(B^*B+C^*C)C^{k}\\
&= (\sum_{j=1}^{k-1}{C^j}^*B^*BC^j)+{C^{k}}^*C^{k}\\
&= \cdots\\
&= C^*(B^*B+C^*C)C\\
&= C^*C.
\end{align*}
This shows that $C$ is unitary and hence $B=0$ (from $B^*B+C^*C=I_m$). Thus $A'$ is unitarily similar to the asserted form in (b). This completes the proof of (a) $\Rightarrow$ (b). The implications (b) $\Rightarrow$ (c) and (c) $\Rightarrow$ (a) are trivial. \hspace{2mm} $\blacksquare$
At this juncture, it seems appropriate to define the {\em power partial isometry index} $p(\cdot)$ for any matrix $A$:
$$p(A)\equiv\sup\{k\ge 0: I, A, A^2, \ldots, A^k \ \mbox{are all partial isometries}\}.$$
An easy corollary of Theorem 2.4 is the following estimate for $p(A)$.
{\bf Corollary 2.5.} \emph{If $A$ is an $n$-by-$n$ matrix}, \emph{then $0\le p(A)\le a(A)$ or $p(A)=\infty$}. \emph{In particular}, \emph{we have} (a) $0\le p(A)\le n-1$ \emph{or} $p(A)=\infty$, \emph{and} (b) $p(A)=n-1$ \emph{if and only if $A$ is unitarily similar to a matrix of the form}
\begin{equation}\label{e5}
\left[\begin{array}{ccccc}
0 & 1 & & &\\
& 0 & \ddots & &\\
& & \ddots & 1 &\\
& & & 0 & a \\
& & & & b
\end{array}\right]\end{equation}
\emph{with $|a|^2+|b|^2=1$ and $a, b\neq 0$}.
{\em Proof}. The first assertion follows from Theorem 2.4. If $p(A)=n$, then $a(A)=n$, which implies that the Jordan form of $A$ is $J_n$. Thus $p(A)=\infty$, a contradiction. This proves (a) of the second assertion.
As for (b), if $p(A)=n-1$, then $a(A)=n$ will lead to a contradiction as above. Thus we must have $a(A)=n-1$. Theorem 2.2 implies that $A$ is unitarily similar to a matrix of the form (\ref{e5}) with $|a|^2+|b|^2=1$. Since either $a=0$ or $b=0$ will lead to the contradicting $p(A)=\infty$, we have thus proven one direction of (b). The converse follows easily from Theorem 2.2 and the arguments in the preceding paragraph. \hspace{2mm} $\blacksquare$
The next theorem gives conditions for which $p(A)\ge a(A)-1$ implies that $A$ is unitarily similar to a direct sum of Jordan blocks.
{\bf Theorem 2.6.} \emph{Let $A$ be an $n$-by-$n$ matrix with $p(A)\ge a(A)-1$}. \emph{Then the following conditions are equivalent}:
(a) \emph{$W(A)$ is a circular disc centered at the origin},
(b) \emph{$A$ is unitarily similar to a direct sum of Jordan blocks},
(c) \emph{$A$ has no unitary part and $A^j$ is a partial isometry for all $j\ge 1$}, \emph{and}
(d) \emph{$A$ has no unitary part and $A, A^2, \ldots, A^{\ell}$ are partial isometries for some $\ell>a(A)$}.
\noindent
\emph{In this case}, $W(A)=\{z\in \mathbb{C} : |z|\le\cos(\pi/(a(A)+1))\}$ \emph{and} $p(A)=\infty$.
Here a matrix is said to have {\em no unitary part} if it is not unitarily similar to one with a unitary summand.
Note that, in the preceding theorem, the condition $p(A)\ge a(A)-1$ cannot be replaced by the weaker $p(A)\ge a(A)-2$. This is seen by the next example.
| 3,946 | 31,795 |
en
|
train
|
0.168.4
|
At this juncture, it seems appropriate to define the {\em power partial isometry index} $p(\cdot)$ for any matrix $A$:
$$p(A)\equiv\sup\{k\ge 0: I, A, A^2, \ldots, A^k \ \mbox{are all partial isometries}\}.$$
An easy corollary of Theorem 2.4 is the following estimate for $p(A)$.
{\bf Corollary 2.5.} \emph{If $A$ is an $n$-by-$n$ matrix}, \emph{then $0\le p(A)\le a(A)$ or $p(A)=\infty$}. \emph{In particular}, \emph{we have} (a) $0\le p(A)\le n-1$ \emph{or} $p(A)=\infty$, \emph{and} (b) $p(A)=n-1$ \emph{if and only if $A$ is unitarily similar to a matrix of the form}
\begin{equation}\label{e5}
\left[\begin{array}{ccccc}
0 & 1 & & &\\
& 0 & \ddots & &\\
& & \ddots & 1 &\\
& & & 0 & a \\
& & & & b
\end{array}\right]\end{equation}
\emph{with $|a|^2+|b|^2=1$ and $a, b\neq 0$}.
{\em Proof}. The first assertion follows from Theorem 2.4. If $p(A)=n$, then $a(A)=n$, which implies that the Jordan form of $A$ is $J_n$. Thus $p(A)=\infty$, a contradiction. This proves (a) of the second assertion.
As for (b), if $p(A)=n-1$, then $a(A)=n$ will lead to a contradiction as above. Thus we must have $a(A)=n-1$. Theorem 2.2 implies that $A$ is unitarily similar to a matrix of the form (\ref{e5}) with $|a|^2+|b|^2=1$. Since either $a=0$ or $b=0$ will lead to the contradicting $p(A)=\infty$, we have thus proven one direction of (b). The converse follows easily from Theorem 2.2 and the arguments in the preceding paragraph. \hspace{2mm} $\blacksquare$
The next theorem gives conditions for which $p(A)\ge a(A)-1$ implies that $A$ is unitarily similar to a direct sum of Jordan blocks.
{\bf Theorem 2.6.} \emph{Let $A$ be an $n$-by-$n$ matrix with $p(A)\ge a(A)-1$}. \emph{Then the following conditions are equivalent}:
(a) \emph{$W(A)$ is a circular disc centered at the origin},
(b) \emph{$A$ is unitarily similar to a direct sum of Jordan blocks},
(c) \emph{$A$ has no unitary part and $A^j$ is a partial isometry for all $j\ge 1$}, \emph{and}
(d) \emph{$A$ has no unitary part and $A, A^2, \ldots, A^{\ell}$ are partial isometries for some $\ell>a(A)$}.
\noindent
\emph{In this case}, $W(A)=\{z\in \mathbb{C} : |z|\le\cos(\pi/(a(A)+1))\}$ \emph{and} $p(A)=\infty$.
Here a matrix is said to have {\em no unitary part} if it is not unitarily similar to one with a unitary summand.
Note that, in the preceding theorem, the condition $p(A)\ge a(A)-1$ cannot be replaced by the weaker $p(A)\ge a(A)-2$. This is seen by the next example.
{\bf Example 2.7.} If $A=J_3\oplus{\scriptsize\left[\begin{array}{cc} 0 & (1-|\lambda|^2)^{1/2}\\ 0 & \lambda\end{array}\right]}$, where $0<|\lambda|\le\sqrt{2}-1$, then $a(A)=3$ and $W(A)=\{z\in\mathbb{C} : |z|\le\sqrt{2}/2\}$. Since $A$ is a partial isometry while $A^2$ is not, we have $p(A)=1$. Note that $A$ has a nonzero eigenvalue. Hence it is not unitarily similar to any direct sum of Jordan blocks.
The proof of Theorem 2.6 depends on the following series of lemmas, the first of which is a generalization of \cite[Theorem 1]{13}.
{\bf Lemma 2.8.} \emph{Let}
$$A=\left[\begin{array}{ccccc} 0 & A_1 & & & \\ & 0 & \ddots & & \\ & & \ddots & A_{k-1} & \\ & & & 0 & B\\ & & & & C\end{array}\right] \ on \ \mathbb{C}^n=\mathbb{C}^{n_1}\oplus\cdots\oplus\mathbb{C}^{n_k}\oplus\mathbb{C}^{m},$$
\emph{where the} $A_j$'\emph{s satisfy} $A_j^*A_j=I_{n_{j+1}}$, $1\le j\le k-1$. \emph{If $W(A)$ is a circular disc centered at the origin with radius $r$ larger than} $\cos(\pi/(k+1))$, \emph{then $C$ is not invertible}.
{\em Proof}. Since $W(A)=\{z\in\mathbb{C}:|z|\le r\}$, $r$ is the maximum eigenvalue of ${\rm Re\, }(e^{i\theta}A)$ and hence $\det(rI_n-{\rm Re\, }(e^{i\theta}A))=0$ for all real $\theta$. We have
\begin{align}\label{e6}
& 0=\det\left[\begin{array}{ccccc}
rI_{n_1} & -(e^{i\theta}/2)A_1 & & & \\
-(e^{-i\theta}/2)A_1^* & rI_{n_2} & \ddots & & \\
& \ddots & \ddots & -(e^{i\theta}/2)A_{k-1} & \\
& & -(e^{-i\theta}/2)A_{k-1}^* & rI_{n_k} & -(e^{i\theta}/2)B\\
& & & -(e^{-i\theta}/2)B^* & rI_m-{\rm Re\, }(e^{i\theta}C)\end{array}\right] \\
=& \det D_k(\theta)\cdot\det(E(\theta)-F(\theta)),\nonumber
\end{align}
where
\begin{equation}\label{e7}
D_k(\theta)=\left[\begin{array}{cccc}
rI_{n_1} & -(e^{i\theta}/2)A_1 & & \\
-(e^{-i\theta}/2)A_1^* & rI_{n_2} & \ddots & \\
& \ddots & \ddots & -(e^{i\theta}/2)A_{k-1} \\
& & -(e^{-i\theta}/2)A_{k-1}^* & rI_{n_k} \end{array}\right],
\ \ E(\theta)=rI_m-{\rm Re\, }(e^{i\theta}C),
\end{equation}
and
\begin{equation}\label{e8}
F(\theta)=\left[0 \ \ldots \ 0 \ -(e^{-i\theta}/2)B^*\right]D_k(\theta)^{-1}\left[\begin{array}{c} 0\\ \vdots \\ 0\\ -(e^{i\theta}/2)B\end{array}\right],
\end{equation}
by using the Schur complement of $D_k(\theta)$ in the matrix in ({\rm Re\, }f{e6}) (cf. \cite[p. 22]{8}). Note that here the invertibility of $D_k(\theta)$ follows from the facts that $D_k(\theta)$ is unitarily similar to $rI-{\rm Re\, } J$, where $J=(\sum_{j=1}^{n_k}\oplus J_k)\oplus(\sum_{j=1}^{n_{k-1}-n_k}\oplus J_{k-1})\oplus\cdots\oplus(\sum_{j=1}^{n_1-n_2}\oplus J_1)$ (cf. the proof of Theorem 2.2 (b) $\hbox{\bf\rlap{I}{\hbox to 2 pt{}}R}ightarrow$ (c)), and $r$ ($>\cos(\pi/(k+1))$) is not an eigenvalue of ${\rm Re\, } J$. Moreover, the $(k,k)$-block of $D_k(\theta)^{-1}$ is independent of the value of $\theta$. Thus the same is true for the entries of $F(\theta)$. Under a unitary similarity, we may assume that $C=[c_{ij}]_{i,j=1}^m$ is upper triangular with $c_{ij}=0$ for all $i>j$. Let $F(\theta)=[b_{ij}]_{i,j=1}^m$ and $E(\theta)-F(\theta)=[d_{ij}(\theta)]_{i,j=1}^m$. Then
$$d_{ij}(\theta)=\left\{\begin{array}{ll}
r-{\rm Re\, }(e^{i\theta}c_{jj})-b_{jj} \ \ \ \ & \mbox{if} \ \ i=j,\\
-(e^{i\theta}/2)c_{ij}-b_{ij} & \mbox{if} \ \ i<j,\\
-(e^{-i\theta}/2)\overline{c}_{ji}-b_{ij} & \mbox{if} \ \ i>j.\end{array}\right.$$
Hence $p(\theta)\equiv\det(E(\theta)-F(\theta))$ is a trigonometric polynomial of degree at most $m$, say, $p(\theta)=\sum_{j=-m}^m a_je^{ij\theta}$. Since $\det(rI_m-{\rm Re\, }(e^{i\theta}A))=0$ and $\det D_k(\theta)\neq 0$, we obtain from ({\rm Re\, }f{e6}) that $p(\theta)=0$ for all real $\theta$. This implies that $a_j=0$ for all $j$. In particular, $a_m=(-1)^m\prod_{j=1}^m(c_{jj}/2)=0$ from the above description of the $d_{ij}(\theta)$'s. This yields that $c_{jj}=0$ for some $j$ or $C$ is not invertible. \hspace{2mm} $\blacksquare$
The next lemma is to be used in the proof of Lemma 2.10.
{\bf Lemma 2.9.} \emph{Let $A={\scriptsize\left[\begin{array}{cc} 0_p & B\\ 0 & C\end{array}\right]}$ be an $n$-by-$n$ matrix}, \emph{and let $B=[b_{ij}]_{i=1, j=1}^{p, n-p}$ and $C=[c_{ij}]_{i,j=1}^{n-p}$ with $c_{ij}=0$ for all $i>j$}. \emph{If the geometric and algebraic multiplicities of the eigenvalue $0$ of $A$ are equal to each other and $c_{11}=0$}, \emph{then $b_{i1}=0$ for all $i$}, $1\le i\le p$.
{\em Proof}. Let $e_j$ denote the $j$th standard unit vector $[0 \ \ldots \ 0 \ \tb{1}{j \, \mbox{th}} \ 0 \ \ldots \ 0]^T$, $1\le j\le n$. Then $e_1, \ldots, e_p$ are all in $\ker A$. Since $c_{11}=0$, we have $Ae_{p+1}=b_{11}e_1+\cdots+b_{p 1}e_p$, which is also in $\ker A$. Thus $A^2e_{p+1}=0$ or $e_{p+1}\in\ker A^2$. Our assumption on the multiplicities of 0 implies that $\ker A=\ker A^2=\cdots$. Hence we obtain $e_{p+1}\in \ker A$ or $Ae_{p+1}=0$, which yields that $b_{i 1}=0$ for all $i$, $1\le i\le p$. \hspace{2mm} $\blacksquare$
The following lemma is the main tool in proving, under the condition of circular $W(A)$, that $p(A)\ge a(A)-1$ yields $p(A)\ge a(A)$.
{\bf Lemma 2.10.} \emph{Let}
$$A=\left[\begin{array}{ccccc} 0 & A_1 & & & \\ & 0 & \ddots & & \\ & & \ddots & A_{k-2} & \\ & & & 0 & B\\ & & & & C\end{array}\right] \ \ \ \ on \ \ \mathbb{C}^n=\mathbb{C}^{n_1}\oplus\cdots\oplus\mathbb{C}^{n_{k-1}}\oplus\mathbb{C}^{m},$$
\emph{where} $k=a(A) \, (\ge 2)$, \emph{the} $A_j$'\emph{s satisfy} $A_j^*A_j=I_{n_{j+1}}$, $1\le j\le k-2$, \emph{and} $B={\scriptsize\left[\begin{array}{cc} I_p & 0\\ 0 & B_1\end{array}\right]}$ \emph{and} $C={\scriptsize\left[\begin{array}{cc} 0_p & C_1\\ 0 & C_2\end{array}\right]} \, (1\le p\le\min\{n_{k-1}, m\})$ \emph{satisfy} $B^*B+C^*C=I_m$. \emph{If $W(A)$ is a circular disc centered at the origin with radius $r$ larger than $\cos(\pi/(k+1))$}, \emph{then $A$ is unitarily similar to a matrix of the form}
$$\left[\begin{array}{ccccc} 0 & A_1' & & & \\ & 0 & \ddots & & \\ & & \ddots & A_{k-1}' & \\ & & & 0 & B'\\ & & & & C'\end{array}\right] \ \ \ \ on \ \ \mathbb{C}^n=\mathbb{C}^{n_1}\oplus\cdots\oplus\mathbb{C}^{n_{k-1}}\oplus\mathbb{C}^{q}\oplus\mathbb{C}^{m-q},$$
\emph{where} $q=\min\{n_{k-1}, m\}$, \emph{the} $A'_j$'\emph{s satisfy} ${A'_j}^*A'_j=I_{n_{j+1}}$, $1\le j\le k-2$, ${A'_{k-1}}^*A'_{k-1}=I_{q}$, \emph{and} $B'$ \emph{and} $C'$ \emph{satisfy} ${B'}^*B'+{C'}^*C'=I_{m-q}$.
| 3,789 | 31,795 |
en
|
train
|
0.168.5
|
The next lemma is to be used in the proof of Lemma 2.10.
{\bf Lemma 2.9.} \emph{Let $A={\scriptsize\left[\begin{array}{cc} 0_p & B\\ 0 & C\end{array}\right]}$ be an $n$-by-$n$ matrix}, \emph{and let $B=[b_{ij}]_{i=1, j=1}^{p, n-p}$ and $C=[c_{ij}]_{i,j=1}^{n-p}$ with $c_{ij}=0$ for all $i>j$}. \emph{If the geometric and algebraic multiplicities of the eigenvalue $0$ of $A$ are equal to each other and $c_{11}=0$}, \emph{then $b_{i1}=0$ for all $i$}, $1\le i\le p$.
{\em Proof}. Let $e_j$ denote the $j$th standard unit vector $[0 \ \ldots \ 0 \ \tb{1}{j \, \mbox{th}} \ 0 \ \ldots \ 0]^T$, $1\le j\le n$. Then $e_1, \ldots, e_p$ are all in $\ker A$. Since $c_{11}=0$, we have $Ae_{p+1}=b_{11}e_1+\cdots+b_{p 1}e_p$, which is also in $\ker A$. Thus $A^2e_{p+1}=0$ or $e_{p+1}\in\ker A^2$. Our assumption on the multiplicities of 0 implies that $\ker A=\ker A^2=\cdots$. Hence we obtain $e_{p+1}\in \ker A$ or $Ae_{p+1}=0$, which yields that $b_{i 1}=0$ for all $i$, $1\le i\le p$. \hspace{2mm} $\blacksquare$
The following lemma is the main tool in proving, under the condition of circular $W(A)$, that $p(A)\ge a(A)-1$ yields $p(A)\ge a(A)$.
{\bf Lemma 2.10.} \emph{Let}
$$A=\left[\begin{array}{ccccc} 0 & A_1 & & & \\ & 0 & \ddots & & \\ & & \ddots & A_{k-2} & \\ & & & 0 & B\\ & & & & C\end{array}\right] \ \ \ \ on \ \ \mathbb{C}^n=\mathbb{C}^{n_1}\oplus\cdots\oplus\mathbb{C}^{n_{k-1}}\oplus\mathbb{C}^{m},$$
\emph{where} $k=a(A) \, (\ge 2)$, \emph{the} $A_j$'\emph{s satisfy} $A_j^*A_j=I_{n_{j+1}}$, $1\le j\le k-2$, \emph{and} $B={\scriptsize\left[\begin{array}{cc} I_p & 0\\ 0 & B_1\end{array}\right]}$ \emph{and} $C={\scriptsize\left[\begin{array}{cc} 0_p & C_1\\ 0 & C_2\end{array}\right]} \, (1\le p\le\min\{n_{k-1}, m\})$ \emph{satisfy} $B^*B+C^*C=I_m$. \emph{If $W(A)$ is a circular disc centered at the origin with radius $r$ larger than $\cos(\pi/(k+1))$}, \emph{then $A$ is unitarily similar to a matrix of the form}
$$\left[\begin{array}{ccccc} 0 & A_1' & & & \\ & 0 & \ddots & & \\ & & \ddots & A_{k-1}' & \\ & & & 0 & B'\\ & & & & C'\end{array}\right] \ \ \ \ on \ \ \mathbb{C}^n=\mathbb{C}^{n_1}\oplus\cdots\oplus\mathbb{C}^{n_{k-1}}\oplus\mathbb{C}^{q}\oplus\mathbb{C}^{m-q},$$
\emph{where} $q=\min\{n_{k-1}, m\}$, \emph{the} $A'_j$'\emph{s satisfy} ${A'_j}^*A'_j=I_{n_{j+1}}$, $1\le j\le k-2$, ${A'_{k-1}}^*A'_{k-1}=I_{q}$, \emph{and} $B'$ \emph{and} $C'$ \emph{satisfy} ${B'}^*B'+{C'}^*C'=I_{m-q}$.
{\em Proof}. Since $W(A)=\{z\in\mathbb{C} : |z|\le r\}$, we have $\det(rI_n-{\rm Re\, }(e^{i\theta}A))=0$ for all real $\theta$. As in the proof of Lemma 2.8, we have the factorization $\det(rI_n-{\rm Re\, }(e^{i\theta}A))=\det D_{k-1}(\theta)\cdot\det(E(\theta)-F(\theta))$, where $D_{k-1}(\theta)$, $E(\theta)$ and $F(\theta)$ are as in ({\rm Re\, }f{e7}) and ({\rm Re\, }f{e8}) with $D_{k}(\theta)^{-1}$ in the expression of $F(\theta)$ there replaced by $D_{k-1}(\theta)^{-1}$. Since $D_{k-1}(\theta)$ is unitarily similar to $rI-{\rm Re\, } J$, where $J=(\sum_{j=1}^{n_{k-1}}\oplus J_{k-1})\oplus(\sum_{j=1}^{n_{k-2}-n_{k-1}}\oplus J_{k-2})\oplus\cdots\oplus(\sum_{j=1}^{n_1-n_{2}}\oplus J_{1})$ and the $(k-1, k-1)$-entry of $(rI_{k-1}-{\rm Re\, } J_{k-1})^{-1}$ is $a\equiv\det(rI_{k-2}-{\rm Re\, } J_{k-2})/\det(rI_{k-1}-{\rm Re\, } J_{k-1})$, the $(k-1, k-1)$-block of $D_{k-1}(\theta)^{-1}$ is given by $aI_{n_{k-1}}$. Hence we have $F(\theta)=(a/4)B^*B$. As before, from $\det D_{k-1}(\theta)\neq 0$, we obtain $\det(E(\theta)-F(\theta))=0$. Thus
\begin{align}\label{e9}
& \, 0 =\det(E(\theta)-F(\theta)) \nonumber\\
= & \, \det\left(rI_m-\left[\begin{array}{cc} 0_{p} & (e^{i\theta}/2)C_1\\ (e^{-i\theta}/2)C_1^* & {\rm Re\, }(e^{i\theta}C_2)\end{array}\right]-\frac{a}{4}\left[\begin{array}{cc} I_{p} & 0\\ 0 & B_1^*B_1\end{array}\right]\right) \nonumber\\
= & \, \det\left[\begin{array}{cc} (r-(a/4))I_p & -(e^{i\theta}/2)C_1\\ -(e^{-i\theta}/2)C_1^* & rI_{m-p}-{\rm Re\, }(e^{i\theta}C_2)-(a/4)B_1^*B_1\end{array}\right].
\end{align}
We claim that $r\neq a/4$. Indeed, since $\det(rI_k-{\rm Re\, } J_k)=r\det(rI_{k-1}-{\rm Re\, } J_{k-1})-(1/4)\det(rI_{k-2}-{\rm Re\, } J_{k-2})$, we have $\det(rI_k-{\rm Re\, } J_k)/\det(rI_{k-1}-{\rm Re\, } J_{k-1})=r-(a/4)$. Therefore, $r=a/4$ if and only if $\det(rI_k-{\rm Re\, } J_k)=0$. The latter would imply $r\le\cos(\pi/(k+1))$ contradicting our assumption that $r>\cos(\pi/(k+1))$. Hence $r\neq a/4$ as asserted. Using the Schur complement, we infer from (\ref{e9}) that
$$p(\theta)\equiv\det(rI_{m-p}-{\rm Re\, }(e^{i\theta}C_2)-\frac{a}{4}B_1^*B_1-\frac{1}{r-(a/4)}\cdot\frac{1}{4}C_1^*C_1)=0$$
for all real $\theta$. As $p(\theta)$ is a trigonometric polynomial of degree at most $m-p$, say, $p(\theta)=\sum_{j=-(m-p)}^{m-p}a_je^{ij\theta}$, this implies that $a_j=0$ for all $j$. After a unitary similarity, we may assume that $C_2=[c_{ij}]_{i,j=1}^{m-p}$ with $c_{ij}=0$ for all $i>j$. Hence $a_{m-p}=(1/2^{m-p})c_{11}\cdots c_{m-p, m-p}=0$. Thus $c_{jj}=0$ for some $j$. We may assume that $c_{11}=0$. Note that
$$A^k=\left[\begin{array}{cccc}
0 & \cdots & 0 & (\prod_{j=1}^{k-2}A_j)BC\\
0 & \cdots & 0 & (\prod_{j=2}^{k-2}A_j)BC^2\\
\vdots & & \vdots & \vdots\\
0 & \cdots & 0 & A_{k-2}BC^{k-2}\\
0 & \cdots & 0 & BC^{k-1}\\
0 & \cdots & 0 & C^{k}
\end{array}\right],$$
$$BC^j=\left[\begin{array}{cc} I_p & 0\\ 0 & B_1\end{array}\right]\left[\begin{array}{cc} 0_p & C_1C_2^{j-1}\\ 0 & C_2^j\end{array}\right]=\left[\begin{array}{cc} 0_p & C_1C_2^{j-1}\\ 0 & B_1C_2^j\end{array}\right], \ \ 1\le j\le k-1,$$
and
$$C^k=\left[\begin{array}{cc} 0_p & C_1C_2^{k-1}\\ 0 & C_2^k\end{array}\right].$$
Since the first column of $C_2$ is zero, the same is true for the $(p+1)$st columns of $(\prod_{j=t}^{k-2}A_j)BC^t$ ($2\le t\le k-2$), $BC^{k-1}$ and $C^k$. As for $(\prod_{j=1}^{k-2}A_j)BC$, we need Lemma 2.9. Because $k=a(A)$, the geometric and algebraic multiplicities of the eigenvalue 0 of $A^k$ coincide. Hence we may apply Lemma 2.9 to $A^k$ to infer that the $((\sum_{j=1}^{k-1}n_j)+p+1)$st column of $A^k$ is zero. In particular, since $\ker(\prod_{j=1}^{k-2}A_j)=\{0\}$, the $(p+1)$st column of $BC={\scriptsize\left[\begin{array}{cc} 0_p & C_1\\ 0 & B_1C_2\end{array}\right]}$ is zero and thus the first column of $C_1$ is zero. Together with the zero first column of $C_2$, this yields $C={\scriptsize\left[\begin{array}{cc} 0_{p+1} & C_1^{(1)}\\ 0 & C_2^{(1)}\end{array}\right]}$. As
\begin{align*}
& I_m=B^*B+C^*C=\left[\begin{array}{cc} I_p & 0\\ 0 & B_1^*\end{array}\right]\left[\begin{array}{cc} I_p & 0\\ 0 & B_1\end{array}\right]+\left[\begin{array}{cc} 0_{p+1} & 0\\ C^{(1)*}_1 & C^{(1)*}_2\end{array}\right]\left[\begin{array}{cc} 0_{p+1} & C^{(1)}_1\\ 0 & C^{(1)}_2\end{array}\right]\\
=& \left[\begin{array}{cc} I_p & 0\\ 0 & B_1^*B_1\end{array}\right]+\left[\begin{array}{cc} 0_{p+1} & 0\\ 0 & C^{(1)*}_1C^{(1)}_1+ C^{(1)*}_2C^{(1)}_2\end{array}\right],
\end{align*}
we infer that the first column of $B_1$ is a unit vector. After another unitary similarity, we may further assume that
$$B_1=\left[\begin{array}{cc} 1 & 0\\ 0 & B_1^{(1)}\end{array}\right] \ \ \ \mbox{or} \ \ \ B=\left[\begin{array}{cc} I_{p+1} & 0\\ 0 & B^{(1)}_1\end{array}\right].$$
Applying the above arguments again, we have
$$C=\left[\begin{array}{cc} 0_{p+2} & C_1^{(2)}\\ 0 & C_2^{(2)}\end{array}\right], \ B_1^{(1)}=\left[\begin{array}{cc} 1 & 0\\ 0 & B_1^{(2)}\end{array}\right] \ \ \mbox{and} \ \ B=\left[\begin{array}{cc} I_{p+2} & 0\\ 0 & B_1^{(2)}\end{array}\right].$$
Continuing this process, we obtain
$$\mbox{(i)} \ \ C=\left[\begin{array}{cc} 0_{n_{k-1}} & C'_1\\ 0 & C'_2\end{array}\right] \ \ \mbox{and} \ \ B=\left[ I_{n_{k-1}} \ \ 0\right] \ \ \mbox{if} \ \ n_{k-1}<m,$$
and
$$\hspace*{-25mm}\mbox{(ii)} \ \ C=0_m \ \ \mbox{and} \ \ B=\left[\begin{array}{c} I_m \\ 0\end{array}\right] \ \ \mbox{if} \ \ n_{k-1}\ge m.$$
Finally, let $A_j'=A_j$ for $1\le j\le k-2$. In case (i), let $A_{k-1}'=I_{n_{k-1}}$, $B'=C'_1$ and $C'=C'_2$. Since
$$I_m=B^*B+C^*C=\left[\begin{array}{cc} I_{n_{k-1}} & 0\\ 0 & {C'_1}^*C'_1+{C'_2}^*C'_2\end{array}\right],$$
we have $B'^*B'+C'^*C'={C'_1}^*C'_1+{C'_2}^*C'_2=I_{m-n_{k-1}}$. On the other hand, for case (ii), let $A'_{k-1}={\scriptsize\left[\begin{array}{c} I_m \\ 0\end{array}\right]}$. In this case, $B'$ and $C'$ are absent. \hspace{2mm} $\blacksquare$
A consequence of the previous results is the following.
{\bf Proposition 2.11.} \emph{If $A$ is an $n$-by-$n$ matrix with $W(A)$ a circular disc centered at the origin and $p(A)\ge a(A)-1$}, \emph{then $p(A)=a(A)$ or $\infty$}.
| 4,016 | 31,795 |
en
|
train
|
0.168.6
|
A consequence of the previous results is the following.
{\bf Proposition 2.11.} \emph{If $A$ is an $n$-by-$n$ matrix with $W(A)$ a circular disc centered at the origin and $p(A)\ge a(A)-1$}, \emph{then $p(A)=a(A)$ or $\infty$}.
{\em Proof}. Let $k=a(A)$. The assumption $p(A)\ge a(A)-1$ says that $A, A^2, \ldots, A^{k-1}$ are all partial isometries. In particular, we have $A^{k-1}=0$ or $\|A^{k-1}\|=1$. In the former case, $p(A)$ equals $\infty$. Hence we may assume that $\|A^{k-1}\|=1$ and thus also $\|A\|=1$. By \cite[Theorem 2.10]{1}, we have $w(A)\ge\cos(\pi/(k+1))$. Two cases are considered separately:
(i) $w(A)=\cos(\pi/(k+1))$. In this case, \cite[Theorem 2.10]{1} yields that $A$ is unitarily similar to a matrix of the form $J_k\oplus A_1$ with $\|A_1\|\le 1$ and $w(A_1)\le\cos(\pi/(k+1))$. Since $A_1^{k-1}$ is also a partial isometry, we may assume as before that $\|A_1^{k-1}\|=1$ and thus also $\|A_1\|=1$. Now applying \cite[Theorem 2.10]{1} again to $A_1$ yields that $w(A_1)=\cos(\pi/(k+1))$ and $A_1$ is unitarily similar to $J_k\oplus A_2$ with $\|A_2\|\le 1$ and $w(A_2)\le\cos(\pi/(k+1))$. Continuing this process, we obtain that either $p(A)=\infty$ or $A$ is unitarily similar to a direct sum of copies of $J_k$. In the latter case, we again have $p(A)=\infty$.
(ii) $w(A)>\cos(\pi/(k+1))$. Since $A, A^2, \ldots, A^{k-1}$ are partial isometries, Theorem 2.2 yields the unitary similarity of $A$ to a matrix of the form
$$\left[\begin{array}{ccccc} 0 & A_1 & & & \\ & 0 & \ddots & & \\ & & \ddots & A_{k-2} & \\ & & & 0 & B\\ & & & & C\end{array}\right] \ \ \ \mbox{on} \ \ \mathbb{C}^n=\mathbb{C}^{n_1}\oplus\cdots\oplus\mathbb{C}^{n_{k-1}}\oplus\mathbb{C}^{m}
$$
with $A_j^*A_j=I_{n_{j+1}}$, $1\le j\le k-2$, and $B^*B+C^*C=I_m$. By Lemma 2.8, $C$ is not invertible. We may assume, after a unitary similarity, that $B$ and $C$ are of the forms ${\scriptsize\left[\begin{array}{cc} 1 & 0\\ 0 & B_1 \end{array}\right]}$ and ${\scriptsize\left[\begin{array}{cc} 0 & C_1\\ 0 & C_2\end{array}\right]}$, where $B_1$, $C_1$ and $C_2$ are $(n_{k-1}-1)$-by-$(m-1)$, 1-by-$(m-1)$ and $(m-1)$-by-$(m-1)$ matrices, respectively. Using Lemma 2.10, we obtain the unitary similarity of $A$ to a matrix of the form in Theorem 2.2 (b). Thus, by Theorem 2.2 again, $A, A^2, \ldots, A^k$ are partial isometries. Hence $p(A)\ge k=a(A)$. Our assertion then follows from Corollary 2.5. \hspace{2mm} $\blacksquare$
Note that, in the preceding proposition, the number ``$a(A)-1$'' is sharp as was seen from Example 2.7.
We are now ready to prove Theorem 2.6.
{\em Proof of Theorem $2.6$}. The implications (b) $\Rightarrow$ (c) and (c) $\Rightarrow$ (d) are trivial. On the other hand, (d) $\Rightarrow$ (a) follows from Theorem 2.4. Hence we need only prove (a) $\Rightarrow$ (b). Let $k=a(A)$. By Proposition 2.11, $A, A^2, \ldots, A^k$ are partial isometries. Thus $A$ is unitarily similar to the matrix $A'$ in Theorem 2.2 (b). Since $k$ is the ascent of $A$, the geometric multiplicity of $A^k$, that is, ${\rm nullity\, } A^k$ is equal to the algebraic multiplicity of eigenvalue 0 of $A$. As proven in (a) $\Rightarrow$ (b) of Theorem 2.2, ${\rm nullity\, } A^k=\sum_{j=1}^kn_j$. We infer from the structure of $A'$ that 0 cannot be an eigenvalue of $C$. On the other hand, applying Lemma 2.8 to $A'$ yields the noninvertibility of $C$. This leads to a contradiction. Thus $B$ and $C$ won't appear in $A'$ and, therefore, $A'$, together with $A$, is unitarily similar to a direct sum of Jordan blocks by Theorem 2.2 (c). This proves (b). \hspace{2mm} $\blacksquare$
\noindent
{\bf\large 3. $S_n$-matrices}
In this section, we apply the results in Section 2 to the class of $S_n$-matrices. This we start with the following.
{\bf Proposition 3.1.} \emph{Let $A$ be a noninvertible $S_n$-matrix}. \emph{Then}
(a) \emph{$a(A)$ equals the algebraic multiplicity of the eigenvalue $0$ of $A$},
(b) $p(A)=a(A)$ \emph{or} $\infty$,
(c) \emph{$p(A)=\infty$ if and only if $A$ is unitarily similar to $J_n$}, \emph{and}
(d) ${\rm rank\, } A^j=n-j$ \emph{for} $1\le j\le a(A)$.
{\em Proof}. Let $k= a(A)$.
(a) It is known that, for any eigenvalue $\lambda$ of $A$, there is exactly one associated block, say, $\lambda I_{\ell}+J_{\ell}$ in the Jordan form of $A$. In particular, for $\lambda=0$, both $a(A)$ and the algebraic multiplicity of 0 are equal to the size $\ell$ of its associated Jordan block $J_{\ell}$.
(b) By \cite[Corollary 1.3]{2}, $A$ is unitarily similar to a matrix of the form $A'\equiv{\scriptsize\left[\begin{array}{cc} J_k & B\\ 0 & C\end{array}\right]}$, where $B={\scriptsize\left[\begin{array}{c} 0\\ b\end{array}\right]}$ is a $k$-by-$(n-k)$ matrix with $b$ a row vector of $n-k$ components, and $C$ is an invertible $(n-k)$-by-$(n-k)$ upper-triangular matrix. Since ${\rm rank\, }(I_n-A^*A)=1$, we infer from
$$I_n-A'^*A'=\left[\begin{array}{cc} I_k & 0\\ 0 & I_{n-k}\end{array}\right]-\left[\begin{array}{cc} J_k^* & 0\\ B^* & C^*\end{array}\right]\left[\begin{array}{cc} J_k & B\\ 0 & C\end{array}\right]=\left[\begin{array}{cc} {\scriptsize\left[\begin{array}{cccc} 1 & & &\\ & 0 & & \\ & & \ddots & \\ & & & 0\end{array}\right]} & 0\\ 0 & I_{n-k}-(B^*B+C^*C)\end{array}\right]$$
that $B^*B+C^*C=I_{n-k}$. As $A'$ can also be expressed as
$$\left[\begin{array}{ccccc}
0 & 1 & & & 0\\
& 0 & \ddots & & \vdots\\
& & \ddots & 1 & 0\\
& & & 0 & b \\
& & & & C
\end{array}\right] \ \ \ \mbox{on} \ \ \mathbb{C}^n=\underbrace{\mathbb{C}\oplus\cdots\oplus \mathbb{C}}_k\oplus \mathbb{C}^{n-k}$$
with $b^*b+C^*C=I_{n-k}$, Theorem 2.2 can be invoked to conclude that $A, A^2, \ldots, A^k$ are partial isometries. Thus $p(A)\ge k$. It follows from Corollary 2.5 that $p(A)=k$ or $\infty$.
(c) If $p(A)=\infty$, then the unitary similarity of $A$ and $J_n$ is an easy consequence of Theorem 2.4 and the fact that $A$ is irreducible (in the sense that it is not unitarily similar to the direct sum of two other matrices). The converse is trivial.
(d) As in the proof of (b), $A$ is unitarily similar to $A'=\left[\begin{array}{cc} J_k & B\\ 0 & C\end{array}\right]$, where $B={\scriptsize\left[\begin{array}{c} 0\\ b\end{array}\right]}$ and $C$ is invertible. Then $A^j$ is unitarily similar to
$$A'^j=\begin{array}{ll} \ \ \ \overbrace{\ \hspace{15mm} \ }^{\displaystyle j} \ \ \overbrace{\ \hspace{23mm} \ }^{\displaystyle k-j} & \\ \left[\begin{array}{c|c}
\begin{array}{ccccccc}
0 & \cdots & 0 & 1 & 0 & \cdots & 0 \\
& \cdot & & 0 & \ddots & \ddots & \vdots \\
& & \cdot & & \ddots & \ddots & 0 \\
& & & \cdot & & \ddots & 1 \\
& & & & \cdot & & 0 \\
& & & & & \cdot & \vdots \\
& & & & & & 0
\end{array} & \begin{array}{c} \\ 0
\\ \\ \\ B_j\end{array}\\ \hline 0 & C^j\end{array}
\right] & \hspace{-11mm}\begin{array}{l}
\left.\begin{array}{l} {\ } \\ {\ } \\ {\ } \\ {\ }\end{array}\right\}k-j\\
\left.\begin{array}{l}{\ } \\ {\ } \\ \vspace*{-2mm}{\ }\end{array}\right\}j\end{array}\end{array}$$
for some $j$-by-$(n-k)$ matrix $B_j$. Since the first $k-j$ rows and the last $n-k$ rows of $A'^j$ are linearly independent, we infer that ${\rm rank}\, A^j={\rm rank}\, A'^j=(k-j)+(n-k)=n-j$ for $1\le j\le k$. \hspace{2mm} $\blacksquare$
The next corollary complements Corollary 2.5: it shows that any allowable value for $p(A)$ can actually be attained by some matrix $A$.
{\bf Corollary 3.2.} \emph{For any integers $n$ and $j$ satisfying $1\le j\le n-1$}, \emph{there is an $n$-by-$n$ matrix $A$ with $p(A)=j$}.
{\em Proof}. Let $A$ be a noninvertible $S_n$-matrix with the algebraic multiplicity of its eigenvalue 0 equal to $j$ (cf. \cite[Corollary 1.3]{2}). Then $p(A)=a(A)=j$ by Proposition 3.1. \hspace{2mm} $\blacksquare$
For an $n$-by-$n$ matrix $A=[a_{ij}]_{i,j=1}^n$ and an $m$-by-$m$ matrix $B$, their {\em tensor product} (or {\em Kronecker product}) $A\otimes B$ is the $(nm)$-by-$(nm)$ matrix
$$\left[
\begin{array}{ccc}
a_{11}B & \cdots & a_{1n}B \\
\vdots & & \vdots \\
a_{n1}B & \cdots & a_{nn}B
\end{array}
\right].$$
Basic properties of tensor products can be found in \cite[Chapter 4]{9}. Our main concern here is when $W(A)$ and $W(A\otimes A)$ are circular discs (centered at the origin). Problems of this nature have also been considered in \cite{1}. The main result of this section is the following theorem.
{\bf Theorem 3.3.} \emph{Let $A$ be an $S_n$-matrix}. \emph{Then the following conditions are equivalent}:
(a) \emph{$W(A)$ is a circular disc centered at the origin},
(b) \emph{$W(A\otimes A)$ is a circular disc centered at the origin}, \emph{and}
(c) \emph{$A$ is unitarily similar to $J_n$}.
In preparation for its proof, we need the next lemma.
{\bf Lemma 3.4.} \emph{Let $A$ and $B$ be $n$-by-$n$ and $m$-by-$m$ nonzero matrices}, \emph{respectively}.
(a) $$a(A\otimes B)=\left\{\begin{array}{ll}
\min\{a(A), a(B)\} \ \ \ & \mbox{\em if} \ \ a(A), a(B)\ge 1,\\
a(A) & \mbox{\em if} \ \ a(B)=0,\\
a(B) & \mbox{\em if} \ \ a(A)=0.\end{array}\right.$$
(b) \emph{If $A$ and $B$ are partial isometries}, \emph{then so is $A\otimes B$}. \emph{The converse is false}.
(c) \emph{Assume that $A$ and $B$ are} (\emph{nonzero}) \emph{contractions}. \emph{Then $A$ and $B$ are partial isometries if and only if $A\otimes B$ is a partial isometry}.
(d) \emph{If $A$ and $B$ are} (\emph{nonzero}) \emph{contractions}, \emph{then} $p(A\otimes B)=\min\{p(A), p(B)\}$.
(e) \emph{$A$ is a partial isometry if and only if $A\otimes A$ is}. \emph{Thus}, \emph{in particular}, $p(A\otimes A)=p(A)$.
| 3,961 | 31,795 |
% (Extraneous web-scrape residue removed here: stray language tag and dataset-viewer footer text not belonging to the article.)