set
stringclasses 1
value | id
stringlengths 5
9
| chunk_text
stringlengths 1
115k
| chunk_num_tokens
int64 1
106k
| document_num_tokens
int64 58
521k
| document_language
stringclasses 2
values |
---|---|---|---|---|---|
train
|
0.95.5
|
\section{Integral estimates for stable solutions}\label{secint}
In this section, we establish some technical integral estimates for stable solutions of systems. Most of the ideas and methods applied in this section are inspired by the ones developed in the literature, see for example \cite{dfs, far1, far2, fg, fw}. We start with the Gelfand system.
\begin{lemma}\label{lemuvg} Suppose that $(u,v)$ is a solution of $ (G)_{\lambda,\gamma}$ when
the associated stability inequality (\ref{stabilityG}) holds. Then, there exists a positive constant $C_{\lambda,\gamma,|\Omega|}=C({\lambda,\gamma,|\Omega|})$ such that
\begin{equation*}
\int_{\Omega} e^{u+v} dx \le C_{\lambda,\gamma,|\Omega|} .
\end{equation*}
\end{lemma}
\begin{proof} Multiply the second equation of $ (G)_{\lambda,\gamma}$ with $e^{u}-1$ and integrate to get
\begin{equation*}
\lambda \int_{\Omega} (e^{u}-1) e^v dx= \int_{\Omega} \mathcal L u (e^{u}-1) dx.
\end{equation*}
From Lemma \ref{fgprop}, we get
\begin{equation*}
\int_{\Omega} \mathcal L u (e^{u}-1) dx = \frac{1}{2} \int_{\mathbb R^n} \int_{\mathbb R^n} \left[u(x)-u(z) \right] \left[e^{u(x)}-e^{u(z)} \right] J(x-z) dx dz .
\end{equation*}
Note that for $\alpha,\beta\in\mathbb R$, one can see that
\begin{equation*}
\left| e^{\frac{\beta}{2}} - e^{\frac{\alpha}{2}} \right|^2 \le \frac{1}{4} (e^\beta-e^\alpha)(\beta-\alpha).
\end{equation*}
Applying the above inequality for $\alpha=u(z)$ and $\beta=u(x)$, we obtain
\begin{equation*}
\left| e^{\frac{u(x)}{2}} - e^{\frac{u(z)}{2}} \right|^2 \le \frac{1}{4} (e^{u(x)}-e^{u(z)})(u(x)-u(z)).
\end{equation*}
From the above, we conclude
\begin{equation*}
\lambda \int_{\Omega} e^{u+v} dx \ge \lambda \int_{\Omega} (e^{u}-1) e^v dx \ge 2 \int_{\mathbb R^n} \int_{\mathbb R^n}
\left| e^{\frac{u(x)}{2}} - e^{\frac{u(z)}{2}} \right|^2J(x-z) dx dz .
\end{equation*}
Test the stability inequality, Corollary \ref{stablein}, on $\zeta=e^{\frac{u}{2}}-1$ to get
\begin{equation*}\label{}
\sqrt{\lambda\gamma} \int_{\Omega} e^{\frac{u+v}{2}} (e^{\frac{u}{2}}-1)^2 dx \le \frac{1}{2}\int_{{\mathbb{R}}^n} \int_{{\mathbb{R}}^n} \left|e^{\frac{u(x)}{2}}- e^{\frac{u(z)}{2}} \right|^2 J(x-z) dz dx .
\end{equation*}
Combining above inequalities, we conclude
\begin{equation}\label{rruv}
\sqrt{\lambda\gamma} \int_{\Omega} e^{\frac{u+v}{2}} (e^{\frac{u}{2}}-1)^2 dx \le \frac{1}{4} \lambda \int_{\Omega} e^{u+v} dx .
\end{equation}
Applying Young's inequality $e^{u/2}\le \frac{e^u}{4}+1$, we conclude
\begin{equation*}\label{}
\int_{\Omega} e^{\frac{u+v}{2}} e^{\frac{u}{2}} dx \le \frac{1}{4} \int_{\Omega} e^{\frac{u+v}{2}} e^u dx + \int_{\Omega} e^{\frac{u+v}{2}} dx .
\end{equation*}
From this and expanding the left-hand side of (\ref{rruv}), we obtain
\begin{eqnarray*}
\lambda \int_{\Omega} e^{u+v} dx + 8 \sqrt{\lambda\gamma} \int_{\Omega} e^{\frac{u+v}{2}} dx &\ge& 2 \sqrt{\lambda\gamma} \int_{\Omega} e^{\frac{u+v}{2}} e^{u} dx ,
\\
\gamma \int_{\Omega} e^{u+v} dx + 8 \sqrt{\lambda\gamma} \int_{\Omega} e^{\frac{u+v}{2}} dx & \ge& 2 \sqrt{\lambda\gamma} \int_{\Omega} e^{\frac{u+v}{2}} e^{v} dx .
\end{eqnarray*}
Multiplying these inequalities and applying the Cauchy-Schwarz inequality, i.e.
\begin{equation*}\label{}
\int_{\Omega} e^{\frac{u+v}{2}} e^{{u}} dx \int_{\Omega} e^{\frac{u+v}{2}} e^{{v}} dx \ge
\left(\int_{\Omega} e^{u+v} dx\right)^2,
\end{equation*}
we complete the proof.
\end{proof}
We now provide a counterpart of the above estimate for stable solutions of $ (E)_{\lambda,\gamma}$.
\begin{lemma}\label{lemuve} Suppose that $(u,v)$ is a solution of $ (E)_{\lambda,\gamma}$ when
the associated stability inequality (\ref{stabilityE}) holds. Then, there exists a positive constant $C_{\lambda,\gamma,|\Omega|}=C({\lambda,\gamma,|\Omega|})$ such that
\begin{equation}\label{inuvp}
\int_{\Omega} (1+u)^p(1+v)^p dx \le C_{\lambda,\gamma,|\Omega|} .
\end{equation}
\end{lemma}
\begin{proof}
Multiply the second equation of $ (E)_{\lambda,\gamma}$ with $(1+u)^p-1$ and integrate to get
\begin{equation*}
\lambda \int_{\Omega} [(1+u)^p-1] (1+v)^p dx= \int_{\Omega} \mathcal L u [(1+u)^p-1] dx.
\end{equation*}
From Lemma \ref{fgprop}, we get
\begin{equation*}
\int_{\Omega} \mathcal L u [(1+u)^p-1] dx = \frac{1}{2} \int_{\mathbb R^n} \int_{\mathbb R^n} \left[u(x)-u(z) \right] \left[(1+u(x))^p-(1+u(z))^p \right] J(x-z) dx dz .
\end{equation*}
Note that for $\alpha,\beta\in\mathbb R$, one can see that
\begin{equation*}
[(1+\alpha)^p-(1+\beta)^p](\alpha-\beta) \ge \frac{4p}{(p+1)^2} \left| (1+\alpha)^{\frac{p+1}{2}} - (1+\beta)^{\frac{p+1}{2}} \right|^2 .
\end{equation*}
Applying the above inequality for $\alpha=u(x)$ and $\beta=u(z)$, we obtain
\begin{equation*}
[(1+u(x))^p-(1+u(z))^p][u(x)-u(z)] \ge \frac{4p}{(p+1)^2} \left| (1+u(x))^{\frac{p+1}{2}} - (1+u(z))^{\frac{p+1}{2}} \right|^2 .
\end{equation*}
From the above, we conclude
\begin{equation*}
\lambda \int_{\Omega} (1+u)^p (1+v)^p dx \ge \frac{4p}{(p+1)^2} \frac{1}{2} \int_{\mathbb R^n} \int_{\mathbb R^n}
\left| (1+u(x))^{\frac{p+1}{2}} - (1+u(z))^{\frac{p+1}{2}} \right|^2 J(x-z) dx dz .
\end{equation*}
Test the stability inequality, Corollary \ref{stablein}, on $\zeta=(1+u)^{\frac{p+1}{2}}-1$ to get
\begin{eqnarray*}\label{}
&&\sqrt{\lambda\gamma} p \int_{\Omega} (1+u)^{\frac{p-1}{2}} (1+v)^{\frac{p-1}{2}} [(1+u)^{\frac{p+1}{2}}-1]^2 dx
\\&\le& \frac{1}{2}\int_{{\mathbb{R}}^n} \int_{{\mathbb{R}}^n} \left| (1+u(x))^{\frac{p+1}{2}}- (1+u(z))^{\frac{p+1}{2}} \right|^2 J(x-z) dz dx .
\end{eqnarray*}
Combining above inequalities, we conclude
\begin{equation}\label{rruv}
\sqrt{\lambda\gamma} p \int_{\Omega} (1+u)^{\frac{p-1}{2}} (1+v)^{\frac{p-1}{2}} [(1+u)^{\frac{p+1}{2}}-1]^2 dx
\le \frac{(p+1)^2}{4p} \lambda \int_{\Omega} (1+u)^p (1+v)^p dx .
\end{equation}
Expanding the left-hand side of the inequality and rearranging we get
\begin{eqnarray}\label{l1}
&&\sqrt{\lambda\gamma} p(1-\epsilon) \int_{\Omega} (1+u)^{\frac{p-1}{2}} (1+v)^{\frac{p-1}{2}} (1+u)^{p+1} dx
\\&&\nonumber \le \frac{(p+1)^2}{4p} \lambda \int_{\Omega} (1+u)^p (1+v)^p dx
+ \frac{ \sqrt{\lambda\gamma} p}{\epsilon} \int_{\Omega} (1+u)^{\frac{p-1}{2}} (1+v)^{\frac{p-1}{2}} dx,
\end{eqnarray}
where we have used the inequality $a\le \frac{\epsilon}{2}a^2+\frac{1}{2\epsilon}$ for any $\epsilon>0$. Similarly,
\begin{eqnarray}\label{l2}
&&\sqrt{\lambda\gamma} p(1-\epsilon) \int_{\Omega} (1+v)^{\frac{p-1}{2}} (1+u)^{\frac{p-1}{2}} (1+v)^{p+1} dx
\\&&\nonumber \le \frac{(p+1)^2}{4p} \gamma \int_{\Omega} (1+u)^p (1+v)^p dx
+ \frac{ \sqrt{\lambda\gamma} p}{\epsilon} \int_{\Omega} (1+u)^{\frac{p-1}{2}} (1+v)^{\frac{p-1}{2}} dx .
\end{eqnarray}
Note that from the Cauchy-Schwarz inequality we get
\begin{equation*}\label{}
\int_{\Omega} (1+u)^{\frac{p-1}{2}} (1+v)^{\frac{p-1}{2}} (1+u)^{p+1} dx \int_{\Omega} (1+v)^{\frac{p-1}{2}} (1+u)^{\frac{p-1}{2}} (1+v)^{p+1} dx \ge \left( \int_{\Omega} (1+u)^p (1+v)^p dx \right)^2 .
\end{equation*}
From this and
multiplying both sides of the above (\ref{l1}) and (\ref{l2}) we conclude
\begin{equation*}\label{}
\lambda\gamma \left[ p^2(1-\epsilon)^2- \left(\frac{(p+1)^2}{4p}\right)^2\right]\left( \int_{\Omega} (1+u)^p (1+v)^p dx \right)^2 \le C_{\epsilon,\lambda,\gamma} \left[ \int_{\Omega} (1+u)^{\frac{p-1}{2}} (1+v)^{\frac{p-1}{2}} dx\right]^2 ,
\end{equation*}
for small $\epsilon>0$. Note that $p^2- \left(\frac{(p+1)^2}{4p}\right)^2>0$ when $p>1$. Therefore, taking small enough $\epsilon>0$ and applying the H\"{o}lder's inequality, we complete the proof.
\end{proof}
| 3,297 | 31,238 |
en
|
train
|
0.95.6
|
Here is a counterpart of the above estimate for stable solutions of $ (M)_{\lambda,\gamma}$.
\begin{lemma}\label{lemuvm} Suppose that $(u,v)$ is a solution of $ (M)_{\lambda,\gamma}$ when
the associated stability inequality (\ref{stabilityM}) holds. Then, there exists a positive constant $C_{\lambda,\gamma,|\Omega|}=C({\lambda,\gamma,|\Omega|})$ such that
\begin{equation}
\int_{\Omega} (1-u)^{-p}(1-v)^{-p} dx \le C_{\lambda,\gamma,|\Omega|}.
\end{equation}
\end{lemma}
\begin{proof} The proof is similar to the one provided in Lemma \ref{lemuve}. Multiply the second equation of $ (M)_{\lambda,\gamma}$ with $(1-u)^{-p}-1$ and integrate to get
\begin{equation*}
\lambda \int_{\Omega} [(1-u)^{-p}-1] (1-v)^{-p} dx= \int_{\Omega} \mathcal L u [(1-u)^{-p}-1] dx.
\end{equation*}
From Lemma \ref{fgprop}, we get
\begin{equation*}
\int_{\Omega} \mathcal L u [(1-u)^{-p}-1] dx = \frac{1}{2} \int_{\mathbb R^n} \int_{\mathbb R^n} \left[u(x)-u(z) \right] \left[(1-u(x))^{-p}-(1-u(z))^{-p} \right] J(x-z) dx dz .
\end{equation*}
Note that for $\alpha,\beta\in\mathbb R$, one can see that
\begin{equation*}
[(1-\alpha)^{-p}-(1-\beta)^{-p}](\alpha-\beta) \ge \frac{4p}{(p-1)^2} \left| (1-\alpha)^{\frac{1-p}{2}} - (1-\beta)^{\frac{1-p}{2}} \right|^2 .
\end{equation*}
Applying the above inequality for $\alpha=u(x)$ and $\beta=u(z)$, we obtain
\begin{equation*}
[(1-u(x))^{-p}-(1-u(z))^{-p}][u(x)-u(z)] \ge \frac{4p}{(p-1)^2} \left| (1-u(x))^{\frac{-p+1}{2}} - (1-u(z))^{\frac{-p+1}{2}} \right|^2 .
\end{equation*}
From the above, we conclude
\begin{equation*}
\lambda \int_{\Omega} (1-u)^{-p} (1-v)^{-p} dx \ge \frac{4p}{(p-1)^2} \frac{1}{2} \int_{\mathbb R^n} \int_{\mathbb R^n}
\left| (1-u(x))^{\frac{-p+1}{2}} - (1-u(z))^{\frac{-p+1}{2}} \right|^2 J(x-z) dx dz .
\end{equation*}
Test the stability inequality, Corollary \ref{stablein}, on $\zeta=(1-u)^{\frac{-p+1}{2}}-1$ to get
\begin{eqnarray*}\label{}
&&\sqrt{\lambda\gamma} p \int_{\Omega} (1-u)^{-\frac{p+1}{2}} (1-v)^{-\frac{p+1}{2}} [(1-u)^{\frac{1-p}{2}}-1]^2 dx
\\&\le& \frac{1}{2}\int_{{\mathbb{R}}^n} \int_{{\mathbb{R}}^n} \left| (1-u(x))^{\frac{-p+1}{2}}- (1-u(z))^{\frac{-p+1}{2}} \right|^2 J(x-z) dz dx .
\end{eqnarray*}
Combining above inequalities, we conclude
\begin{equation}\label{rruv}
\sqrt{\lambda\gamma} p \int_{\Omega} (1-u)^{-\frac{p+1}{2}} (1-v)^{-\frac{p+1}{2}} [(1-u)^{\frac{1-p}{2}}-1]^2 dx
\le \frac{(p-1)^2}{4p} \lambda \int_{\Omega} (1-u)^{-p }(1-v)^{-p} dx .
\end{equation}
Expanding the left-hand side of the inequality and rearranging we get
\begin{eqnarray}\label{l1}
&&\sqrt{\lambda\gamma} p(1-\epsilon) \int_{\Omega} (1-u)^{-\frac{p+1}{2}} (1-v)^{-\frac{p+1}{2}} (1-u)^{-p+1} dx
\\&&\nonumber \le \frac{(p-1)^2}{4p} \lambda \int_{\Omega} (1-u)^{-p} (1-v)^{-p} dx
+ \frac{ \sqrt{\lambda\gamma} p}{\epsilon} \int_{\Omega} (1-u)^{-\frac{p+1}{2}} (1-v)^{-\frac{p+1}{2}} dx,
\end{eqnarray}
where we have used the inequality $a\le \frac{\epsilon}{2}a^2+\frac{1}{2\epsilon}$ for any $\epsilon>0$. Similarly,
\begin{eqnarray}\label{l2}
&&\sqrt{\lambda\gamma} p(1-\epsilon) \int_{\Omega} (1-v)^{-\frac{p+1}{2}} (1-u)^{-\frac{p+1}{2}} (1-v)^{-p+1} dx
\\&&\nonumber \le \frac{(p-1)^2}{4p} \gamma \int_{\Omega} (1-u)^{-p} (1-v)^{-p} dx
+ \frac{ \sqrt{\lambda\gamma} p}{\epsilon} \int_{\Omega} (1-u)^{-\frac{p+1}{2}} (1-v)^{-\frac{p+1}{2}} dx .
\end{eqnarray}
Note that from the Cauchy-Schwarz inequality we get
\begin{eqnarray*}\label{}
&&\int_{\Omega} (1-u)^{-\frac{p+1}{2}} (1-v)^{-\frac{p+1}{2}} (1-u)^{-p+1} dx \int_{\Omega} (1-v)^{-\frac{p+1}{2}} (1-u)^{-\frac{p+1}{2}} (1-v)^{-p+1} dx \\&& \ge \left( \int_{\Omega} (1-u)^{-p} (1-v)^{-p} dx \right)^2 .
\end{eqnarray*}
From this and
multiplying both sides of the above (\ref{l1}) and (\ref{l2}) we conclude
\begin{equation*}\label{}
\lambda\gamma \left[ p^2(1-\epsilon)^2- \left(\frac{(p-1)^2}{4p}\right)^2\right]\left( \int_{\Omega} (1-u)^{-p} (1-v)^{-p} dx \right)^2 \le C_{\epsilon,\lambda,\gamma} \left[ \int_{\Omega} (1-u)^{-\frac{p+1}{2}} (1-v)^{-\frac{p+1}{2}} dx\right]^2 ,
\end{equation*}
for small $\epsilon>0$. Note that $p^2- (\frac{(p-1)^2}{4p})^2>0$ when $p>\frac{1}{3}$. Therefore, taking small enough $\epsilon>0$ and applying H\"{o}lder's inequality when $p>1$, we complete the proof.
\end{proof}
In the next lemmata, we provide integral $L^q(\Omega)$ estimates for Gelfand, Lane-Emden and MEMS systems. We start with the Gelfand system and establish a relation between $\int_{\Omega} e^{\frac{2t+1}{2}u} e^{\frac{v}{2}} dx$ and $\int_{\Omega} e^{\frac{2t+1}{2}v} e^{\frac{u}{2}} dx$ for some constant $t>\frac{1}{2}$.
\begin{lemma}\label{lemuvgt} Under the same assumptions as Lemma \ref{lemuvg}, set
\begin{equation*}
X:=\int_{\Omega} e^{\frac{2t+1}{2}u} e^{\frac{v}{2}} dx, Y:=\int_{\Omega} e^{\frac{2t+1}{2}v} e^{\frac{u}{2}} dx, Z:=\int_{\Omega} e^{u} dx , W:= \int_{\Omega} e^{v} dx,
\end{equation*}
where $t>\frac{1}{2}$. Then,
\begin{eqnarray}\label{lgXg}
\begin{array}{lcl}
\sqrt{\lambda\gamma} X \le (\frac{t }{4}+\epsilon) \lambda X^{\frac{2t-1}{2t}} Y^{\frac{1}{2t}} + C_{\epsilon,\lambda,\gamma,|\Omega|} Z, \\
\sqrt{\lambda\gamma} Y \le (\frac{t }{4}+\epsilon) \gamma Y^{\frac{2t-1}{2t}} X^{\frac{1}{2t}} + C_{\epsilon,\lambda,\gamma,|\Omega|} W,
\end{array}
\end{eqnarray}
where $C_{\epsilon,\lambda,\gamma,|\Omega|} $ is a positive constant.
\end{lemma}
\begin{proof} Multiply the second equation of $ (G)_{\lambda,\gamma}$ with $e^{tu}-1$ when $t>\frac{1}{2}$ is a constant. Integrating implies that
\begin{equation*}
\lambda \int_{\Omega} (e^{tu}-1) e^v dx= \int_{\Omega} \mathcal L u (e^{tu}-1) dx.
\end{equation*}
From Lemma \ref{fgprop}, we get
\begin{equation*}
\int_{\Omega} \mathcal L u (e^{tu}-1) dx = \frac{1}{2} \int_{\mathbb R^n} \int_{\mathbb R^n} \left[u(x)-u(z) \right] \left[e^{tu(x)}-e^{tu(z)} \right] J(x-z) dx dz .
\end{equation*}
Note that for $\alpha,\beta\in\mathbb R$, one can see that
\begin{equation*}
\left| e^{\frac{\beta}{2}} - e^{\frac{\alpha}{2}} \right|^2 \le \frac{1}{4} (e^\beta-e^\alpha)(\beta-\alpha).
\end{equation*}
Applying the above inequality for $\alpha=tu(z)$ and $\beta=tu(x)$, we obtain
\begin{equation*}
\left| e^{\frac{tu(x)}{2}} - e^{\frac{tu(z)}{2}} \right|^2 \le \frac{t}{4} (e^{tu(x)}-e^{tu(z)})(u(x)-u(z)).
\end{equation*}
From the above, we conclude
\begin{equation*}
\lambda \int_{\Omega} (e^{tu}-1) e^v dx \ge \frac{2}{t} \int_{\mathbb R^n} \int_{\mathbb R^n}
\left| e^{\frac{tu(x)}{2}} - e^{\frac{tu(z)}{2}} \right|^2J(x-z) dx dz .
\end{equation*}
Test the stability inequality, Corollary \ref{stablein}, on $\zeta=e^{\frac{tu}{2}}-1$ to get
\begin{equation*}\label{}
\sqrt{\lambda\gamma} \int_{\Omega} e^{\frac{u+v}{2}} (e^{\frac{tu}{2}}-1)^2 dx \le \frac{1}{2}\int_{{\mathbb{R}}^n} \int_{{\mathbb{R}}^n} \left|e^{\frac{tu(x)}{2}}- e^{\frac{tu(z)}{2}} \right|^2 J(x-z) dz dx .
\end{equation*}
Combining above inequalities, we conclude
\begin{equation}\label{uvt2}
\sqrt{\lambda\gamma} \int_{\Omega} e^{\frac{u+v}{2}} (e^{\frac{tu}{2}}-1)^2 dx \le \frac{t}{4} \lambda \int_{\Omega} (e^{tu}-1) e^v dx .
\end{equation}
On the other hand, from the Young inequality we have
\begin{equation}\label{uvte1}
\int_{\Omega} e^{\frac{t+1}{2}u} e^{\frac{v}{2}} dx \le \frac{\epsilon}{2} \sqrt{\frac{\lambda }{\gamma}} \int_{\Omega} e^{tu} e^{v} dx + \frac{1}{2\epsilon} \sqrt{\frac{\gamma }{\lambda}} \int_{\Omega} e^{u} dx ,
\end{equation}
where $\epsilon$ is a positive constant. In addition, from the H\"{o}lder inequality we get
\begin{equation}\label{uvte2}
\int_{\Omega} e^{t u} e^v dx \le \left( \int_{\Omega} e^{\frac{2t+1}{2}u} e^{\frac{v}{2}} dx \right)^{\frac{2t-1}{2t}} \left( \int_{\Omega} e^{\frac{2t+1}{2}v} e^{\frac{u}{2}} dx \right)^{\frac{1}{2t}} .
\end{equation}
Now, expanding both sides of (\ref{uvt2}) we have
\begin{equation}\label{uvte3}
\sqrt{\lambda\gamma} \int_{\Omega} e^{\frac{2t+1}{2}u} e^{\frac{v}{2}} dx \le \frac{t}{4} \lambda \int_{\Omega} e^{tu} e^v dx + 2 \sqrt{\lambda\gamma} \int_{\Omega} e^{\frac{t+1}{2}u} e^{\frac{v}{2}} dx .
\end{equation}
Combining (\ref{uvte1}), (\ref{uvte2}) and (\ref{uvte3}) proves the first inequality in (\ref{lgXg}). With similar arguments one can show the second inequality.
\end{proof}
| 3,620 | 31,238 |
en
|
train
|
0.95.7
|
We now consider the Lane-Emden system and establish a relation between $\int_{\Omega} (1+u)^{\frac{p+1}{2}+t} (1+v)^{\frac{p-1}{2}} dx$ and $\int_{\Omega} (1+v)^{\frac{p+1}{2}+t} (1+u)^{\frac{p-1}{2}} dx$ for some constant $t>{1}$.
\begin{lemma}\label{lemuvet} Under the same assumptions as Lemma \ref{lemuve}, set
\begin{eqnarray*}
&&X:=\int_{\Omega} (1+u)^{\frac{p-1}{2}+t+1} (1+v)^{\frac{p-1}{2}} dx, \ \ Y:=\int_{\Omega} (1+v)^{\frac{p-1}{2}+t+1} (1+u)^{\frac{p-1}{2}} dx,
\\&&Z:=\int_{\Omega} (1+u)^{\frac{p-1}{2}}(1+v)^{\frac{p-1}{2}} dx ,
\end{eqnarray*}
for $t>1$. Then, for some constant $0<\epsilon<1$ we get
\begin{eqnarray}\label{lgXet}
\begin{array}{lcl}
\sqrt{\lambda\gamma} p(1-\epsilon) X \le \frac{(t+1)^2 }{4t} \lambda X^{\frac{2t-p+1}{2(t+1)}} Y^{\frac{p+1}{2(t+1)}} + C_{\epsilon,\lambda,\gamma,|\Omega|} Z, \\
\sqrt{\lambda\gamma} p(1-\epsilon) Y \le \frac{(t+1)^2 }{4t} \gamma Y^{\frac{2t-p+1}{2(t+1)}} X^{\frac{p+1}{2(t+1)}} + C_{\epsilon,\lambda,\gamma,|\Omega|} Z,
\end{array}
\end{eqnarray}
where $C_{\epsilon,\lambda,\gamma,|\Omega|} $ is a positive constant.
\end{lemma}
\begin{proof} Let $t>1$ be a constant. Multiply the second equation of $ (E)_{\lambda,\gamma}$ with $(1+u)^t-1$ and integrate to get
\begin{equation*}
\lambda \int_{\Omega} [(1+u)^t-1] (1+v)^p dx= \int_{\Omega} \mathcal L u [(1+u)^t-1] dx.
\end{equation*}
From Lemma \ref{fgprop}, we get
\begin{equation*}
\int_{\Omega} \mathcal L u [(1+u)^t-1] dx = \frac{1}{2} \int_{\mathbb R^n} \int_{\mathbb R^n} \left[u(x)-u(z) \right] \left[(1+u(x))^t-(1+u(z))^t \right] J(x-z) dx dz .
\end{equation*}
Note that for $\alpha,\beta\in\mathbb R$, one can see that
\begin{equation*}
[(1+\alpha)^t-(1+\beta)^t](\alpha-\beta) \ge \frac{4t}{(t+1)^2} \left| (1+\alpha)^{\frac{t+1}{2}} - (1+\beta)^{\frac{t+1}{2}} \right|^2 .
\end{equation*}
Applying the above inequality for $\alpha=u(x)$ and $\beta=u(z)$, we obtain
\begin{equation*}
[(1+u(x))^t-(1+u(z))^t][u(x)-u(z)] \ge \frac{4t}{(t+1)^2} \left| (1+u(x))^{\frac{t+1}{2}} - (1+u(z))^{\frac{t+1}{2}} \right|^2 .
\end{equation*}
From the above, we conclude
\begin{equation*}
\lambda \int_{\Omega} (1+u)^t (1+v)^p dx \ge \frac{4t}{(t+1)^2} \frac{1}{2} \int_{\mathbb R^n} \int_{\mathbb R^n}
\left| (1+u(x))^{\frac{t+1}{2}} - (1+u(z))^{\frac{t+1}{2}} \right|^2 J(x-z) dx dz .
\end{equation*}
Test the stability inequality, Corollary \ref{stablein}, on $\zeta=(1+u)^{\frac{t+1}{2}}-1$ to get
\begin{eqnarray*}\label{}
&&\sqrt{\lambda\gamma} p \int_{\Omega} (1+u)^{\frac{p-1}{2}} (1+v)^{\frac{p-1}{2}} [(1+u)^{\frac{t+1}{2}}-1]^2 dx
\\&\le& \frac{1}{2}\int_{{\mathbb{R}}^n} \int_{{\mathbb{R}}^n} \left| (1+u(x))^{\frac{t+1}{2}}- (1+u(z))^{\frac{t+1}{2}} \right|^2 J(x-z) dz dx ,
\end{eqnarray*}
Combining above inequalities, we conclude
\begin{equation}\label{rruv}
\sqrt{\lambda\gamma} p \int_{\Omega} (1+u)^{\frac{p-1}{2}} (1+v)^{\frac{p-1}{2}} [(1+u)^{\frac{t+1}{2}}-1]^2 dx
\le \frac{(t+1)^2}{4t} \lambda \int_{\Omega} (1+u)^t (1+v)^p dx .
\end{equation}
Expanding the left-hand side of the inequality and rearranging we get
\begin{eqnarray}\label{l1e}
&&\sqrt{\lambda\gamma} p(1-\epsilon) \int_{\Omega} (1+u)^{\frac{p-1}{2}} (1+v)^{\frac{p-1}{2}} (1+u)^{t+1} dx
\\&&\nonumber \le \frac{(t+1)^2}{4t} \lambda \int_{\Omega} (1+u)^t (1+v)^p dx
+ \frac{ \sqrt{\lambda\gamma} p}{\epsilon} \int_{\Omega} (1+u)^{\frac{p-1}{2}} (1+v)^{\frac{p-1}{2}} dx,
\end{eqnarray}
where we have used the inequality $a\le \frac{\epsilon}{2}a^2+\frac{1}{2\epsilon}$ for any $\epsilon>0$. From the H\"{o}lder's inequality we get
\begin{eqnarray*}\label{}
&& \int_{\Omega} (1+u)^t (1+v)^p dx
\\& \le &\left[ \int_{\Omega} (1+u)^{\frac{p-1}{2}} (1+v)^{\frac{p-1}{2}} (1+u)^{t+1} dx \right]^{\frac{1}{\beta}}
\left[ \int_{\Omega} (1+v)^{\frac{p-1}{2}} (1+u)^{\frac{p-1}{2}} (1+v)^{t+1} dx \right]^{1-\frac{1}{\beta}} ,
\end{eqnarray*}
where $\beta=\frac{2(t+1)}{2t-p+1}$. This and (\ref{l1e}) completes the proof of the first estimate in (\ref{lgXet}). Similarly, one can show the second estimate.
\end{proof}
We now consider the MEMS system with singular power nonlinearities and establish a relation between $\int_{\Omega} (1-u)^{\frac{1-p}{2}-t} (1-v)^{-\frac{p+1}{2}} dx$ and $\int_{\Omega} (1-v)^{\frac{1-p}{2}-t} (1-u)^{-\frac{p+1}{2}} dx$ for some constant $t>{1}$.
\begin{lemma}\label{lemuvmt} Under the same assumptions as Lemma \ref{lemuvm}, set
\begin{eqnarray*}
&& X:=\int_{\Omega} (1-u)^{-\frac{p+1}{2}-t+1} (1-v)^{-\frac{p+1}{2}} dx, \ \ Y:=\int_{\Omega} (1-v)^{-\frac{p+1}{2}-t+1} (1-u)^{-\frac{p+1}{2}} dx,
\\&& Z:=\int_{\Omega} (1-u)^{-\frac{p+1}{2}}(1-v)^{-\frac{p+1}{2}} dx ,
\end{eqnarray*}
for $t>1$. Then, for some constant $0<\epsilon<1$ we get
\begin{eqnarray}\label{lgXmt}
\begin{array}{lcl}
\sqrt{\lambda\gamma} p(1-\epsilon) X \le \frac{(t-1)^2 }{4t} \lambda X^{\frac{2t-p-1}{2(t-1)}} Y^{\frac{p-1}{2(t-1)}} + C_{\epsilon,\lambda,\gamma,|\Omega|} Z, \\
\sqrt{\lambda\gamma} p(1-\epsilon) Y \le \frac{(t-1)^2 }{4t} \gamma Y^{\frac{2t-p-1}{2(t-1)}} X^{\frac{p-1}{2(t-1)}} + C_{\epsilon,\lambda,\gamma,|\Omega|} Z,
\end{array}
\end{eqnarray}
where $C_{\epsilon,\lambda,\gamma,|\Omega|} $ is a positive constant.
\end{lemma}
\begin{proof} Let $t>1$ be a constant. Multiply the second equation of $ (M)_{\lambda,\gamma}$ with $(1-u)^{-t}-1$ and integrate to get
\begin{equation*}
\lambda \int_{\Omega} [(1-u)^{-t}-1] (1-v)^{-p} dx= \int_{\Omega} \mathcal L u [(1-u)^{-t}-1] dx.
\end{equation*}
From Lemma \ref{fgprop}, we get
\begin{equation*}
\int_{\Omega} \mathcal L u [(1-u)^{-t}-1] dx = \frac{1}{2} \int_{\mathbb R^n} \int_{\mathbb R^n} \left[u(x)-u(z) \right] \left[(1-u(x))^{-t}-(1-u(z))^{-t} \right] J(x-z) dx dz .
\end{equation*}
Note that for $\alpha,\beta\in\mathbb R$, one can see that
\begin{equation*}
[(1-\alpha)^{-t}-(1-\beta)^{-t}](\alpha-\beta) \ge \frac{4t}{(t-1)^2} \left| (1-\alpha)^{\frac{-t+1}{2}} - (1-\beta)^{\frac{-t+1}{2}} \right|^2 .
\end{equation*}
Applying the above inequality for $\alpha=u(x)$ and $\beta=u(z)$, we obtain
\begin{equation*}
[(1-u(x))^{-t}-(1-u(z))^{-t}][u(x)-u(z)] \ge \frac{4t}{(t-1)^2} \left| (1-u(x))^{\frac{-t+1}{2}} - (1-u(z))^{\frac{-t+1}{2}} \right|^2 .
\end{equation*}
From the above, we conclude
\begin{equation*}
\lambda \int_{\Omega} (1-u)^{-t} (1-v)^{-p} dx \ge \frac{4t}{(t-1)^2} \frac{1}{2} \int_{\mathbb R^n} \int_{\mathbb R^n}
\left| (1-u(x))^{\frac{-t+1}{2}} - (1-u(z))^{\frac{-t+1}{2}} \right|^2 J(x-z) dx dz .
\end{equation*}
Test the stability inequality, Corollary \ref{stablein}, on $\zeta=(1-u)^{\frac{-t+1}{2}}-1$ to get
\begin{eqnarray*}\label{}
&&\sqrt{\lambda\gamma} p \int_{\Omega} (1-u)^{-\frac{p+1}{2}} (1-v)^{-\frac{p+1}{2}} [(1-u)^{\frac{-t+1}{2}}-1]^2 dx
\\&\le& \frac{1}{2}\int_{{\mathbb{R}}^n} \int_{{\mathbb{R}}^n} \left| (1-u(x))^{\frac{-t+1}{2}}- (1-u(z))^{\frac{-t+1}{2}} \right|^2 J(x-z) dz dx .
\end{eqnarray*}
Combining above inequalities, we conclude
\begin{equation}\label{rruv}
\sqrt{\lambda\gamma} p \int_{\Omega} (1-u)^{-\frac{p+1}{2}} (1-v)^{-\frac{p+1}{2}} [(1-u)^{\frac{-t+1}{2}}-1]^2 dx
\le \frac{(t-1)^2}{4t} \lambda \int_{\Omega} (1-u)^{-t} (1-v)^{-p} dx .
\end{equation}
Expanding the left-hand side of the inequality and rearranging we get
\begin{eqnarray}\label{l1e}
&&\sqrt{\lambda\gamma} p(1-\epsilon) \int_{\Omega} (1-u)^{-\frac{p+1}{2}} (1-v)^{-\frac{p+1}{2}} (1-u)^{-t+1} dx
\\&&\nonumber \le \frac{(t-1)^2}{4t} \lambda \int_{\Omega} (1-u)^{-t} (1-v)^{-p} dx
+ \frac{ \sqrt{\lambda\gamma} p}{\epsilon} \int_{\Omega} (1-u)^{-\frac{p+1}{2}} (1-v)^{-\frac{p+1}{2}} dx,
\end{eqnarray}
where we have used the inequality $a\le \frac{\epsilon}{2}a^2+\frac{1}{2\epsilon}$ for any $\epsilon>0$ and $a\in\mathbb R$. From the H\"{o}lder's inequality we get
\begin{eqnarray*}\label{}
&& \int_{\Omega} (1-u)^{-t} (1-v)^{-p} dx
\\& \le &\left[ \int_{\Omega} (1-u)^{-\frac{p+1}{2}} (1-v)^{-\frac{p+1}{2}} (1-u)^{-t+1} dx \right]^{\frac{1}{\beta}}
\left[ \int_{\Omega} (1-v)^{-\frac{p+1}{2}} (1-u)^{-\frac{p+1}{2}} (1-v)^{-t+1} dx \right]^{1-\frac{1}{\beta}} ,
\end{eqnarray*}
where $\beta=\frac{2(t-1)}{2t-p-1}$. This and (\ref{l1e}) complete the proof of the first estimate in (\ref{lgXmt}). Similarly, one can show the second estimate.
\end{proof}
| 3,862 | 31,238 |
en
|
train
|
0.95.8
|
In regards to the gradient system with superlinear nonlinearities satisfying (\ref{R}) we establish an integral estimate that yields $L^2(\Omega)$ of the function $f'(u)g'(v)$. We then use this to conclude estimates on the nonlinearities of the gradient system. Our methods and ideas in the proof are inspired by the ones developed in \cite{cf} and originally by Nedev in \cite{Nedev}.
\begin{lemma}\label{lemab}
Suppose that $f$ and
$g$ both satisfy condition (\ref{R}) and $a:=f'(0)>0 $ and $ b:=g'(0)>0$. Assume that $ f',g'$ are convex and (\ref{deltaeps}) holds. Let $ (\lambda^*,\gamma^*) \in \Upsilon$ and $ (u,v)$ denote the extremal solution associated with $ (H)_{\lambda^*,\gamma^*}$.
Then, there exists a positive constant $ C < \infty$ such that
\begin{equation*}
\int_{\Omega} f'(u) g'(v) (f'(u)-a) (g'(v)-b) \le C.
\end{equation*}
\end{lemma}
\begin{proof} We obtain uniform estimates for any minimal solution $(u,v)$ of $(H)_{\lambda,\gamma}$ on the ray $ \Gamma_\sigma$ and then one sends $ \lambda \nearrow \lambda^*$ to obtain the same estimate for $ (u^*,v^*)$. Let $(u,v)$ denote a smooth minimal solution of $(H)_{\lambda,\gamma}$ on the ray $\Gamma_\sigma$ and put $ \zeta:= f'(u)-a$ and $ \eta:=g'(v)-b$ into (\ref{stabilityH}) to obtain
\begin{eqnarray*}\label{}
&&\int_{\Omega} \left[f''(u) g(v) (f'(u)-a)^2 + f(u) g''(v) (g'(v)-b)^2 + 2 f'(u) g'(v) (f'(u)-a)(g'(v)-b)\right]dx
\\ &\le&\nonumber
\frac{1}{2} \int_{{\mathbb{R}}^n} \int_{{\mathbb{R}}^n}\left( \frac{1}{\lambda} |f'(u(x))- f'(u(z))|^2 + \frac{1}{\gamma} |g'(v(x))- g'(v(z))|^2 \right) J(x-z) dz dx .
\end{eqnarray*}
Note that for all $\alpha,\beta\in\mathbb R$, one can see that
\begin{equation*}\label{}
|f'(\beta)-f'(\alpha)|^2=\left|\int_\alpha^\beta f''(s) ds\right|^2 \le \int_\alpha^\beta |f''(s)|^2 ds (\beta-\alpha)= (h_1(\beta)-h_1(\alpha))(\beta-\alpha),
\end{equation*}
when $h_1(s):=\int_0^s |f''(w)|^2 dw$. Similar inequality holds for the function $g$
that is
\begin{equation*}\label{}
|g'(\beta)-g'(\alpha)|^2 \le (h_2(\beta)-h_2(\alpha))(\beta-\alpha),
\end{equation*}
when $h_2(s):=\int_0^s |g''(w)|^2 dw$. Set $\beta=u(x)$ and $\alpha=u(z)$ and $\beta=v(x)$ and $\alpha=v(z)$ in the above inequalities to conclude
\begin{eqnarray*}\label{}
&&\frac{1}{2} \int_{{\mathbb{R}}^n} \int_{{\mathbb{R}}^n}\left( \frac{1}{\lambda} |f'(u(x))- f'(u(z))|^2 + \frac{1}{\gamma} |g'(v(x))- g'(v(z))|^2 \right) J(x-z) dz dx
\\&\le&
\frac{1}{2} \int_{{\mathbb{R}}^n} \int_{{\mathbb{R}}^n} \frac{1}{\lambda} [h_1(u(x))- h_1(u(z))][u(x)- u(z)] J(x-z) dz dx
\\&&+
\frac{1}{2} \int_{{\mathbb{R}}^n} \int_{{\mathbb{R}}^n} \frac{1}{\gamma} [h_2(v(x))- h_2(v(z))][v(x)- v(z)] J(x-z) dz dx.
\end{eqnarray*}
From the equation of system and Lemma \ref{fgprop}, we get
\begin{eqnarray*}\label{}
&&\frac{1}{2} \int_{{\mathbb{R}}^n} \int_{{\mathbb{R}}^n}\left( \frac{1}{\lambda} |f'(u(x)) - f'(u(z))|^2 + \frac{1}{\gamma} |g'(v(x))- g'(v(z))|^2 \right) J(x-z) dz dx
\\&\le&
\int_{\Omega} \left[ h_1(u) \frac{1}{\lambda} \mathcal L(u) + h_2(v) \frac{1}{\gamma} \mathcal L(v) \right]dx
\\&=&
\int_{\Omega} \left[ h_1(u)f'(u)g(v) + h_2(v)f(u)g'(v) \right]dx .
\end{eqnarray*}
From this, we conclude that
\begin{eqnarray}\label{stafg}
&&\int_{\Omega} \left[f''(u) g(v) (f'(u)-a)^2 + f(u) g''(v) (g'(v)-b)^2 + 2 f'(u) g'(v) (f'(u)-a)(g'(v)-b)\right]dx
\\ &\le&\nonumber
\int_{\Omega} \left[ h_1(u)f'(u)g(v) + h_2(v)f(u)g'(v) \right] dx .
\end{eqnarray}
Given the assumptions, there is some $ M>1$ large enough and $0<\delta<1$ such that
$ h_1(u) \le \delta f''(u)(f'(u)-a)$ for all $ u \ge M$. Then, we have
\begin{equation*}
\int_{\Omega} h_1(u) g(v) f'(u) = \int_{u \ge M} + \int_{u <M} \le \delta \int f''(u) g(v) (f'(u)-a)^2 + \int_{u<M} h_1(u) g(v) f'(u) .
\end{equation*}
We now estimate the last integral in the above. Let $ k \ge 1$ denote a natural number. Then,
\begin{equation*}
\int_{u<M} h_1(u) g(v) f'(u) = \int_{u<M, v<kM} + \int_{u<M, v \ge kM} \le C(k,M) + \int_{u<M, v \ge kM}h_1(u) g(v) f'(u) .
\end{equation*}
Note that this integral is bounded above by
\begin{equation*}
\sup_{u<M} \frac{h_1(u)}{(f'(u)-a)} \sup_{v >kM} \frac{g(v)}{(g'(v)-b) g'(v)} \int (f'(u)-a) (g'(v)-b) f'(u) g'(v).
\end{equation*}
From the above estimates, we conclude that for sufficiently large $ M$ and for all $1 \le k$ there is some positive constant $ C(k,M)$ and $0<\delta <1$ that
\begin{eqnarray}\label{h1gf}
\ \ \ \ \ \int_{\Omega} h_1(u) g(v) f'(u) &\le & \delta \int f''(u) g(v) (f'(u)-a)^2
+ C(k,M)
\\&& \nonumber+ \sup_{u<M} \frac{h_1(u)}{(f'(u)-a)} \sup_{v >kM} \frac{g(v)}{(g'(v)-b) g'(v)} \int (f'(u)-a) (g'(v)-b) f'(u) g'(v).
\end{eqnarray}
Applying the same argument, one can show that for sufficiently large $M$ and for all $ 1 \le k$ there is some positive constant $ D(k,M)$ and $0<\epsilon <1$ that
\begin{eqnarray}\label{h2gf}
\ \ \ \ \ \int_{\Omega} h_2(v) g'(v) f(u) &\le & \epsilon \int f(u) g''(v) (g'(v)-b)^2
+ D(k,M)
\\&& \nonumber + \sup_{v<M} \frac{h_2(v)}{(g'(v)-b)} \sup_{u >kM} \frac{f(u)}{(f'(u)-a) f'(u)} \int (f'(u)-a) (g'(v)-b) f'(u) g'(v).
\end{eqnarray}
Note that $ f''(u),g''(v) \rightarrow \infty$ when $u,v\to\infty$. This implies that
\begin{equation*}
\lim_{k \rightarrow \infty} \sup_{u >kM} \frac{f(u)}{(f'(u)-a) f'(u)} =0 \ \ \text{and} \ \ \lim_{k \rightarrow \infty} \sup_{v >kM} \frac{g(v)}{(g'(v)-b) g'(v)} =0 .
\end{equation*}
Now set $ k$ to be sufficiently large and substitute (\ref{h1gf}) and (\ref{h2gf}) in (\ref{stafg}) to
complete the proof. Note that all the integrals in (\ref{stafg}) are bounded independently of $ \lambda$ and $\gamma$.
\end{proof}
| 2,567 | 31,238 |
en
|
train
|
0.95.9
|
\section{Regularity of the extremal solution; Proof of Theorem \ref{thmg}-\ref{nedev}}\label{secreg}
In this section, we apply the integral estimates established in the latter section to prove regularity results for extremal solutions of systems mentioned in the introduction earlier.
\\
\\
\noindent {\it Proof of Theorem \ref{thmg}}.
We shall provide the proof for the case of $n>2s$, since otherwise is straightforward. Let $(u,v)$ be the smooth minimal solution of $ (G)_{\lambda,\gamma}$ for $\frac{\lambda^*}{2}<\lambda<\lambda^*$ and $\frac{\gamma^*}{2}<\gamma<\gamma^*$. From Lemma \ref{lemuvg} we conclude that
\begin{equation}
\int_{\Omega} e^{u+v} dx \le C_{\lambda,\gamma,|\Omega|} .
\end{equation}
From this and Lemma \ref{lemuvgt}, we conclude that for $t>\frac{1}{2}$
\begin{equation}
\lambda\gamma \left[1-\left(\frac{t }{4}+\epsilon\right)^2\right] XY \le C_{\epsilon,\lambda,\gamma,|\Omega|}
\left(1+X^{\frac{2t-1}{2t}} Y^{\frac{1}{2t}} + Y^{\frac{2t-1}{2t}} X^{\frac{1}{2t}}\right) .
\end{equation}
Therefore, for every $t<4$ either $X$ or $Y$ must be
bounded where $X$ and $Y$ are given by
\begin{equation}
X:=\int_{\Omega} e^{\frac{2t+1}{2}u} e^{\frac{v}{2}} dx \ \ \text{and} \ \ Y:=\int_{\Omega} e^{\frac{2t+1}{2}v} e^{\frac{u}{2}} dx.
\end{equation}
Without loss of generality, assume that $\lambda\le \gamma$ implies that $u\le v$ and therefore $e^u$ is bounded in $L^{q}(\Omega)$ for $q=t+1<5$. Therefore, in light of Proposition \ref{propregL} we have $u\in L^\infty(\Omega)$ for $\frac
{n}{2s}<5$ that is $n<10s$.
Now, let $(u,v)$ be the smooth minimal solution of $ (E)_{\lambda,\gamma}$ for $\frac{\lambda^*}{2}<\lambda<\lambda^*$ and $\frac{\gamma^*}{2}<\gamma<\gamma^*$. From Lemma \ref{lemuve} we conclude that
\begin{equation}\label{}
\int_{\Omega} (1+u)^p(1+v)^p dx \le C_{\lambda,\gamma,|\Omega|} .
\end{equation}
From this and Lemma \ref{lemuvet}, we conclude that
\begin{equation}
\lambda\gamma \left[p^2(1-\epsilon)^2 -\left( \frac{(t+1)^2 }{4t} \right)^2\right] XY \le C_{\epsilon,\lambda,\gamma,|\Omega|}
\left(1+X^{\frac{2t-p+1}{2(t+1)}} Y^{\frac{p+1}{2(t+1)}} + Y^{\frac{2t-p+1}{2(t+1)}} X^{\frac{p+1}{2(t+1)}} \right) .
\end{equation}
Therefore, for every $1\le t< 2p+2\sqrt{p(p-1)}-1$ either $X$ or $Y$ must be
bounded where $X$ and $Y$ are given by
\begin{equation}
X:=\int_{\Omega} (1+u)^{\frac{p-1}{2}+t+1} (1+v)^{\frac{p-1}{2}} dx \ \ \text{and} \ \ Y:=\int_{\Omega} (1+v)^{\frac{p-1}{2}+t+1} (1+u)^{\frac{p-1}{2}} dx.
\end{equation}
Without loss of generality, assume that $\lambda\le \gamma$, which implies that $u\le v$; therefore $(1+u)$ is bounded in $L^{q}(\Omega)$ for $q=p+t$. We now rewrite the system $ (E)_{\lambda,\gamma}$ for the extremal solution $(u,v)$, that is,
\begin{eqnarray*}
\left\{ \begin{array}{lcl}
\mathcal L u &=&\lambda^* c_{v}(x) v + \lambda^* \qquad \text{in} \ \ \Omega, \\
\mathcal L v &=& \gamma^* c_{u}(x) u +\gamma^* \qquad \text{in} \ \ \Omega, \\
\end{array}\right.
\end{eqnarray*}
when $0\le c_{v}(x)= \frac{(1+v)^{p}-1}{v} \le p v^{p-1}$ and $0\le c_{u}(x)= \frac{(1+u)^{p}-1}{u} \le p u^{p-1}$ where a convexity argument is applied. From the regularity theory, Proposition \ref{propregL}, we conclude that $v\in L^\infty(\Omega)$ provided $ c_{u}(x)\in L^{r}(\Omega)$ when $r>\frac{n}{2s}$. This implies that $\frac{n}{2s} <\frac{p+t}{p-1}$ for $1\le t<2p+2\sqrt{p(p-1)}-1$. This completes the proof for the case of the Lane-Emden system $ (E)_{\lambda,\gamma}$. The proof for the case of $ (M)_{\lambda,\gamma}$ is very similar and relies on applying Lemma \ref{lemuvm} and Lemma \ref{lemuvmt}.
$\Box$
\begin{remark}\label{rem1}
Even though the above theorem is optimal as $s\to 1$, it is not optimal for smaller values of $0<s<1$.
\end{remark}
In this regard, consider the case of $\lambda=\gamma$ and the Gelfand system turns into ${(-\Delta)}^s u= \lambda e^u$ in the entire space $\mathbb R^n$. It is known that the explicit singular solution $u^*(x)=\log \frac{1}{|x|^{2s}}$ is a stable solution of the scalar Gelfand equation if and only if
\begin{equation*}
\frac{ \Gamma(\frac{n}{2}) \Gamma(1+s)}{\Gamma(\frac{n-2s}{2})} \le
\frac{ \Gamma^2(\frac{n+2s}{4})}{\Gamma^2(\frac{n-2s}{4})} ,
\end{equation*}
for the constant
$$\lambda=2^{2s}\frac{ \Gamma(\frac{n}{2}) \Gamma(1+s)}{\Gamma(\frac{n-2s}{2})} .
$$
This implies that the extremal solution of the fractional Gelfand equation should be regular for
\begin{equation*}
\frac{ \Gamma(\frac{n}{2}) \Gamma(1+s)}{\Gamma(\frac{n-2s}{2})} >
\frac{ \Gamma^2(\frac{n+2s}{4})}{\Gamma^2(\frac{n-2s}{4})} .
\end{equation*}
In particular, the extremal solution should be bounded when $n\le 7$ for $0<s<1$. We refer interested readers to \cite{rs,r1} for more details. Now, consider the case of the Lane-Emden equation $(-\Delta)^s u= \lambda u^p$ in the entire space $\mathbb R^n$. It is also known that the explicit singular solution $u_s(x) = A |x|^{-\frac{2s}{p-1}}$ where the constant $A$ is given by
\begin{equation*}
A^{p-1} =\frac{\Gamma(\frac{n}{2}-\frac{s}{p-1}) \Gamma(s+\frac{s}{p-1})}{\Gamma(\frac{s}{p-1}) \Gamma(\frac{n-2s}{2}-\frac{s}{p-1})} ,
\end{equation*}
is a stable solution of the scalar Lane-Emden equation if and only if
\begin{equation*}\label{}
p \frac{\Gamma(\frac{n}{2}-\frac{s}{p-1}) \Gamma(s+\frac{s}{p-1})}{\Gamma(\frac{s}{p-1}) \Gamma(\frac{n-2s}{2}-\frac{s}{p-1})}
\le
\frac{ \Gamma^2(\frac{n+2s}{4}) }{\Gamma^2(\frac{n-2s}{4})}.
\end{equation*}
This yields that the extremal solution of the above equation should be regular for
\begin{equation*}\label{}
p \frac{\Gamma(\frac{n}{2}-\frac{s}{p-1}) \Gamma(s+\frac{s}{p-1})}{\Gamma(\frac{s}{p-1}) \Gamma(\frac{n-2s}{2}-\frac{s}{p-1})}
>
\frac{ \Gamma^2(\frac{n+2s}{4}) }{\Gamma^2(\frac{n-2s}{4})}.
\end{equation*}
As $s\to 1$, the above inequality is consistent with the dimensions given in (\ref{dime}). For more information, we refer interested readers to Wei and the author in \cite{fw} and Davila et al. in \cite{ddw}. Given the above, the proof of the optimal dimension for regularity of extremal solutions remains an open problem.
We now provide a proof for Theorem \ref{nedev} that deals with a regularity result for the gradient system $ (H)_{\lambda,\gamma}$ with general nonlinearities $f$ and $g$. Note that for the case of local scalar equations such results are provided by Nedev in \cite{Nedev} for $n=3$ and Cabr\'{e} in \cite{Cabre} for $n=4$. For the case of scalar equation with the fractional Laplacian operator, Ros-Oton and Serra in \cite{rs} established regularity results for dimensions $n<4s$ when $0<s<1$. For the case of local gradient systems, that is when $s=1$, such a regularity result is established by the author and Cowan in \cite{cf} in dimensions $n\le 3$.
\\
\\
\noindent {\it Proof of Theorem \ref{nedev}}.
We suppose that $ (\lambda^*,\gamma^*) \in \Upsilon$ and $(u,v)$ is the associated extremal solution of $(G)_{\lambda^*,\gamma^*}$. Set $ \sigma=\frac{\gamma^*}{\lambda^*}$.
From Lemma \ref{lemab}, we conclude that $ f'(u) g'(v) \in L^2(\Omega)$. Note that this and the convexity of $ g$ show that
\begin{equation}
\int_\Omega \frac{f'(u)^2 g(v)^2}{(v+1)^2} \le C.
\end{equation}
Note that $ (-\Delta)^s u\in L^1$ and $ (-\Delta)^s v\in L^1$ and hence we have $ u,v \in L^{p}$ for any $ p <\frac{n}{n-2s}$. We now use the domain decomposition method as in \cite{Nedev,cf}. Set
\begin{eqnarray*}
\Omega_1 &:=& \left\{ x: \frac{f'(u)^2 g(v)^2}{(v+1)^2} \ge f'(u)^{2-\alpha} g(v)^{2-\alpha} \right\},\\
\Omega_2 &:=& \Omega \backslash \Omega_1 = \left\{ x : f'(u) g(v) \le (v+1)^\frac{2}{\alpha} \right\},
\end{eqnarray*}
where $ \alpha $ is a positive constant and will be fixed later. First note that
\begin{equation}
\int_{\Omega_1} (f'(u) g(v))^{2-\alpha} \le \int_\Omega \frac{f'(u)^2 g(v)^2}{(v+1)^2} \le C.
\end{equation}
Similarly we have
\begin{equation}
\int_{\Omega_2} (f'(u) g(v))^p \le \int_\Omega (v+1)^\frac{2p}{\alpha}.
\end{equation}
We shall consider the case of $n>2s$, and $n\le 2s$ is straightforward as discussed in Section \ref{secpre}. Taking $ \alpha= \frac{4(n-2s)}{3n-4s}$ and using the $ L^{p}$-bound on $ v$ for $p<\frac{n}{n-2s}$ shows that $ f'(u) g(v) \in L^{p}(\Omega)$ for $p<\frac{2n}{3n-4s}$. By a symmetry argument we also have $ f(u) g'(v) \in L^{p}(\Omega)$ for $p<\frac{2n}{3n-4s}$. Therefore, $(-\Delta )^s u,(-\Delta )^s v\in L^p(\Omega)$ when $p<\frac{2n}{3n-4s}$. From elliptic estimates we conclude that $u,v\in L^p(\Omega)$ when $p<\frac{2n}{3n-8s}$ for $n>\frac{8}{3}s$ and when $p<\infty$ for $n=\frac{8}{3}s$ and when $p\le \infty$ for $n<\frac{8}{3}s$. This completes the proof when $2s\le n<\frac{8}{3}s$.
\\
Now, set $ \alpha= \frac{3n-8s}{2(n-2s)}$. From the above estimate $u,v\in L^p(\Omega)$ when $p<\frac{2n}{3n-8s}$ for $n>\frac{8}{3}s$ on $v$ and domain decomposition arguments, we get $ f(u) g'(v), f'(u) g(v)\in L^{p}(\Omega)$ for $p<\frac{n}{2(n-2s)}$. Therefore, $(-\Delta )^s u,(-\Delta )^s v\in L^p(\Omega)$ with the latter bounds for $p$. From elliptic estimates we get
$u,v\in L^p(\Omega)$ when $p<\frac{n}{2(n-3s)}$ for $n>3s$ and when $p<\infty$ for $n=3s$ and when $p\le \infty$ for $n<3s$. We perform the above arguments once more to arrive at
$u,v\in L^p(\Omega)$ when $p<\frac{2n}{5n-16s}$ for $n>\frac{16}{5}s$ and when $p<\infty$ for $n=\frac{16}{5}s$ and when $p\le \infty$ for $n<\frac{16}{5}s$. This completes the proof for
$\frac{8}{3}s\le n < \frac{16}{5}s$ containing $n=3s$.
\\
Now, suppose that $u,v\in L^p(\Omega)$ for $p<p_*$. Then, notice that
\begin{equation*}
\int_{\Omega_2} (f'(u) g(v))^p \le \int_\Omega (v+1)^\frac{2p}{\alpha} \le C \ \ \text{when} \ \ p<\frac{\alpha p_*}{2}.
\end{equation*}
Set $ \alpha= \frac{4}{p_*+2}$. Then, from the above we conclude that $ f'(u) g(v) \in L^{p}(\Omega)$ for $p<\frac{2p_*}{p_*+2}$ and similarly $ f(u) g'(v) \in L^{p}(\Omega)$ for $p<\frac{2p_*}{p_*+2}$. It follows that $(-\Delta)^s u,(-\Delta)^s v\in L^p(\Omega)$ for the same range of $p$. From elliptic estimates we conclude that
\begin{equation*}
u,v\in L^{p}(\Omega) \ \ \text{when} \ \ p<\frac{2p_*n}{p_*( n-4s) +2n} .
\end{equation*}
Applying the above elliptic estimates arguments, we conclude the boundedness of solutions when $n<4s$.
$\Box$
We end this section with power polynomial nonlinearities for the gradient system $ (H)_{\lambda^*,\gamma^*}$ and we provide regularity of the extremal solution. For the case of local systems, that is when $s=1$, a similar result is given in \cite{cf}. Due to the technicality of the proof we omit it here.
\begin{thm} \label{grade} Let $ f(u)=(1+u)^p $ and $ g(v)=(1+v)^q$ when $ p,q>2$. Assume that $(\lambda^*,\gamma^*) \in \Upsilon$. Then, the associated extremal solution of $ (H)_{\lambda^*,\gamma^*}$ is bounded provided
\begin{equation} \label{gradp}
n < 2s + \frac{4s}{p+q-2} \max\{ T(p-1), T(q-1)\} ,
\end{equation}
when $ T(t):= t+ \sqrt{t(t-1)}$.
\end{thm}
\vspace*{.4 cm }
\noindent {\it Acknowledgment}. The author would like to thank Professor Xavier Ros-Oton for online communications and comments in regards to Section \ref{secpre}. The author is grateful to Professor Xavier Cabr\'{e} for bringing reference \cite{sp} to his attention and for the comments in regards to Section \ref{secin}.
\end{document}
\begin{document}
\title{On the group structure of $[\Omega \mathbb S^2, \Omega Y]$}
\author{Marek Golasi\'nski}
\address{Institute of Mathematics, Casimir the Great University,
pl.\ Weyssenhoffa 11, 85-072 Bydgoszcz, Poland}
\email{[email protected]}
\author{Daciberg Gon\c calves}
\address{Dept. de Matem\'atica - IME - USP, Caixa Postal 66.281 - CEP 05314-970,
S\~ao Paulo - SP, Brasil}
\email{[email protected]}
\author{Peter Wong}
\address{Department of Mathematics, Bates College, Lewiston, ME 04240, U.S.A.}
\email{[email protected]}
\thanks{}
\begin{abstract} Let $J(X)$ denote the James
construction on a space $X$ and $J_n(X)$ be the $n$-th stage of the James filtration of $J(X)$. It is known that $[J(X),\Omega Y]\cong \lim\limits_{\leftarrow}
[J_n(X),\Omega Y]$ for any space $Y$. When $X=\mathbb S^1$, the circle, $J(\mathbb S^1)=\Omega {\mathbb S}igma \mathbb S^1=\Omega \mathbb S^2$. Furthermore,
there is a bijection between $[J(\mathbb S^1),\Omega Y]$ and the product $\prod_{i=2}^\infty \pi_i(Y)$, as sets.
In this paper, we describe the group structure of $[J_n(\mathbb S^1),\Omega Y]$ by
determining the co-multiplication structure on the suspension $\Sigma J_n(\mathbb S^1)$.
\end{abstract}
\date {\today}
\keywords{Cohen groups, Fox torus homotopy groups, James construction, Whitehead products}
\subjclass[2010]{Primary: 55Q05, 55Q15, 55Q20; secondary: 55P35}
\maketitle
\section*{Introduction}\setcounter{section}{0}
Groups of homotopy classes of maps $[\Omega \Sigma X, \Omega Y]$ from the loop space of the suspension $\Sigma X$ to the loop space of $Y$ play an important role in classical homotopy theory. These groups have been used to give functorial homotopy decompositions of loop suspensions via modular representation theory and to investigate the intricate relationship between Hopf invariants and looped Whitehead products. In the special case when $X={\mathbb S}^1$ and $Y={\mathbb S}^2$, the authors in \cite{cohen,cohen-sato,cohen-wu} explored the relationship between $[\Omega {\mathbb S}^2, \Omega {\mathbb S}^2]$ and Artin's pure braid groups via Milnor's
free group construction $F[K]$ (see \cite{M2}) for a simplicial set $K$. For $K={\mathbb S}^1$, the geometric realization of $F[{\mathbb S}^1]$ has the homotopy type of $\Omega \Sigma {\mathbb S}^1=J({\mathbb S}^1)$, the James construction on ${\mathbb S}^1$. In general, the James construction $J(X)$ on $X$ admits a filtration $J_1(X) \subseteq J_2(X) \subseteq\cdots$ so that
$\displaystyle{[J(X),\Omega Y]\cong\lim_{\leftarrow} [J_n(X),\Omega Y]}$. The {\it Cohen groups} $[J_n(X),\Omega Y]$ and the {\it total Cohen group} $[J(X),\Omega Y]$ have been studied in \cite{wu2}
via the simplicial group $\{[X^n, \Omega Y]\}_{n\ge 1}$. In his original paper \cite{J}, James introduced $J(X)$ as a model for $\Omega\Sigma X$, the loop
space of the suspension $\Sigma X$ of the space $X$ and showed that $\Sigma J(X)$ has the homotopy type of the suspension of the wedge of self smash products of $X$. This shows that $[\Omega {\mathbb S}^2, \Omega Y]=[J({\mathbb S}^1),\Omega Y]$, \emph{as a set}, is in one-to-one correspondence with the direct product $\prod_{i= 2}^\infty \pi_i(Y)$ of the higher homotopy groups of $Y$. However, the group structure of $[J({\mathbb S}^1),\Omega Y]$ is far from being abelian.
In \cite{cohen-sato}, the group $[J_n({\mathbb S}^1),\Omega Y]$ is shown to be a central extension with kernel $\pi_{n+1}(Y)$ and quotient
$[J_{n-1}({\mathbb S}^1),\Omega Y]$. In \cite{ggw5}, it is shown that $\pi_{n+1}(Y)$ is in fact central in a larger group $\tauu_{n+1}(Y)$,
the Fox torus homotopy group. The proof in \cite{ggw5} relies on embedding $[J_n({\mathbb S}^1),\Omega Y]$ into $\tauu_{n+1}(Y)$. Fox \cite{fox}
introduced the torus homotopy groups such that the Whitehead products, when embedded as elements of a torus homotopy group, become commutators.
Indeed, the Fox torus homotopy group $\tauu_n(Y)$ is completely determined by the homotopy groups $\pi_i(Y)$ for $1\le i\le n$ and the Whitehead products.
Furthermore, Fox determined whether $\alpha \in \pi_{k+1}(Y), \beta\in \pi_{l+1}(Y)$, when embedded in
$\tauu_n(Y)$ commute. Following \cite{fox}, we consider a $k$-subset ${\bf a}$ and an $l$-subset ${\bf b}$ of the set of indices $\{1,2,\ldots, n\}$
for some $n\ge k+l$. The sets ${\bf a}$ and ${\bf b}$ determine two embeddings $\pi_{i+1}(Y) \to \tauu_{n+1}(Y)$ for $i=k,l$. Denote by $\alpha^{{\bf a}}$ and $\beta^{{\bf b}}$ the corresponding images of $\alpha$ and $\beta$ in $\tauu_{n+1}(Y)$.
\begin{proM}
\begin{enumerate}
\item[(1)] If ${\bf a} \cap {\bf b}=\emptyset$ then $(\alpha^{{\bf a}},\beta^{{\bf b}})=(-1)^{w+(|{\bf a}|-1)}[\alpha, \beta]^{{\bf a}\cup {\bf b}}$.\\
\item[(2)] If ${\bf a} \cap {\bf b}\ne \emptyset$ then $(\alpha^{{\bf a}},\beta^{{\bf b}})=1$.
\end{enumerate}
Here $(x,y)$ denotes the commutator $xyx^{-1}y^{-1}$ of $x$ and $y$ and $w=\sum_{i\in {\bf a}, j\in {\bf b}} w_{i,j}$, where $w_{i,j}=1$ if $j<i$ and $w_{i,j}=0$ otherwise.
\end{proM}
Since the groups $[J_n({\mathbb S}^1),\Omega Y]$ can be embedded as subgroups of the Fox torus homotopy groups $\tauu_{n+1}(Y)$, the group structure of
$[J_n({\mathbb S}^1),\Omega Y]$ is induced by that of $\tauu_{n+1}(Y)$. In this paper, we obtain the group structure of $[J_n(\mathbb S^1),\Omega Y]$ via the torus homotopy group studied in \cite{ggw4} together with a recent result by Arkowitz and Lee \cite{AL2} on the co-$H$-structures of a wedge of spheres. The following is our main theorem.
\begin{thmM} The suspension co-$H$-structure $$\overline{\mu}_n : \Sigma J_n(\mathbb S^1)
\to \Sigma J_n(\mathbb S^1) \vee \Sigma J_n(\mathbb S^1)$$ for $n\ge 1$ is given by $\overline{\mu}_n k_i\simeq\iota_1k_i+\iota_2k_i+P_i$,
where the perturbation $P_i\simeq\sum_{l=0}^i\varphii(i-l,i-1)P_{l,i}$ with the Fox function $\varphii$ and
$$P_{l,i}: \mathbb S^{i+1}\to \mathbb S^{i-l+1}\vee \mathbb S^{l+1}\stackrel{k_{i-l}\vee k_l}{\hookrightarrow}\Sigma J_n(\mathbb S^1)\vee \Sigma J_n(\mathbb S^1)$$
determined by the Whitehead product map $\mathbb S^{i+1}\to \mathbb S^{i-l+1}\vee \mathbb S^{l+1}$ for $i=0,\ldots,n$ and $l=0,\ldots,i$.
\end{thmM}
Here the perturbations $P_i$ are used in \cite{AL2} to measure the deviation of the co-$H$-structure of a space of homotopy type of a wedge of spheres from the usual coproduct of the suspension co-$H$-structures of spheres. The function $\varphii$ derived from Proposition \ref{gen-Fox-W-product} is used to determine the coefficients of Whitehead products.
This paper is organized as follows. In Section 1, we recall the Fox torus homotopy groups $\tauu_{n+1}(Y)$ and give examples of $[J_n({\mathbb S}^1),\Omega Y]$,
where the group structure can be obtained via $\tauu_{n+1}(Y)$. In Section $2$, we obtain a closed form solution to a recurrence relation on the function
$\varphii$ which gives the coefficient of the Whitehead product in Proposition \ref{gen-Fox-W-product}. Section $3$ combines the result of \cite{AL2}
and the result of Section $2$ to obtain the co-$H$-structure of $\Sigma J_n({\mathbb S}^1)$. In Section 4, we revisit and generalize the examples of Section $1$.
In particular, we give necessary and sufficient conditions (see Proposition \ref{3_stem}) for $[J_{4n+1}({\mathbb S}^1),\Omega {\mathbb S}^{2n}]$ to be abelian when $n$
is not a power of $2$. We also investigate when two torsion elements in $[J({\mathbb S}^1),\Omega Y]$ commute. To end
this introduction, we want to point out that the Fox torus homotopy groups $\tauu_{n+1}({\rm Conf}(n))$ of the
configuration space of $n$ distinct points in $\mathbb R^3$ were used by F.\ Cohen et al.\ \cite{cohen-et-al} to
give an alternative proof of a result of U.\ Koschorke, which is related to Milnor's link homotopy \cite{M} and
homotopy string links examined by Habegger-Lin \cite{H-L}, that the $\kappa$-invariant for the Brunnian links
in $\mathbb R^3$ is injective.
We thank the anonymous referee for his/her careful reading of the earlier version of the manuscript as well as helpful comments that lead to a better exposition of the paper.
\section{Fox torus homotopy groups}
In this section, we make some calculation on $[J_n({\mathbb S}^1),\Omega Y]$ using the Fox torus homotopy groups. First, we recall from \cite{fox} the definition of the $n$-th Fox torus homotopy group of
a connected pointed space $Y$, for $n\ge 1$. Let $y_0$ be a basepoint of $Y$, then
$$
\tauu_n(Y)\cong \tauu_n(Y,y_0)=\pi_1(Y^{{\mathbb T}^{n-1}},\overline{y_0}),
$$
where $Y^{{\mathbb T}^{n-1}}$ denotes the space of unbased maps from the $(n-1)$-torus ${\mathbb T}^{n-1}$ to $Y$ and
$\overline{y_0}$ is the constant map at $y_0$. When $n=1$, $\tauu_1(Y)=\pi_1(Y)$.
To re-interpret Fox's result, we showed in \cite{ggw1} that
$$
\tauu_n(Y)\cong [F_n({\mathbb S}^1),Y]
$$
the group of homotopy classes of basepoint preserving maps from the
reduced suspension $F_n({\mathbb S}^1):=\Sigma ({\mathbb T}^{n-1}\sqcup *)$ of ${\mathbb T}^{n-1}$ adjoined with a distinguished point
to $Y$.
\addtocounter{theorem}{1}
One of the main results of \cite{fox} is the following split exact sequence:
\begin{equation}\label{fox-split}
0\to \prod_{i=2}^n \pi_i(Y)^{\sigma_i} \to \tauu_n(Y) \stackrel{\dashleftarrow}{\to} \tauu_{n-1}(Y) \to 0,
\end{equation}
where $\sigma_i=\binom{n-2}{i-2}$, the binomial coefficient.
With the isomorphism $\tauu_{n-1}(\Omega Y)\cong \prod_{i=2}^n \pi_i(Y)^{\sigma_i}$ shown in \cite[Theorem 1.1]{ggw1},
the sequence \eqref{fox-split} becomes
\begin{equation}\label{general-fox-split}
0\to \tauu_{n-1}(\Omega Y) \to \tauu_n(Y) \stackrel{\dashleftarrow}{\to} \tauu_{n-1}(Y) \to 0.
\end{equation}
Here, the projection $\tauu_n(Y) \to \tauu_{n-1}(Y)$ is induced by the suspension of the inclusion
${\mathbb T}^{n-2}\sqcup * \hookrightarrow {\mathbb T}^{n-1}\sqcup *$ given
by $(t_1,\ldots,t_{n-2})\mapsto (1,t_1,\ldots,t_{n-2})$ and the
section $\tauu_{n-1}(Y) \to \tauu_n(Y)$ is the homomorphism induced by the suspension of the
projection ${\mathbb T}^{n-1} \sqcup * \to {\mathbb T}^{n-2}\sqcup *$ given by $(t_1,\ldots,t_{n-1}) \mapsto (t_2,\ldots,t_{n-1})$. This
splitting (section) gives the semi-direct product structure so that the action
$$\bullet : \tauu_{n-1}(Y)\times \tauu_{n-1}(\Omega Y)\longrightarrow\tauu_{n-1}(\Omega Y)$$
of the quotient $\tauu_{n-1}(Y)$ on the kernel $\tauu_{n-1}(\Omega Y)$ is simply conjugation
in $\tauu_n(Y)$ by the image of $\tauu_{n-1}(Y)$ under the section. It follows from the work
of Fox (in particular Proposition \ref{gen-Fox-W-product}) that the action is determined by the
Whitehead products. More precisely, given $\alpha \in \pi_i(Y), \beta \in \pi_j(Y)$, let
$\hat \alpha$ and $\hat \beta$ be the respective images of $\alpha$ in $\tauu_{n-1}(Y)$ and
of $\beta$ in $\tauu_{n-1}(\Omega Y)$. Then
$$\hat \alpha \bullet \hat \beta =\widehat {[\alpha,\beta]} \hat \beta,$$
where $\widehat{[\alpha,\beta]}$ denotes the image in $\tauu_n(Y)$ of the Whitehead product $[\alpha,\beta]$.
Note that $F_n({\mathbb S}^1)\simeq (\Sigma {\mathbb T}^{n-1}) \vee {\mathbb S}^1$ but
$[F_n({\mathbb S}^1),Y]\cong [{\mathbb S}igma {\mathbb T}^{n-1},Y] \rtimes \pi_1(Y)$.
\begin{example}\label{ex1}
The group $[J_2(\mathbb S^1),\Omega \mathbb S^2]$ is abelian (in fact isomorphic to $\mathbb Z \oplus \mathbb Z$, see Example \ref{ex1-revisit}). We now examine the multiplication in $[J_2(\mathbb S^1),\Omega \mathbb S^2]$ by embedding this inside the group $\tauu_3(\mathbb S^2)$. First, we note that $\tauu_3(\mathbb S^2) \cong (\pi_3(\mathbb S^2) \oplus \pi_2(\mathbb S^2)_{\{1\}}) \rtimes \pi_2(\mathbb S^2)_{\{2\}}$. Here we follow the work of \cite{fox} by using the indexing set $\{1,2\}$ for the two copies of $\pi_2(\mathbb S^2)$ in $\tauu_3(\mathbb S^2)$. Let $\alpha, \beta \in \pi_2(\mathbb S^2)$ and we write $\alpha^{\{i\}}, \beta^{\{i\}} \in \pi_2(\mathbb S^2)_{\{i\}}$ for $i=1,2$. Using the semi-direct product structure and the representation of elements of $[J_2(\mathbb S^1),\Omega \mathbb S^2]$ in $\tauu_3(\mathbb S^2)$, the product of two elements of $[J_2(\mathbb S^1),\Omega \mathbb S^2]$ is given by
\begin{equation*}
\begin{aligned}
&(\alpha^{\{1\}},\alpha^{\{2\}})\cdot (\beta^{\{1\}}, \beta^{\{2\}})\\
=&(\alpha^{\{1\}}+(\alpha^{\{2\}}\bullet \beta^{\{1\}}), \alpha^{\{2\}}+\beta^{\{2\}})\\
=&(\alpha^{\{1\}}+\beta^{\{1\}}+(-1)^{w+(m-1)}[\alpha, \beta]^{\{1,2\}}, \alpha^{\{2\}}+\beta^{\{2\}}) \quad \text{(here $m=1,w=1$)} \\
=&(\alpha^{\{1\}}+\beta^{\{1\}}+(-1)[\alpha, \beta]^{\{1,2\}}, \alpha^{\{2\}}+\beta^{\{2\}}).
\end{aligned}
\end{equation*}
Conversely,
\begin{equation*}
\begin{aligned}
&(\beta^{\{1\}}, \beta^{\{2\}})\cdot (\alpha^{\{1\}},\alpha^{\{2\}})\\
=&(\beta^{\{1\}}+\alpha^{\{1\}}+(-1)^{w+(m-1)}[\beta,\alpha]^{\{1,2\}}, \beta^{\{2\}}+\alpha^{\{2\}}) \quad \text{here $m=1$} \\
=&(\alpha^{\{1\}}+\beta^{\{1\}}+(-1)[\beta,\alpha]^{\{1,2\}}, \alpha^{\{2\}}+\beta^{\{2\}}).
\end{aligned}
\end{equation*}
Since both $\alpha$ and $\beta$ have dimension $2$, the Whitehead product $[\alpha,\beta]$ coincides with $[\beta, \alpha]$.
Thus the above calculation shows that $[J_2(\mathbb S^1),\Omega \mathbb S^2]$ is abelian.
\end{example}
However, the groups $[J_n(X),\Omega Y]$ are non-abelian in general.
\begin{example}\label{ex2}
The group $[J_3(\mathbb S^1), \Omega Y]$ is non-abelian for $Y=\mathbb S^2 \vee \mathbb S^3$. First, the Fox homotopy group $\tauu_4(Y)\cong \tauu_3(\Omega Y) \rtimes \tauu_3(Y)$. Since $Y$ is 1-connected, using the Fox short split exact sequence \eqref{general-fox-split},
we obtain
\begin{equation}
\begin{aligned}
\tauu_4(Y) &\cong \left( \pi_4(Y) \oplus 2\pi_3(Y) \oplus \pi_2(Y)\right)\rtimes \left((\pi_3(Y)\oplus \pi_2(Y))\rtimes \pi_2(Y)\right).
\end{aligned}
\end{equation}
Let $\iota_1:\mathbb S^2 \hookrightarrow \mathbb S^2 \vee \mathbb S^3$ and $\iota_2:\mathbb S^3 \hookrightarrow \mathbb S^2 \vee \mathbb S^3$ denote the canonical inclusions. The (basic) Whitehead product $[\iota_1,\iota_2]$ is a non-trivial element in $\pi_4(\mathbb S^2 \vee \mathbb S^3)$. When regarded as an element of the group $\tauu_4(Y)$, the image of $[\iota_1,\iota_2]$ in $\tauu_4(Y)$ is the commutator $(\hat a, \hat b)$, where
\begin{equation}
\begin{aligned}
\hat a&=\left( (1\oplus (1\oplus 1) \oplus a), ((1\oplus a),a)\right) \\
\hat b&=\left( (1\oplus (b\oplus b) \oplus 1), ((b\oplus 1),1)\right)
\end{aligned}
\end{equation}
with $a=[\iota_1]\in \pi_2(Y)$ and $b=[\iota_2]\in \pi_3(Y)$. By \cite[Theorem 2.2]{ggw5}, $\hat a, \hat b$ are
in the image of $[J_3(\mathbb S^1),\Omega Y]\hookrightarrow\tauu_4(Y)$. Since the commutator $(\hat a, \hat b)$
is non-trivial, it follows that $[J_3(\mathbb S^1),\Omega Y]$ is non-abelian.
\end{example}
Now, we analyze the central extension
\begin{equation}\label{cohen-sequence}
0 \to \pi_{n+1}(Y) \to [J_n(\mathbb S^1),\Omega Y] \to [J_{n-1}(\mathbb S^1),\Omega Y] \to 0
\end{equation}
and give an example in which this extension does not split.
The calculation below makes use of torus homotopy groups.
\begin{example}\label{ex3}
Take $Y=\mathbb S^4$. Since ${\mathbb S}^4$ is $3$-connected, we have
$[J_1(\mathbb S^1),\Omega Y]=[J_2(\mathbb S^1),\Omega Y]=0$ and
$[J_3(\mathbb S^1),\Omega \mathbb S^4]\cong \pi_4(\mathbb S^4)\cong \mathbb Z$.
For $n=4$, the sequence
\eqref{cohen-sequence} becomes
$$0\to \pi_5(\mathbb S^4) \to [J_4(\mathbb S^1),\Omega \mathbb S^4] \to [J_3(\mathbb S^1),\Omega \mathbb S^4] \to 0.
$$
Note that the corresponding Fox split exact sequence is
$$
0\to \pi_5(\mathbb S^4)\oplus 3 \pi_4(\mathbb S^4) \to \tauu_5(\mathbb S^4) \stackrel{\dashleftarrow}{\to} \tauu_4(\mathbb S^4) \to 0
$$
and $\tauu_4(\mathbb S^4)\cong \pi_4(\mathbb S^4)$. For dimensional reasons, $\tauu_5(\mathbb S^4)$ does not contain any non-trivial
Whitehead products so that $\pi_5(\mathbb S^4)\oplus 3 \pi_4(\mathbb S^4)$ is central in $\tauu_5(\mathbb S^4)$.
Thus, $\tauu_5(\mathbb S^4)\cong \pi_5(\mathbb S^4)\oplus 4 \pi_4(\mathbb S^4)$ is abelian. It follows
that $$[J_4(\mathbb S^1),\Omega \mathbb S^4]\cong \pi_5({\mathbb S}^ 4)\oplus\pi_4({\mathbb S}^4)$$ is abelian.
For $n=5$, the corresponding Fox split exact sequence is
$$
0\to \pi_6(\mathbb S^4)\oplus 4 \pi_5(\mathbb S^4) \oplus 6 \pi_4(\mathbb S^4) \to \tauu_6(\mathbb S^4) \stackrel{\dashleftarrow}{\to} \tauu_5(\mathbb S^4) \to 0.$$
Again, for dimensional reasons, $\tauu_6(\mathbb S^4)$ contains no non-trivial Whitehead products so that
$$[J_5({\mathbb S}^1),\Omega {\mathbb S}^4]\cong \pi_6({\mathbb S}^4) \oplus \pi_5({\mathbb S}^4) \oplus \pi_4({\mathbb S}^4).$$ It follows that $[J_5(\mathbb S^1),\Omega\mathbb S^4]$ is
abelian and the sequence \eqref{cohen-sequence} splits for $n=5$. To see this, we note that $[J_5(\mathbb S^1),\Omega\mathbb S^4]$ is of rank $1$ and
so it is either $\mathbb Z \oplus \mathbb Z_2 \oplus \mathbb Z_2$ or $\mathbb Z \oplus \mathbb Z_4$, where $\mathbb{Z}_n$ is the
cyclic group of order $n$. Since $\tauu_6(\mathbb S^4)$ has no elements of order $4$, we have $[J_5(\mathbb S^1),\Omega\mathbb S^4] \cong \mathbb Z \oplus \mathbb Z_2 \oplus \mathbb Z_2$.
When $n=6$, the sequence \eqref{cohen-sequence} becomes
$$
0\to \pi_7(\mathbb S^4) \to [J_6(\mathbb S^1),\Omega \mathbb S^4] \to [J_5(\mathbb S^1),\Omega \mathbb S^4] \to 0.
$$
By projecting $[J_6(\mathbb S^1),\Omega \mathbb S^4]$ onto $[J_3(\mathbb S^1),\Omega \mathbb S^4]$, the above sequence
gives rise to the following exact sequence
\begin{equation}\label{alt-exact}
0\to \mathcal W \to [J_6(\mathbb S^1),\Omega \mathbb S^4] \to [J_3(\mathbb S^1),\Omega \mathbb S^4]\cong \pi_4(\mathbb S^4)\cong \mathbb Z \to 0
\end{equation}
so that $[J_6(\mathbb S^1),\Omega \mathbb S^4] \cong \mathcal W \rtimes \pi_4(\mathbb S^4)$. Similarly, the corresponding Fox
split exact sequence can be written as
$$
0\to \widehat {\mathcal W} \to \tauu_7({\mathbb S}^4) \to \tauu_4(\mathbb S^4)=\pi_4(\mathbb S^4) \to 0.
$$
Here, $\mathcal W$ is generated by elements of $\pi_i(\mathbb S^4)$ for $i=5,6,7$ while $\widehat {\mathcal W}$ is generated by elements of $\pi_i(\mathbb S^4)$ for $i=4,5,6,7$. It follows
that the action of $\pi_4(\mathbb S^4)$ on $\mathcal W$ is the same as the action of $\pi_4(\mathbb S^4)=\tauu_4(\mathbb S^4)$ on
$\widehat {\mathcal W}$ and is determined by the Whitehead products. For dimensional reasons, if $x\in \mathcal W$, the Whitehead
product of $x$ with any element in $\pi_4(\mathbb S^4)$ will be zero in $\tauu_7(\mathbb S^4)$ and hence $\pi_4(\mathbb S^4)$
acts trivially on $\mathcal W$. Thus,
$$
[J_6(\mathbb S^1),\Omega \mathbb S^4] \cong \mathcal W \times \pi_4(\mathbb S^4)\cong \bigoplus_{i=4}^7 \pi_i(\mathbb S^4)
$$
while $\tauu_7(\mathbb S^4)$ is non-abelian. Moreover, the central extension
$$
0\to \pi_7(\mathbb S^4) \to [J_6(\mathbb S^1),\Omega \mathbb S^4] \to [J_5(\mathbb S^1),\Omega \mathbb S^4] \to 0
$$
splits because $\mathcal W\subset \pi_5(\mathbb S^4) \oplus \pi_6(\mathbb S^4) \subset \pi_4(\mathbb S^4) \oplus \pi_5(\mathbb S^4) \oplus \pi_6(\mathbb S^4) \cong [J_5(\mathbb S^1),\Omega \mathbb S^4]$.
Next, consider the case when $n=7$. Then, the sequence \eqref{cohen-sequence} becomes
$$
0\to \pi_8(\mathbb S^4) \to [J_7(\mathbb S^1),\Omega \mathbb S^4] \to [J_6(\mathbb S^1),\Omega \mathbb S^4] \to 0.
$$
The corresponding Fox split exact sequence is
$$
0\to \pi_8({\mathbb S}^4) \oplus 6 \pi_7({\mathbb S}^4) \oplus 15 \pi_6({\mathbb S}^4)\oplus
20 \pi_5({\mathbb S}^4) \oplus 15 \pi_4({\mathbb S}^4) \to \tauu_8(\mathbb S^4) \stackrel{\dashleftarrow}{\to} \tauu_7(\mathbb S^4) \to 0.
$$
Again, for dimensional reasons, the only non-trivial Whitehead products lie in $\pi_8({\mathbb S}^4)$ between the elements in $\pi_4({\mathbb S}^4)$ and
$\pi_5({\mathbb S}^4)$. Let $\iota_1, \iota_2$ be the generators of the cyclic groups $\pi_4(\mathbb S^4)\cong \mathbb Z$ and
$\pi_5(\mathbb S^4)\cong \mathbb Z_2$, respectively. Since there are $15$ copies of $\pi_5(\mathbb S^4)$ and $20$ copies of $\pi_4({\mathbb S}^4)$ in
$\tauu_7({\mathbb S}^4)$, there are a total of $35$ copies of $\pi_4({\mathbb S}^4)$ and $35$ copies of $\pi_5({\mathbb S}^4)$ in $\tauu_8({\mathbb S}^4)$. According to \cite{fox},
the Whitehead products are determined by embedding $\pi_n({\mathbb S}^4)$ in $\tauu_r({\mathbb S}^4)$ using $\binom{r-1}{n-1}$ embeddings. With $r=8$ and
$n=4,5$, there are $(35)^2$ possible pairings $(\iota_1',\iota_2')$, where $\iota_i'$ corresponds to the image of $\iota_i$
under one of $35$ embeddings.
\par Now, by the result of \cite{fox}, once an embedding for $\pi_4({\mathbb S}^4)$ is chosen, there is a unique
embedding of $\pi_5({\mathbb S}^4)$ so that non-trivial Whitehead products can be formed. Thus, there are exactly $35$ such products (commutators)
each of which is the generator of $\pi_8({\mathbb S}^4)\cong \mathbb Z_2$. The product of these $35$ commutators
is non-trivial since $35\not\equiv 0 \pmod{2}$. If $\tilde \iota_i$ is a preimage of $\iota_i$ in
$[J_7(\mathbb S^1), \Omega \mathbb S^4]$ for $i=1,2$, the commutator $[\tilde \iota_1, \tilde \iota_2]$
is independent of choice of the preimages since $\pi_8(\mathbb S^4)$ is central in $[J_7(\mathbb S^1), \Omega \mathbb S^4]$.
This commutator is non-trivial in $[J_7(\mathbb S^1), \Omega \mathbb S^4]$ and this shows that the projection
$[J_7(\mathbb S^1), \Omega \mathbb S^4] \to [J_6(\mathbb S^1), \Omega \mathbb S^4]$ cannot have a section.
\par We point out that such non-trivial Whitehead products will persist and induce non-trivial commutators in
$[J_k(\mathbb S^1), \Omega \mathbb S^4]$ for any $k>7$. Thus, we conclude that $[J_k(\mathbb S^1), \Omega \mathbb S^4]$ is non-abelian for any $k\ge7$.
\end{example}
| 2,645 | 29,488 |
en
|
train
|
0.96.3
|
\section{The Fox number and the function $\varphii$}
The Fox torus homotopy group $\tauu_n(Y)$ is completely determined by the homotopy groups $\pi_i(Y)$ for $1\le i\le n$ and the Whitehead products.
Furthermore, Fox determined whether $\alpha \in \pi_{k+1}(Y), \beta\in \pi_{l+1}(Y)$, when embedded in
$\tauu_n(Y)$ commute. Following \cite{fox}, we consider a $k$-subset ${\bf a}$ and an $l$-subset ${\bf b}$ of the set of indices $\{1,2,\ldots, n\}$
for some $n\ge k+l$. The sets ${\bf a}$ and ${\bf b}$ determine two embeddings $\pi_{i+1}(Y) \to \tauu_{n+1}(Y)$ for $i=k,l$. Denote by $\alpha^{{\bf a}}$ and $\beta^{{\bf b}}$ the corresponding images of $\alpha$ and $\beta$ in $\tauu_{n+1}(Y)$.
\begin{proposition}\label{gen-Fox-W-product}{\em
\begin{enumerate}
\item[(1)] If ${\bf a} \cap {\bf b}=\emptyset$ then $(\alpha^{{\bf a}},\beta^{{\bf b}})=(-1)^{w+(|{\bf a}|-1)}[\alpha, \beta]^{{\bf a}\cup {\bf b}}$.\\
\item[(2)] If ${\bf a} \cap {\bf b}\ne \emptyset$ then $(\alpha^{{\bf a}},\beta^{{\bf b}})=1$.
\end{enumerate}
Here $(x,y)$ denotes the commutator $xyx^{-1}y^{-1}$ of $x$ and $y$, and $w=\sum_{i\in {\bf a}, j\in {\bf b}} w_{i,j}$, where $w_{i,j}=1$ if $j<i$ and $w_{i,j}=0$ otherwise.}
\end{proposition}
The number $(-1)^{w+(|{\bf a}|-1)}$ as in Proposition \ref{gen-Fox-W-product}, which is crucial in determining the structure of the torus homotopy groups, depends on the parameters $l$ and $k$ and we shall call this the {\it Fox number} of the partition $\{{\bf a},{\bf b}\}$.
For any integers $l,k$ with $k>l>0$, let
$$
\varphii(l,k)=\sum_{{\bf a}, |{\bf a}|=l} (-1)^{w+(|{\bf a}|-1)}.
$$
Moreover, for all $k\ge 0$, we let $\varphii(k,k)=(-1)^k$ and $\varphii(0,k)=1$. We shall call $\varphii$ the {\it Fox function}.
In order to compute $\varphii(l,k)$ in terms of a simple algebraic expression, we state our main lemma.
\begin{lemma}\label{main-lemma} The function $\varphii$ satisfies the following recurrence relations.
For $k>l>0$,
$$\varphii(l,k)=(-1)^{k-l+1}\varphii(l-1,k-1)+\varphii(l,k-1)$$
and
$$\varphii(k,k)=-\varphii(k-1,k-1).$$
\end{lemma}
\begin{proof} The formula for $l=k$ follows from the definition of $\varphii(k , k)$.
Now assume that $k>l>0$. Let $L$ denote the family of all subsets of $\{ 1,2,\ldots,k\}$
with cardinality $l$. Divide this family into two subfamilies, $L_1$ and $L_2$. A subset
belongs to $L_1$ if it contains $k$, otherwise it belongs to $L_2$. Recall that the Fox number for one partition is given by $(-1)^{l-1+w}$. Summing the Fox numbers over all partitions $\{{\bf a},{\bf b}\}$ with ${\bf a}$ belonging to $L_2$ amounts to
computing the Fox function for the pair $(l,k-1)$ since the element
$k$ does not play a role in the calculation because $k$ is the last element of the set of $k$ elements and it does not belong to any subset of $l$ elements. Thus we conclude that the Fox function restricted to those partitions, where ${\bf a}$ belongs to $L_2$ coincides with $\varphii(l,k-1)$.
For the sum of the Fox numbers over all partitions with ${\bf a}$ belonging to $L_1$, since $k$ belongs to the subset ${\bf a}$ the number $w$ contains a summand $k-l$ independent of the subset in $L_1$ since $\sum_j w_{k,j}=\sum_{j\in {\bf b}}1=k-l$. Then the remaining part of $w$ is obtained by considering subsets of $l-1$ elements in a set of cardinality $k-1$. So this coincides with
the calculation of $\varphii(l-1,k-1)$ except that the computation for $\varphii(l-1,k-1)$ uses subsets of length
$l-1$ and the one for elements of $L_1$ uses subsets of length $l$. Thus we conclude that the Fox number restricted
to those partitions with ${\bf a}$ belonging to $L_1$ coincides with $ (-1)^{k-l+1}\varphii(l-1,k-1)$ and the result follows.
\end{proof}
Our function $\varphii$ by definition satisfies $\varphii(0,k)=1$ and certainly satisfies the equality $\varphii(0,2k+1)=\varphii(0,2k)$. It is not difficult to see from
the definition of $\varphii$ that $\varphii(1,2k)=0$ and
$\varphii(1,2k+1)=-\varphii(0,2k)=-1$.
The following three propositions give the basic properties in order to compute $\varphii$.
\begin{proposition}\label{odd-even} {\em \mbox{\em (1)} For $l$ odd and $k>l/2$ we have $$\varphii(l,2k)=0.$$
\mbox{\em (2)} For $l$ even and $k\geq l/2$ we have $$\varphii(l,2k)=\varphii(l,2k+1).$$
\mbox{\em (3)} For $l$ even and $k\geq l/2$ we have $$ \varphii(l+1,2k+1)=-\varphii(l,2k+1).$$}
\end{proposition}
\begin{proof} The proof is by induction. We say that
the inductive hypothesis holds for an integer $m$ if (1) holds for all $l\leq m$ with $l$ odd, and (2) and (3) hold for all $l\leq m$ with $l$ even.
First, we show that the inductive hypothesis holds for $m=1$. Part (2) follows from the definition of $\varphii$ where both sides of the equation are $1$.
For the parts ($1$) and ($3$) we use the following equations
\begin{equation}\label{formulaI}
\varphii(1,2k+1)=(-1)^{2k+1}\varphii(0,2k)+\varphii(1,2k)
\end{equation}
and
\begin{equation}\label{formulaII}
\varphii(1,2k)=(-1)^{2k}\varphii(0,2k-1)+\varphii(1,2k-1)
\end{equation}
obtained from Lemma \ref{main-lemma}.
\par By induction on $k$, we prove simultaneously that $\varphii(1,2k)=0$,
and $\varphii(1,2k-1)=0-1=-1$, where the latter equality is equivalent to part (3) for $l=0$.
By definition $\varphii(1,1)=-1$ and by formula \eqref{formulaII} above
$\varphii(1,2)=(-1)^{2}\varphii(0,1)+\varphii(1,1)=1-1=0$. So the result holds for $k=1$.
Suppose that $\varphii(1,2r)=0$, $\varphii(1,2r-1)=-1$, for $r\leq k$.
Next we prove that $\varphii(1,2k+2)=0$ and $\varphii(1,2k+1)=-1$. By the inductive hypothesis and equation \eqref{formulaI} it follows that
$\varphii(1,2k+1)=(-1)^{2k+1}\varphii(0,2k)+\varphii(1,2k)=-1+0=-1$. Now the inductive hypothesis and formula \eqref{formulaII} yield
$\varphii(1,2k+2)=(-1)^{2k+2}\varphii(0,2k+1)+\varphii(1,2k+1)=1-1=0$ and the result follows.
Now assume the assertions for parts (1) - (3) hold for $m=2s+1$. Then, we show that the result holds for $m=2s+3$.
The proof is similar to the arguments above. First, we have
$$\varphii(2s+2,2k+1)=\varphii(2s+1,2k)+\varphii(2s+2,2k)=\varphii(2s+2,2k)$$
where the first equality follows from Lemma \ref{main-lemma} and the second equality holds by inductive hypothesis about part (1). So part (2) follows.
For parts ($1$) and ($3$), we use the following equations
\begin{equation}\label{formulaIII}
\varphii(2s+3,2k+1)=-\varphii(2s+2,2k)+\varphii(2s+3,2k),
\end{equation}
\begin{equation}\label{formulaIV}
\varphii(2s+3,2k)=\varphii(2s+2,2k-1)+\varphii(2s+3,2k-1)
\end{equation} and
\begin{equation}\label{formulaV}
\varphii(2s+2,2k+1)=\varphii(2s+1,2k)+\varphii(2s+2,2k)
\end{equation}
obtained from Lemma \ref{main-lemma}.
\par By induction on $k$, we prove simultaneously that $\varphii(2s+3,2k)=0$,
and $\varphii(2s+3,2k-1)=-\varphii(2s+2,2k-1)$. We have $k\geq s+2$ and take
$k=s+2$.
By definition of $\varphii$, we have
$\varphii(2s+3,2s+3)=-\varphii(2s+2,2s+2)$ and by equation \eqref{formulaV}, $\varphii(2s+2,2s+3)=\varphii(2s+1,2s+2)+\varphii(2s+2,2s+2)=\varphii(2s+2,2s+2)$ where the last equality
follows from the inductive hypothesis. Therefore $\varphii(2s+3,2s+3)=-\varphii(2s+2,2s+3)$ and (3) follows. The following equation holds
$$\varphii(2s+3,2s+4)=\varphii(2s+2,2s+3)+\varphii(2s+3,2s+3)=-\varphii(2s+3,2s+3)+\varphii(2s+3,2s+3)=0,$$
where the first equality follows from equation \eqref{formulaIV}, and the second equality
follows from part (3). So the result holds for part (1).
Now, suppose that the statement holds for $k$ and
let us prove for $k+1$.
From equation \eqref{formulaIII} we have
$\varphii(2s+3,2k+1)=-\varphii(2s+2,2k)+\varphii(2s+3,2k)$. Since $\varphii(2s+3,2k)=0$ by inductive hypothesis,
and $\varphii(2s+2,2k)=\varphii(2s+2, 2k+1)$ it follows that (3) holds. It remains to show that $\varphii(2s+3,2k+2)=0$ in order for (1) to hold.
From \eqref{formulaIV} we have $\varphii(2s+3,2k+2)=\varphii(2s+2,2k+1)+\varphii(2s+3,2k+1)$, and from (3), $\varphii(2s+3,2k+1)=-\varphii(2s+2,2k+1)$, so it follows that
$\varphii(2s+3,2k+2)=0$.
Therefore (1) and (3) hold for $k+1$ and this concludes the proof.
\end{proof}
\begin{proposition}\label{rec} {\em The function $\varphii$ satisfies the recursive formula
\begin{equation}\label{ggw-formula}
\varphii(2l,2k)=\varphii(2l ,2k-2)+\varphii(2l -2, 2k-2).
\end{equation}}
\end{proposition}
\begin{proof} From Lemma \ref{main-lemma}, we have
$$\varphii(2l,2k)=(-1)^{2k-2l+1}\varphii(2l-1,2k-1)+\varphii(2l,2k-1)=(-1)\varphii(2l-1,2k-1)+\varphii(2l,2k-1).$$
From Proposition \ref{odd-even}, we have
$$(-1)\varphii(2l-1,2k-1)=\varphii(2l-2,2k-2)$$
and
$$\varphii(2l,2k-1)=\varphii(2l,2k-2).$$
Hence, the result follows.
\end{proof}
| 3,577 | 29,488 |
en
|
train
|
0.96.4
|
\begin{proposition}\label{rec} {\em The function $\varphii$ satisfies the recursive formula
\begin{equation}\label{ggw-formula}
\varphii(2l,2k)=\varphii(2l ,2k-2)+\varphii(2l -2, 2k-2).
\end{equation}}
\end{proposition}
\begin{proof} From Lemma \ref{main-lemma}, we have
$$\varphii(2l,2k)=(-1)^{2k-2l+1}\varphii(2l-1,2k-1)+\varphii(2l,2k-1)=(-1)\varphii(2l-1,2k-1)+\varphii(2l,2k-1).$$
From Proposition \ref{odd-even}, we have
$$(-1)\varphii(2l-1,2k-1)=\varphii(2l-2,2k-2)$$
and
$$\varphii(2l,2k-1)=\varphii(2l,2k-2).$$
Hence, the result follows.
\end{proof}
Next, we give a simple expression for $\varphii(l,k)$ when $l$ and $k$ are even.
\begin{proposition}\label{Pascal}{\em
For any $l,k\ge 1$,
$$
\varphii(2l,2k)=-\binom{k}{l}.
$$}
\end{proposition}
\begin{proof}
Let $c(n,k)$ be a function of two integer variables such that
\begin{equation}\label{2-var}
c(n+1,k)=c(n,k)+c(n,k-1)
\end{equation}
for $n,k\ge 1$. Suppose that $c(n,0)=a_n$ and $c(1,k)=b_k$, where $\{a_n\}_{n\ge 1}$ and $\{b_k\}_{k\ge 1}$ are two arbitrary sequences. Then H. Gupta \cite{gupta} showed that
\begin{equation}\label{gupta}
c(n,k)=\sum_{r=k}^{n-1} \binom{r-1}{k-1} a_{n-r} + \sum_{r=0}^{k-1} \binom{n-1}{r} b_{k-r}.
\end{equation}
Now, if we let $c(n,k)=\varphii(2k,2n)$ then \eqref{ggw-formula} shows that \eqref{2-var} holds. Moreover, $a_n=c(n,0)=\varphii(0,2n)=-1$ for all $n\ge 0$
and $b_k=c(1,k)=\varphii(2k,2)$. It follows that $b_1=\varphii(2,2)=-1$ and $b_k=0$ for all $k\ge 2$. Now, the formula \eqref{gupta} becomes
\begin{equation*}
\varphii(2l , 2k)=\sum_{r=l}^{k-1} \binom{r-1}{l-1} a_{k-r} + \sum_{r=0}^{l-1} \binom{k-1}{r} b_{l-r}.
\end{equation*}
Since $b_k=0$ for $k\ge 2$, it follows that
\begin{equation}\label{ggw-recurrence}
\begin{aligned}
\varphii(2l , 2k)&=\sum_{r=l}^{k-1} \binom{r-1}{l-1} a_{k-r} + \binom{k-1}{l -1} b_{1} \\
&=\sum_{r=l}^{k-1} \binom{r-1}{l-1} (-1) + \binom{k-1}{l -1} (-1) \\
&=-\sum_{r=l}^{k} \binom{r-1}{l-1}.
\end{aligned}
\end{equation}
The following equality
\begin{equation}\label{pascal}
\binom{0}{k} + \binom{1}{k} + \cdots + \binom{n}{k}=\binom{n+1}{k+1}
\end{equation}
can be derived using the Pascal triangle and the general form of the binomial coefficient $\binom{n}{k}$.
Now,
\begin{equation*}
\begin{aligned}
\sum_{r=l}^{k} \binom{r-1}{l-1}&=\binom{l -1}{l -1} + \cdots +\binom{k-1}{l -1}\\
&=\left[\binom{0}{l -1} + \cdots +\binom{l -2}{l -1} + \binom{l -1}{l -1} + \cdots +\binom{k-1}{l -1}\right] - \left[\binom{0}{l -1} + \cdots +\binom{l -2}{l -1}\right] \\
&=\binom{k}{l} -\binom{l -1}{l} \quad \text{by \eqref{pascal}} \\
&=\binom{k}{l} -0 = \binom{k}{l}.
\end{aligned}
\end{equation*}
Hence, we have
\begin{equation}\label{simple-phi}
\varphii(2l, 2k)=-\binom{k}{l}
\end{equation}
and the proof is complete.
\end{proof}
Lemma \ref{main-lemma} and all propositions in this section yield its main result.
\begin{theorem}\label{main-phi}
For any integers $l, k$ with $1\le l \le k$, we have
$$
\varphii(l,k) \quad = \quad
\left\{
\aligned
& -\binom{\frac{k}{2}}{\frac{l}{2}}, \qquad & \text{if $l$ is even and $k$ is even;} \\
& 0, \qquad & \text{if $l$ is odd and $k$ is even;} \\
& \binom{\frac{k-1}{2}}{\frac{l-1}{2}}, \qquad & \text{if $l$ is odd and $k$ is odd;} \\
& -\binom{\frac{k-1}{2}}{\frac{l}{2}}, \qquad & \text{if $l$ is even and $k$ is odd.}
\endaligned
\right.
$$
\end{theorem}
| 1,623 | 29,488 |
en
|
train
|
0.96.5
|
\section{Group structure of $[J_n(\mathbb S^1),\Omega Y]$}
The group structure of $[J_n({\mathbb S}^1),\Omega Y]$ is determined by the suspension co-$H$-structure on ${\mathbb S}igma J_n({\mathbb S}^1)$.
But, in view of \cite{ggw5}, the suspension $p_n : F_{n+1}({\mathbb S}^1)\to {\mathbb S}igma J_n({\mathbb S}^1)$ of the projection map ${\mathbb T}^n\sqcup *\to J_n({\mathbb S}^1)$
leads to a monomorphism of groups $$[{\mathbb S}igma J_n({\mathbb S}^1),Y] \hookrightarrow [F_{n+1}({\mathbb S}^1),Y]=\tauu_{n+1}(Y)$$ for any pointed space $Y$.
Thus, the group structure of $[{\mathbb S}igma J_n({\mathbb S}^1),Y]$ is determined by the multiplication of $[F_{n+1}({\mathbb S}^1),Y]$.
\par To relate those structures, first notice that the cofibration
$${\mathbb S}^1\stackrel{j}{\to} \mathcal P_n({\mathbb S}^1):={\mathbb T}^n/{\mathbb T}^{n-1}\stackrel{q}{\to} {\mathbb S}^1\wedge {\mathbb T}^{n-1}$$
has a retraction $p : \mathcal P_n({\mathbb S}^1)\to {\mathbb S}^1$.
Hence, the map $${\mathbb S}igma p+{\mathbb S}igma q :{\mathbb S}igma \mathcal P_n({\mathbb S}^1)\stackrel{\simeq}{\longrightarrow} {\mathbb S}igma {\mathbb S}^1\vee {\mathbb S}^1\wedge {\mathbb S}igma{\mathbb T}^{n-1}$$
is a homotopy equivalence for $n\ge 1$. Because ${\mathbb S}igma(X_1\times X_2)\simeq {\mathbb S}igma X_1\vee {\mathbb S}igma X_2\vee {\mathbb S}igma(X_1\wedge X_2)$
for any pointed spaces $X_1$ and $X_2$, by an inductive argument, we derive:
\begin{equation}\label{eq1}
\begin{array}{l}
{\mathbb S}igma \mathcal P_n({\mathbb S}^1)\simeq \bigvee_{k=1}^n\binom{n-1}{k-1}{\mathbb S}^{k+1},\\
F_n({\mathbb S}^1)\simeq {\mathbb S}igma{\mathbb T}^{n-1}\vee {\mathbb S}^1 \simeq\bigvee_{k=0}^{n-1}\binom{n-1}{k}{\mathbb S}^{k+1}
\end{array}
\end{equation}
for $n\ge 1$.
Further, recall from \cite{J} that
\begin{equation}\label{eq2}
{\mathbb S}igma J_n({\mathbb S}^1)\simeq\bigvee_{k=1}^n{\mathbb S}^{k+1}
\end{equation}
for $n\ge 1$ and notice that (up to homotopy equivalences above) the suspension map $$p_n : F_{n+1}({\mathbb S}^1)\longrightarrow {\mathbb S}igma J_n({\mathbb S}^1)$$
restricts to ${p_n}|_{{\mathbb S}^1}=\ast$ and $p_n|_{{\mathbb S}^{k+1}} : {\mathbb S}^{k+1} \to {\mathbb S}igma J_n({\mathbb S}^1)$ to the inclusion map for $k=1,\ldots,n$.
\par In view of \cite[Theorem 3.1]{ggw1}, the suspension co-$H$-structure $$\hat{\mu}_n : F_n({\mathbb S}^1)\to F_n({\mathbb S}^1)\vee F_n({\mathbb S}^1)$$ on $F_n({\mathbb S}^1)$ leads to
the following split exact sequence
$$
1\to [{\mathbb S}igma {\mathcal P}_{n-1}({\mathbb S}^1),Y] \to [F_n({\mathbb S}^1),Y]\stackrel{\dashleftarrow}{\to} [F_{n-1}({\mathbb S}^1),Y] \to 1
$$
for any pointed space $Y$.
Hence, $[F_n({\mathbb S}^1),Y]\cong [{\mathbb S}igma \mathcal P_{n-1}({\mathbb S}^1),Y]\rtimes[F_{n-1}({\mathbb S}^1),Y]$ is
the semi-direct product with respect to the natural action
$$[F_{n-1}({\mathbb S}^1),Y]\times[{\mathbb S}igma \mathcal P_{n-1}({\mathbb S}^1),Y]\to[{\mathbb S}igma \mathcal P_{n-1}({\mathbb S}^1),Y].$$
In particular, for $Y=F_{n-1}({\mathbb S}^1)\vee{\mathbb S}igma \mathcal P_{n-1}({\mathbb S}^1)$, by the natural bijection
$[F_{n-1}({\mathbb S}^1),Y]\times[{\mathbb S}igma \mathcal P_{n-1}({\mathbb S}^1),Y]\cong[F_{n-1}({\mathbb S}^1)\vee{\mathbb S}igma \mathcal P_{n-1}({\mathbb S}^1),Y]$,
the identity map $\mbox{id}_Y$ is sent to the corresponding co-action
$$\alpha_{n-1} : {\mathbb S}igma \mathcal P_{n-1}({\mathbb S}^1)\to F_{n-1}({\mathbb S}^1)\vee {\mathbb S}igma \mathcal P_{n-1}({\mathbb S}^1).$$
Furthermore, the natural bijection $[{\mathbb S}igma \mathcal P_{n-1}({\mathbb S}^1),Y]\times[F_{n-1}({\mathbb S}^1),Y]\cong
[F_n({\mathbb S}^1),Y]$ for any pointed space $Y$ yields a homotopy equivalence
$$F_n({\mathbb S}^1)\simeq F_{n-1}({\mathbb S}^1)\vee{\mathbb S}igma \mathcal P_{n-1}({\mathbb S}^1).$$
\par By means of \cite[Theorem 2.2]{ggw4}, the suspension co-$H$-structure
$$\hat{\mu}_n : F_n({\mathbb S}^1)\longrightarrow F_n({\mathbb S}^1)\vee F_n({\mathbb S}^1)$$
is described inductively and determined by
$$\hat{\mu}^1_n : F_{n-1}({\mathbb S}^1)\stackrel{\hat{\mu}_{n-1}}{\to}F_{n-1}({\mathbb S}^1)\vee F_{n-1}({\mathbb S}^1)\hookrightarrow F_n({\mathbb S}^1)\vee F_n({\mathbb S}^1)$$
and a map
$$\hat{\mu}_n^2: {\mathbb S}igma \mathcal P_{n-1}({\mathbb S}^1)\to F_n({\mathbb S}^1)\vee F_n({\mathbb S}^1)$$
defined via the co-action $\alpha_{n-1} : {\mathbb S}igma \mathcal P_{n-1}({\mathbb S}^1)\to F_{n-1}({\mathbb S}^1)\vee {\mathbb S}igma \mathcal P_{n-1}({\mathbb S}^1)$.
\par Similarly to $F_{n+1}({\mathbb S}^1)$, the suspension co-$H$-structure
$$\overline{\mu}_n: {\mathbb S}igma J_n({\mathbb S}^1)\to {\mathbb S}igma J_n({\mathbb S}^1) \vee {\mathbb S}igma J_n({\mathbb S}^1)$$
is also described inductively.
Because ${\mathbb S}igma J_1({\mathbb S}^1)=F_2({\mathbb S}^1)$, the co-$H$-structure
$\overline{\mu}_1=\hat{\mu}_2 : {\mathbb S}igma J_1({\mathbb S}^1)\to {\mathbb S}igma J_1({\mathbb S}^1)\vee {\mathbb S}igma J_1({\mathbb S}^1)$.
Given the co-$H$-structure $\overline \mu_n : {\mathbb S}igma J_n({\mathbb S}^1)
\to {\mathbb S}igma J_n({\mathbb S}^1) \vee {\mathbb S}igma J_n({\mathbb S}^1)$
write ${\mathbb S}igma J_{n+1}({\mathbb S}^1) \simeq {\mathbb S}igma J_n({\mathbb S}^1) \vee {\mathbb S}^{n+2}$ and
$F_{n+2}({\mathbb S}^1) \simeq F_{n+1}({\mathbb S}^1)
\vee {\mathbb S}igma \mathcal P_{n+1}({\mathbb S}^1)$. Then, one can easily verify that the composite maps
$$\overline{\mu}_{n+1}^1 :
{\mathbb S}igma J_n({\mathbb S}^1) \stackrel{\overline
{\mu}_n}{\longrightarrow} {\mathbb S}igma J_n({\mathbb S}^1) \vee
{\mathbb S}igma J_n({\mathbb S}^1) \hookrightarrow {\mathbb S}igma J_{n+1}({\mathbb S}^1) \vee
{\mathbb S}igma J_{n+1}({\mathbb S}^1)$$
and $$\overline{\mu}_{n+1}^2 :
{\mathbb S}^{n+2} \hookrightarrow {\mathbb S}igma \mathcal P_{n+1}({\mathbb S}^1)
\stackrel{\hat{\mu}_{n+2}^2}{\longrightarrow}F_{n+2}({\mathbb S}^1)\vee F_{n+2}({\mathbb S}^1)\stackrel{p_{n+1}\vee
p_{n+1}}{\longrightarrow} {\mathbb S}igma J_{n+1}({\mathbb S}^1) \vee {\mathbb S}igma J_{n+1}({\mathbb S}^1)$$
lead to the suspension co-$H$-structure $$\overline \mu_{n+1}=\overline{\mu}_{n+1}^1\vee\overline{\mu}_{n+1}^2 : {\mathbb S}igma J_{n+1}({\mathbb S}^1)
\longrightarrow {\mathbb S}igma J_{n+1}({\mathbb S}^1) \vee {\mathbb S}igma J_{n+1}({\mathbb S}^1)$$
on the space ${\mathbb S}igma J_{n+1}({\mathbb S}^1)\simeq{\mathbb S}igma J_n({\mathbb S}^1)\vee {\mathbb S}^{n+2}$.
\par To analyze the suspension co-$H$-structure on ${\mathbb S}igma J_n(\mathbb S^1)$, we recall the recent work \cite{AL2} on co-$H$-structures
on a wedge of spheres ${\mathbb S}=\bigvee_{i=1}^t\mathbb{S}^{n_i}$. Write
$k_i :\mathbb{S}^{n_i}\hookrightarrow {\mathbb S}$ for the inclusion maps with $i=1,\ldots,t$.
Further, set $\iota_j :{\mathbb S}\hookrightarrow {\mathbb S}\vee {\mathbb S}$ for the inclusion, and
$p_j : {\mathbb S}\vee {\mathbb S}\to {\mathbb S}$ for the projection maps with $j=1,2$. In \cite{AL2}, Arkowitz and Lee have proved the following result.
\begin{proposition}\mbox{$($\cite[Lemma 3.3]{AL2}$)$}\label{n-spheres}{\em
Let $\varphi:{\mathbb S}\to {\mathbb S}\vee {\mathbb S}$ be a co-action. Then, $\varphi$ is a co-$H$-structure on ${\mathbb S}$
if and only if $\varphi k_i=\iota_1k_i+\iota_2k_i+P_i$, where
$P_i : \mathbb{S}^{n_i}\to {\mathbb S}\vee {\mathbb S}$ has the property $p_1P_i=0=p_2P_i$ for $i=1,\ldots,t$.}
\end{proposition}
| 2,837 | 29,488 |
en
|
train
|
0.96.6
|
\par Similarly to $F_{n+1}({\mathbb S}^1)$, the suspension co-$H$-structure
$$\overline{\mu}_n: {\mathbb S}igma J_n({\mathbb S}^1)\to {\mathbb S}igma J_n({\mathbb S}^1) \vee {\mathbb S}igma J_n({\mathbb S}^1)$$
is also described inductively.
Because ${\mathbb S}igma J_1({\mathbb S}^1)=F_2({\mathbb S}^1)$, the co-$H$-structure
$\overline{\mu}_1=\hat{\mu}_2 : {\mathbb S}igma J_1({\mathbb S}^1)\to {\mathbb S}igma J_1({\mathbb S}^1)\vee {\mathbb S}igma J_1({\mathbb S}^1)$.
Given the co-$H$-structure $\overline \mu_n : {\mathbb S}igma J_n({\mathbb S}^1)
\to {\mathbb S}igma J_n({\mathbb S}^1) \vee {\mathbb S}igma J_n({\mathbb S}^1)$
write ${\mathbb S}igma J_{n+1}({\mathbb S}^1) \simeq {\mathbb S}igma J_n({\mathbb S}^1) \vee {\mathbb S}^{n+2}$ and
$F_{n+2}({\mathbb S}^1) \simeq F_{n+1}({\mathbb S}^1)
\vee {\mathbb S}igma \mathcal P_{n+1}({\mathbb S}^1)$. Then, one can easily verify that the composite maps
$$\overline{\mu}_{n+1}^1 :
{\mathbb S}igma J_n({\mathbb S}^1) \stackrel{\overline
{\mu}_n}{\longrightarrow} {\mathbb S}igma J_n({\mathbb S}^1) \vee
{\mathbb S}igma J_n({\mathbb S}^1) \hookrightarrow {\mathbb S}igma J_{n+1}({\mathbb S}^1) \vee
{\mathbb S}igma J_{n+1}({\mathbb S}^1)$$
and $$\overline{\mu}_{n+1}^2 :
{\mathbb S}^{n+2} \hookrightarrow {\mathbb S}igma \mathcal P_{n+1}({\mathbb S}^1)
\stackrel{\hat{\mu}_{n+2}^2}{\longrightarrow}F_{n+2}({\mathbb S}^1)\vee F_{n+2}({\mathbb S}^1)\stackrel{p_{n+1}\vee
p_{n+1}}{\longrightarrow} {\mathbb S}igma J_{n+1}({\mathbb S}^1) \vee {\mathbb S}igma J_{n+1}({\mathbb S}^1)$$
lead to the suspension co-$H$-structure $$\overline \mu_{n+1}=\overline{\mu}_{n+1}^1\vee\overline{\mu}_{n+1}^2 : {\mathbb S}igma J_{n+1}({\mathbb S}^1)
\longrightarrow {\mathbb S}igma J_{n+1}({\mathbb S}^1) \vee {\mathbb S}igma J_{n+1}({\mathbb S}^1)$$
on the space ${\mathbb S}igma J_{n+1}({\mathbb S}^1)\simeq{\mathbb S}igma J_n({\mathbb S}^1)\vee {\mathbb S}^{n+2}$.
\par To analyze the suspension co-$H$-structure on ${\mathbb S}igma J_n(\mathbb S^1)$, we recall the recent work \cite{AL2} on co-$H$-structures
on a wedge of spheres ${\mathbb S}=\bigvee_{i=1}^t\mathbb{S}^{n_i}$. Write
$k_i :\mathbb{S}^{n_i}\hookrightarrow {\mathbb S}$ for the inclusion maps with $i=1,\ldots,t$.
Further, set $\iota_j :{\mathbb S}\hookrightarrow {\mathbb S}\vee {\mathbb S}$ for the inclusion, and
$p_j : {\mathbb S}\vee {\mathbb S}\to {\mathbb S}$ for the projection maps with $j=1,2$. In \cite{AL2}, Arkowitz and Lee have proved the following result.
\begin{proposition}\mbox{$($\cite[Lemma 3.3]{AL2}$)$}\label{n-spheres}{\em
Let $\varphi:{\mathbb S}\to {\mathbb S}\vee {\mathbb S}$ be a co-action. Then, $\varphi$ is a co-$H$-structure on ${\mathbb S}$
if and only if $\varphi k_i=\iota_1k_i+\iota_2k_i+P_i$, where
$P_i : \mathbb{S}^{n_i}\to {\mathbb S}\vee {\mathbb S}$ has the property $p_1P_i=0=p_2P_i$ for $i=1,\ldots,t$.}
\end{proposition}
Adapting Proposition \ref{n-spheres} to our setting, we prove the first of our main results.
\begin{theorem} \label{DMP} The suspension co-$H$-structure $$\overline{\mu}_n : {\mathbb S}igma J_n(\mathbb S^1)
\to {\mathbb S}igma J_n(\mathbb S^1) \vee {\mathbb S}igma J_n(\mathbb S^1)$$ for $n\ge 1$ is given by $\overline{\mu}_n k_i\simeq\iota_1k_i+\iota_2k_i+P_i$, where the perturbation $P_i\simeq\sum_{l=0}^i\varphii(i-l,i-1)P_{l,i}$ with the Fox function $\varphii$ and
$$P_{l,i}: \mathbb S^{i+1}\to \mathbb S^{i-l+1}\vee \mathbb S^{l+1}\stackrel{k_{i-l}\vee k_l}{\hookrightarrow}{\mathbb S}igma J_n(\mathbb S^1)\vee {\mathbb S}igma J_n(\mathbb S^1)$$
determined by the Whitehead product map $\mathbb S^{i+1}\to \mathbb S^{i-l+1}\vee \mathbb S^{l+1}$ for $i=0,\ldots,n$ and $l=0,\ldots,i$.
\end{theorem}
\begin{proof} We proceed inductively on $n\ge 1$. Since the space
$J_n(\mathbb S^1)$ is a $CW$-complex, by the Cellular Approximation Theorem,
$\overline{\mu}_n\simeq \overline{\mu}'_n : {\mathbb S}igma J_n(\mathbb S^1)
\to {\mathbb S}igma J_n(\mathbb S^1) \vee {\mathbb S}igma J_n(\mathbb S^1)$, where $\overline{\mu}'_n$ is cellular.
Hence, from now on, we may assume that $\overline{\mu}_n$ is a cellular map.
\par If $n=1$ then $P_0=0$ and, by definition, $\bar{\mu}_1k_0=\iota_1k_0+\iota_2k_0$.
\par Because $J_n(\mathbb S^1)\subseteq J_{n+1}(\mathbb S^1)$, for dimensional reasons,
we derive that the restriction $\overline{\mu}_{n+1}|_{J_n(\mathbb S^1)}=\overline{\mu}_n$.
Consequently, we must analyze the map $\overline{\mu}_{n+1} k_{n+1}: \mathbb S^{n+2}\to
{\mathbb S}igma J_{n+1}(\mathbb S^1)\vee {\mathbb S}igma J_{n+1}(\mathbb S^1)$ determined by the composition
$$\overline{\mu}_{n+1}^2 :
\mathbb S^{n+2} \hookrightarrow {\mathbb S}igma \mathcal P_{n+1}(\mathbb S^1)
\stackrel{\hat{\mu}_{n+2}^2}{\longrightarrow} F_{n+2}({\mathbb S}^1)\vee F_{n+2}({\mathbb S}^1)\stackrel{p_{n+1}\vee
p_{n+1}}{\longrightarrow} {\mathbb S}igma J_{n+1}(\mathbb S^1)\vee {\mathbb S}igma J_{n+1}(\mathbb S^1).
$$
But, in view of the decompositions (\ref{eq1}) and (\ref{eq2}), we derive that
$$p_{n+1}|_{{\mathbb S}igma \mathcal P_{n+1}(\mathbb S^1)} : {\mathbb S}igma \mathcal P_{n+1}(\mathbb S^1)\simeq \bigvee_{k=1}^{n+1}\binom{n}{k-1}\mathbb S^{k+1}
\to \bigvee _{k=1}^{n+1}\mathbb S^{k+1}$$
restricts to the inclusion map ${\mathbb S}^{k+1}\hookrightarrow \bigvee _{k=1}^{n+1}\mathbb S^{k+1}$ for $k=1,\ldots,n+1$.
\par Because $\hat{\mu}_{n+2}^2$ is defined via the co-action $\alpha_{n+1}: {\mathbb S}igma \mathcal P_{n+1}(\mathbb S^1) \to F_{n+1}(\mathbb S^1) \vee {\mathbb S}igma \mathcal P_{n+1}(\mathbb S^1)$,
granting the monomorphism $[{\mathbb S}igma J_n({\mathbb S}^1),Y] \to [F_{n+1}({\mathbb S}^1),Y]=\tauu_{n+1}(Y)$ analysed in \cite[Theorem 2.2]{ggw5}, we have
to include all possible ways of contributing the same Whitehead product and thus the sum of all those $(-1)^{w+(|{\bf a}|-1)}$
given by Proposition \ref{gen-Fox-W-product}.
This leads to $\overline{\mu}_{n+1}k_{n+1}-\iota_1k_{n+1}-\iota_2k_{n+1}\simeq P_{n+1}\simeq\sum_{i=0}^{n+1}\varphii(n+1-i,n) P_{n+1,i}$
with $P_{n+1,i}: \mathbb S^{n+2}\to \mathbb S^{n+2-i}\vee \mathbb S^{i+1}\hookrightarrow J_{n+1}(\mathbb S^1)\vee J_{n+1}(\mathbb S^1)$
determined by the Whitehead product map $\mathbb S^{n+2}\to \mathbb S^{n+2-i}\vee \mathbb S^{i+1}$
and the proof is complete.
\end{proof}
| 2,473 | 29,488 |
en
|
train
|
0.96.7
|
When $Y$ is a $W$-space, that is, all Whitehead products vanish, it has
been shown in \cite[Corollary 3.6]{ggw5} that $[J({\mathbb S}^1),\Omega Y]$ is isomorphic to the direct product of $\prod_{i= 2}^\infty\pi_i(Y)$
so that $[J({\mathbb S}^1),\Omega Y]$ is abelian, in particular. As an easy consequence of Theorem \ref{DMP}, we have
the following corollary.
\begin{corollary}\label{abelian-cohen-groups}{\em
Let $Y$ be a path connected pointed space such that the Whitehead products
$[f,g]: \mathbb S^{k+l+1}\to Y$ for $f: \mathbb S^{k+1}\to Y$
and $g : \mathbb S^{l+1}\to Y$ vanish with $k,l$ even. Then,
the groups $[J_n(\mathbb S^1),\Omega Y]$ for $n\ge 1$ and $[J(\mathbb S^1),\Omega Y]$
are abelian provided $Y$ is a path connected pointed
space with $\pi_i(Y)=0$ for $i$ odd.}
\end{corollary}
Next, we make use of the function $\varphii$ of Section 2 to determine whether $\alpha \in \pi_{n+1}(Y)$ and $\beta \in \pi_{m+1}(Y)$ commute in the group $[J(\mathbb S^1),\Omega Y]$.
\begin{theorem}\label{abelian}
Let $\alpha\in \pi_{n+1}(Y), \beta\in \pi_{m+1}(Y)$. Suppose the Whitehead product $[\alpha,\beta]\ne 0$ and has order $k$. Then:
\begin{itemize}
\item if $k=\infty$ then the product $\alpha \#\beta \in [J(\mathbb S^1),\Omega Y]$ coincides with $\beta \#\alpha$ iff both $n$ and $m$ are odd; \\
\item if $k<\infty$ then the product $\alpha \#\beta \in [J(\mathbb S^1),\Omega Y]$ coincides with $\beta \#\alpha$ iff both $n$ and $m$ are odd or
\[
k~\text{divides} \begin{cases}
\binom{\frac{n+m-1}{2}}{\frac{m}{2}} & \text{if $n$ is odd and $m$ is even;} \\
\binom{\frac{n+m-1}{2}}{\frac{n}{2}} & \text{if $n$ is even and $m$ is odd;} \\
\binom{\frac{n+m}{2}}{\frac{n}{2}} & \text{if $n$ and $m$ are both even.}
\end{cases}
\]
\end{itemize}
\end{theorem}
\begin{proof}
First of all, if there are non-trivial perturbations $P_{l,i}$ determined by the Whitehead product $[\alpha,\beta]$ then $l=m$ and $i-l=n$. In other words, $P_{l,i}=P_{m,n+m}$. It follows that $P_{n+m}=\varphii(n,n+m-1)P_{m,n+m}$. Now, we have
$$
\alpha \# \beta =\alpha + \beta + \varphii(n,n+m-1)[\alpha,\beta] \qquad \text{and} \qquad \beta \# \alpha =\alpha + \beta + \varphii(m,n+m-1)[\beta,\alpha].
$$
Since $[\beta,\alpha]=(-1)^{(n+1)(m+1)}[\alpha,\beta]$, it suffices to compare $\varphii(n,n+m-1)$ with $\varphii(m,n+m-1)(-1)^{(n+1)(m+1)}$.
Let $\Delta=|\varphii(n,n+m-1)-\varphii(m,n+m-1)(-1)^{(n+1)(m+1)}|$. Depending on the parity of $n$ and $m$, we have the following table:
\begin{center}
\begin{tabular}{|c|c|c|c|c|l|}
\hline
$n$ & $m$ & $\varphii(n,n+m-1)$ & $\varphii(m,n+m-1)(-1)^{(n+1)(m+1)}$ & $\Delta$ \\ \hline
{odd} & {odd} & $\binom{\frac{n+m-2}{2}}{\frac{n-1}{2}}$ & $\binom{\frac{n+m-2}{2}}{\frac{m-1}{2}}$ & $0$\\ \hline
{odd} & {even} & $0$ & $-\binom{\frac{n+m-1}{2}}{\frac{m}{2}}$ & $\binom{\frac{n+m-1}{2}}{\frac{m}{2}}$\\ \hline
{even} & {odd} & $-\binom{\frac{n+m-1}{2}}{\frac{n}{2}}$ & $0$ & $\binom{\frac{n+m-1}{2}}{\frac{n}{2}}$\\ \hline
{even} & {even} & $-\binom{\frac{n+m-2}{2}}{\frac{n}{2}}$ & $\binom{\frac{n+m-2}{2}}{\frac{m}{2}}$ & $\binom{\frac{n+m}{2}}{\frac{n}{2}}$\\ \hline
\end{tabular}
\end{center}
\begin{equation}\label{table}
\text{Table for }\Delta
\end{equation}
Because of $\binom{a}{b}=\binom{a}{a-b}$, the equality $\Delta=0$ holds exactly when $n$ and $m$ are both odd.
For the case when both $n$ and $m$ are even, we note
that $\binom{\frac{n+m-2}{2}}{\frac{m}{2}}=\binom{\frac{n+m-2}{2}}{\frac{n}{2}-1}$ and the Pascal triangle
asserts that
$$
\Delta = \binom{\frac{n+m-2}{2}}{\frac{n}{2}-1} + \binom{\frac{n+m-2}{2}}{\frac{n}{2}} = \binom{\frac{n+m}{2}}{\frac{n}{2}}.
$$
This completes the proof.
\end{proof}
Now, using the fact that the group structure of $[J_n({\mathbb S}^1),\Omega Y]$ is induced by the group structure of
the torus homotopy group $\tauu_{n+1}(Y)$ which in turn is determined completely by the homotopy
groups $\{\pi_i(Y)\}_{1\le i\le n+1}$ and their Whitehead products, we give explicitly the group
structure of $[J_n({\mathbb S}^1),\Omega Y]$ following Theorem \ref{DMP} and using the function $\varphii$.
Since $[J_n({\mathbb S}^1),\Omega Y]$ is in one-to-one correspondence with $\displaystyle{\prod_{i=2}^{n+1}\pi_i(Y)}$ as sets, we denote the group multiplication determined
by the co-$H$-structure on ${\mathbb S}igma J_n(\mathbb{S}^1)$ by $\#_n$, i.e.,
$$
[J_n({\mathbb S}^1),\Omega Y]\cong \left(\prod_{i=2}^{n+1} \pi_i(Y), \#_n\right).
$$
Now, we describe $\#_n$ inductively as follows. First write
$$\prod_{i=2}^{n+1} \pi_i(Y)=\left(\prod_{i=2}^{n} \pi_i(Y)\right)\times \pi_{n+1}(Y).$$ For any $\displaystyle{\alpha \in \prod_{i=2}^{n+1} \pi_i(Y)}$, write $\alpha=(\alpha_1,\alpha_2)$, where $\displaystyle{\alpha_1\in \prod_{i=2}^{n} \pi_i(Y)}$ and $\alpha_2\in \pi_{n+1}(Y)$. Moreover, $(\alpha)_k$ denotes the coordinate of $\alpha$ in $\pi_k(Y)$. For $\displaystyle{(\alpha_1,\alpha_2), (\beta_1,\beta_2)\in \left(\prod_{i=2}^{n} \pi_i(Y)\right)\times \pi_{n+1}(Y)}$,
we have
\begin{equation}\label{J_n-product}
(\alpha_1,\alpha_2)\#_n(\beta_1,\beta_2):=\left(\alpha_1\#_{n-1}\beta_1, \alpha_2+\beta_2+\sum_{k+j=n+2}\varphii(k-1,k+j-3)[(\alpha_1)_k,(\beta_1)_j]\right).
\end{equation}
Recall that the natural inclusion $j_n :J_{n-1}({\mathbb S}^1) \hookrightarrow J_n({\mathbb S}^1)$ induces a surjective homomorphism $j_n^\ast: [J_n({\mathbb S}^1),\Omega Y] \to [J_{n-1}({\mathbb S}^1),\Omega Y]$, where $\mbox{Ker}\, j_n^\ast\cong \pi_{n+1}(Y)$ is central. In other words,
$$
j_n^\ast((\alpha)_1, (\alpha)_2,\ldots,(\alpha)_n, (\alpha)_{n+1})=((\alpha)_1, (\alpha)_2,\ldots,(\alpha)_n)
$$
for $((\alpha)_1, (\alpha)_2,\ldots,(\alpha)_n, (\alpha)_{n+1})\in[\Sigma J_n(\mathbb{S}^1),Y]$.
| 2,398 | 29,488 |
en
|
train
|
0.96.8
|
\section{Computations}
In this section, we revisit, simplify, and generalize the examples from Section 1 using the group structure of $[J_n({\mathbb S}^1),\Omega Y]$ in the previous
section together with the function $\varphii$. Furthermore, we determine whether $[J_k(\mathbb S^1),\Omega \mathbb S^{2n}]$ is abelian for certain values of $k$.
\begin{example}\label{ex1-revisit}
The multiplication in $[J_2(\mathbb S^1),\Omega \mathbb S^2]$ as in Example \ref{ex1} is given by the following rule:
$$(\alpha_1,\alpha_2) \#_2 (\beta_1,\beta_2)=(\alpha_1+\beta_1,\alpha_2+\beta_2+2\alpha_1\beta_1),
$$
where $\alpha_1,\beta_1\in \pi_2({\mathbb S}^2)\cong \mathbb Z$ and $\alpha_2,\beta_2 \in \pi_3({\mathbb S}^2)
\cong \mathbb Z$.
The perturbation $P: {\mathbb S}^3\to \Sigma J_2({\mathbb S}^1)\vee \Sigma J_2({\mathbb S}^1)$ in this case is
determined by the basic Whitehead product $[i_1,i_2]$, where
$i_j :{\mathbb S}^2\to{\mathbb S}^2\vee{\mathbb S}^2$ is the corresponding inclusion for $j=1,2$
which yields in the group $[J_2(\mathbb S^1),\Omega \mathbb S^2]$ the relation $[\iota_2,\iota_2]=2\etaa_2$
for the generators $\iota_2\in\pi_2({\mathbb S}^2)$ and $\etaa_2\in\pi_3({\mathbb S}^2)$
given by the Hopf map. Further, the isomorphism
$\mathbb Z\oplus \mathbb Z\cong [J_2({\mathbb S}^1),\Omega {\mathbb S}^2]$ is given
by $(m,n) \mapsto(m,m(m-1)+n)$ for $(m,n)\in\mathbb Z\oplus \mathbb Z$.
\end{example}
More generally, we can use Theorem \ref{DMP} to analyze the co-$H$-structure of $\Sigma J_2(\mathbb S^1)$.
The perturbation $P: \mathbb S^{3}\to \Sigma J_2(\mathbb S^1)\vee \Sigma J_2(\mathbb S^1)$ is given by the
Whitehead product $\mathbb S^{3}\to \mathbb S^2\vee \mathbb S^2$.
Therefore, the multiplication on the set $[J_2(\mathbb S^1), \Omega Y]=\pi_2(Y) \times \pi_3(Y)$
is given by
$$(\alpha_1,\alpha_2)\#_2(\beta_1,\beta_2)=(\alpha_1+\beta_1,\alpha_2+\beta_2+[\alpha_1,\beta_1]),$$
where $\alpha_1,\beta_1\in \pi_2(Y)$, $\alpha_2,\beta_2\in \pi_3(Y)$ and
$[\alpha_1,\beta_1]$ denotes the Whitehead product.
Note that $[\beta_1,\alpha_1]=(-1)^4[\alpha_1,\beta_1]=[\alpha_1,\beta_1]$. Consequently, we generalize Example \ref{ex1} by the following proposition.
\begin{proposition}\label{J_2-abelian}{\em
The group $[J_2(\mathbb S^1),\Omega Y]$ is an abelian group.}
\end{proposition}
\begin{remark}
Since $[J_2(\mathbb S^1),\Omega Y]$ is abelian, using induction on the central extension $0\to \pi_{n+1}(Y) \to [J_n({\mathbb S}^1),\Omega Y]\to [J_{n-1}({\mathbb S}^1),\Omega Y]\to 1$ shows that the group $[J_n({\mathbb S}^1),\Omega Y]$ is nilpotent with nilpotency class $\le n-1$.
\end{remark}
\begin{proposition} {\em If $Y$ is a $(2n-1)$-connected space then there is
an isomorphism of groups $[J_{4n-3}({\mathbb S}^1),\Omega Y]\cong\pi_{2n}(Y)\oplus\pi_{2n+1}(Y)\oplus\cdots\oplus\pi_{4n-2}(Y)$,
the group $[J_{4n-2}({\mathbb S}^1),\Omega Y]$ is abelian and the short exact sequence
$$0\to\pi_{4n-1}(Y)\longrightarrow[J_{4n-2}({\mathbb S}^1),\Omega Y]\longrightarrow[J_{4n-3}({\mathbb S}^1),\Omega Y]\to 0$$
splits provided $\pi_{2n}(Y)$ is free abelian.
In particular, this holds for $Y={\mathbb S}^{2n}$.}
\end{proposition}
\begin{proof} For dimensional reasons and by the connectivity of $Y$, there are no non-trivial Whitehead products obtained from the elements in $[J_{4n-3}({\mathbb S}^1),\Omega Y]$, which in turn is isomorphic to $\pi_{2n}(Y)\oplus\pi_{2n+1}(Y)\oplus\cdots\oplus\pi_{4n-2}(Y)$. The only possibly non-trivial Whitehead products in $[J_{4n-2}({\mathbb S}^1),\Omega Y]$ come from elements $\alpha, \beta \in \pi_{2n}(Y)$. It follows from Table \eqref{table} that $\Delta =0$, that is, $[\alpha, \beta]=[\beta,\alpha]$. This implies that $[J_{4n-2}({\mathbb S}^1),\Omega Y]$ is abelian. Finally, when $\pi_{2n}(Y)$ is free abelian, one can find a section $\pi_{2n}(Y) \to [J_{4n-2}({\mathbb S}^1),\Omega Y]$ so that $\alpha \#_{4n-3} \beta \mapsto (\alpha \#_{4n-3} \beta, [\alpha,\beta])$.
Hence, this gives rise to a section $\pi_{2n}(Y)\oplus\pi_{2n+1}(Y)\oplus\cdots\oplus\pi_{4n-2}(Y) \to[J_{4n-2}({\mathbb S}^1),\Omega Y]$.
\end{proof}
| 1,560 | 29,488 |
en
|
train
|
0.96.9
|
\begin{remark}
The assumption that $\pi_{2n}(Y)$ being free abelian above is only sufficient for the splitting of the short exact sequence as we illustrate in the following examples.
\end{remark}
\par It is well-known that the $(n-2)$-suspension $M^n=\Sigma^{n-2}\mathbb R P^2$ of the projective plane $\mathbb R P^2$
is the Moore space of type $({\mathbb Z}_2,n-1)$ for $n\ge 3$ so that $M^n$ is $(n-2)$-connected.
Given the inclusion $i_2 : {\mathbb S}^1\hookrightarrow \mathbb R P^2$ and the collapsing map $p_2 : \mathbb R P^2\to \mathbb R P^2/{\mathbb S}^1={\mathbb S}^2$,
we write $i_n =\Sigma^{n-2}i_2 : {\mathbb S}^{n-1}\to M^n$ and $p_n = \Sigma^{n-2}p_2 : M^n\to {\mathbb S}^n$ for the $(n-2)$-suspension maps with $n\ge 2$, respectively.
\begin{example}\label{JM-ex1}
Take $Y=M^3$. In view of \cite[Proposition 3.6]{G-M}, $\pi_2(M^3)= \mathbb Z_2\langle i_3\rangle$ and $\pi_3(M^3)=\mathbb Z_4\langle i_3\circ \etaa_2\rangle$.
Then, the following short exact sequence
$$0\to\pi_{3}(M^3)\longrightarrow[J_{2}({\mathbb S}^1),\Omega M^3]\longrightarrow[J_{1}({\mathbb S}^1),\Omega M^3]\to 0$$
becomes
\begin{equation}\label{Mukai_ex1}
0\to \mathbb Z_4 \to [J_{2}({\mathbb S}^1),\Omega M^3] \to \mathbb Z_2 \to 0.
\end{equation}
By Proposition \ref{J_2-abelian}, $[J_{2}({\mathbb S}^1),\Omega M^3]$ is abelian and thus is isomorphic to either $\mathbb Z_8$ or $\mathbb Z_4 \oplus \mathbb Z_2$.
It was pointed out to us by J.\ Mukai that the Whitehead product $[i_3,i_3]\ne 0$ and its order is $2$. It is straightforward to see, based upon the group structure of $[J_{2}({\mathbb S}^1),\Omega M^3]$, that there are no elements of order $8$ in $[J_{2}({\mathbb S}^1),\Omega M^3]$.
Hence, the short exact sequence \eqref{Mukai_ex1} splits.
\end{example}
Next, we provide a similar example\footnote{The authors are grateful to Juno Mukai for providing the example below.} to the one above, where the sequence does not split.
\begin{example}\label{JM-ex2}
The short exact sequence
$$0\to \pi_{11}(M^7)\longrightarrow[J_{10}({\mathbb S}^1),\Omega M^7]\longrightarrow[J_9({\mathbb S}^1),\Omega M^7]\cong\pi_6(M^7)\oplus\cdots\oplus\pi_{10}(M^7)\to 0$$
is central, does not split, and the group $[J_{10}({\mathbb S}^1),\Omega M^7]$ is abelian.
To see this, we recall that $\pi_{6}(M^7)={\mathbb Z}_2\langle i_7\rangle$, and in view of \cite{MS,wu}, it follows
that $\pi_{11}(M^7)\cong {\mathbb Z}_2\langle [i_7,i_7]\rangle$. Consider the element $\alpha =(0,0,0,0,i_7,0,0,0,0)$. Then
by \eqref{J_n-product}, we get
$$(\alpha,0) \#_{10} (\alpha,0) = (2\alpha, [i_7,i_7]) =(0,[i_7,i_7]) \quad \text{since $\alpha$ has order $2$.}$$
It follows that
$$(\alpha,0) \#_{10} (\alpha,0) \#_{10} (\alpha,0) \#_{10} (\alpha,0) =(0,0) \quad \text{since the Whitehead product $[i_7,i_7]$ has order 2.}$$
This shows that the group $[J_{10}({\mathbb S}^1),\Omega M^7]$ contains a copy of the group ${\mathbb Z}_4$ determined
by the element $\alpha$.
Hence, $[J_{10}({\mathbb S}^1),\Omega M^7]$ is abelian but it is not isomorphic to the direct sum $\pi_6(M^7)\oplus\cdots\oplus\pi_{11}(M^7)$. Consequently,
the short exact sequence
$$0\longrightarrow \pi_{11}(M^7)\longrightarrow[J_{10}({\mathbb S}^1),\Omega M^7]\longrightarrow[J_9({\mathbb S}^1),\Omega M^7]\cong\pi_6(M^7)\oplus\cdots\oplus\pi_{10}(M^7)\longrightarrow 0$$
does not split.
\end{example}
In Example \ref{ex3}, the group $[J_{k}({\mathbb S}^1),\Omega {\mathbb S}^{4}]$ is non-abelian for $k\ge 7$. We now generalize this example in the following proposition.
\begin{proposition} \label{1-2_stems}{\em
\mbox{\em (1)} The group $[J_{4n-1}({\mathbb S}^1),\Omega {\mathbb S}^{2n}]$ is non-abelian if and only if
$n$ is a power of $2$. In particular, when $n$ is a power of $2$, $[J_k({\mathbb S}^1),\Omega {\mathbb S}^{2n}]$ is non-abelian for $k\ge 4n-1$.
When $n$ is not a power of $2$, $[J_k({\mathbb S}^1),\Omega {\mathbb S}^{2n}]$ is abelian for $k\leq 4n$.
\par \mbox{\em (2)} The group $[J_k({\mathbb S}^1),\Omega M^{2n}]$ is non-abelian for $k\ge 4n-1$ provided $n$ is a power of $2$.}
\end{proposition}
\begin{proof}
(1): Given a prime $p$, consider the base $p$ expansions of the integers $m$ and $n$, where we
assume $0\le m\le n$:
\noindent
$n=a_0+pa_1+\cdots+p^ka_k,$ $0\le a_i<p$ and $m=b_0+pb_1+\cdots+p^kb_k$, $0\le b_i< p$.
Then, by Lucas' Theorem (see e.g., \cite{fine}),
$${n\choose m}\equiv \prod^k_{i=0}{a_i\choose b_i}(\bmod\; p).$$
Hence, ${n\choose m}\not\equiv 0\;(\bmod\; p)$ if and only if $b_i\le a_i$ for all $0\le i\le k$.
\par In particular, for $0\le m\le n$ and $p=2$ with expansions
$n=a_0+2a_1+\cdots+2^ka_k,$ $0\le a_i<2$ and $m=b_0+2b_1+\cdots+2^kb_k$, $0\le b_i< 2$,
the number ${n\choose m}$ is odd if and only if $b_i\le a_i$ for all $0\le i\le k$.
Thus, following Table (\ref{table}), $\Delta={2n-1\choose n}$ is odd if and only if $n=2^l$ for some $l\ge 0$.
Since $[\etaa_{2n},\iota_{2n}]\ne 0$ (see \cite[p.\ 404]{GM}) and $\Delta\ne 0$, we have $(0,\ldots,0,\iota_{2n},0,\ldots,0)\#_k(0,\ldots,0,\etaa_{2n},0,\ldots,0)\ne(0,\ldots,0,\etaa_{2n},0,\ldots,0)\#_k(0,\ldots,0,\iota_{2n},0,\ldots,0)$
and consequently $[J_{4n-1}({\mathbb S}^1),\Omega {\mathbb S}^{2n}]$ is non-abelian if, and only if,
$n$ is a power of $2$. Hence, $[J_k({\mathbb S}^1),\Omega{\mathbb S}^{2n}]$ is non-abelian for $k\ge 4n-1$ when $n=2^l$ for some $l>0$.
When $n$ is not a power of $2$, we only need to consider the Whitehead product $[\iota_{2n},\etaa_{2n}^2]$ because other Whitehead products from lower
dimensions lie in the abelian group $[J_{4n-1}({\mathbb S}^1),\Omega {\mathbb S}^{2n}]$. Since $2n-1$ and $(2n+2)-1$ are both odd, it follows from Table (\ref{table})
that $\Delta=0$, i.e., $\iota_{2n}\#_{4n}\etaa_{2n}^2=\etaa_{2n}^2\#_{4n}\iota_{2n}$. Thus, we conclude that $[J_{4n}({\mathbb S}^1),\Omega {\mathbb S}^{2n}]$ is abelian and hence so is $[J_{k}({\mathbb S}^1),\Omega {\mathbb S}^{2n}]$ for any $k\le 4n$.
(2): Let $\tilde{\etaa}_2\in \pi_4(M^3)$ be a lift of $\etaa_3\in\pi_4({\mathbb S}^3)$ satisfying $2\tilde{\etaa}_2=i_3 \etaa^2_2$,
p_3\tilde{\etaa}_2=\etaa_3$ and set $\tilde{\etaa}_n=\Sigma^{n-2}\tilde{\etaa}_2$ for $n\ge 2$.
Then $\pi_{2n+1}(M^{2n})= {\mathbb Z}_4\langle \tilde{\etaa}_{2n-1}\rangle$ and, by \cite[Lemma 3.8]{G-M}, it holds
$[i_{2n}\etaa_{2n-1},\tilde{\etaa}_{2n-1}]\ne 0$ for $n\ge 2$. Then, we can deduce as in (1)
that $[J_k({\mathbb S}^1),\Omega M^{2n}]$ is non-abelian for $k\ge 4n-1$ and $n=2^l$ for some $l>0$.
\end{proof}
Based upon the results in Proposition \ref{1-2_stems}, the next question is whether $[J_{4n+1}({\mathbb S}^1),\Omega{\mathbb S}^{2n}]$ is abelian when $n$ is not a power of $2$. Proposition \ref{1-2_stems} (1) depends on certain divisibility properties of certain types of binomial coefficients. In the next result, we answer
this question by exploring further such divisibility results concerning the Catalan numbers and thereby strengthen Proposition \ref{1-2_stems}.
Let $T^*(01)$ denote the set of natural numbers $n$ with $(n)_3=(n_i)$ and $n_i\in \{0,1\}$ for $i\ge 1$, where $(n)_3$ is the base $3$ expansion of $n$. Further, denote by $T^*(01)-1$ the set $\{n-1|n\in T^*(01)\}$.
\par Following Table (\ref{table}), we get $\Delta=\binom{2n}{n-1}$. Since $\binom{2n}{n-1}+\binom{2n}{n}=\binom{2n+1}{n}=\frac{2n+1}{n+1}\binom{2n}{n}$,
it follows that
\begin{equation}\label{catalan}
\Delta=\binom{2n}{n-1}=\binom{2n}{n}\left(\frac{2n+1}{n+1}-1\right)=\frac{n}{n+1}\binom{2n}{n}=nC_n,
\end{equation}
where $C_n$ is the $n$-th Catalan number.
\par Write $\nu_{2n}$ for a generator of $\pi_{2n+3}({\mathbb S}^{2n})$ with $n\ge 2$ and consider the Whitehead product $[\iota_{2n},\nu_{2n}]$.
Suppose $n$ is not a power of $2$. According to \cite[p.\ 405]{GM}, the order of $[\iota_{2n},\nu_{2n}]$ is $12$ if $n$ is odd or of order $24$ if $n$ is even.
\begin{proposition}\label{3_stem}{\em
Suppose $n\ne 2^{\ell}$ for any $\ell>0$. Then $[J_{4n+1}({\mathbb S}^1),\Omega {\mathbb S}^{2n}]$ is abelian if and only if,
\begin{itemize}
\item[(1)] \mbox{\em (i)} $n\ne 2^a-1$ and $n\ne 2^a+2^b-1$ for some $b>a\ge 0$;
\noindent
and
\noindent
\mbox{\em (ii)} $n\equiv 0\, (\bmod\, 3)$ or $n\notin T^*(01)-1$ when $n$ is odd;
\item[(2)] $n\equiv 0\, (\bmod \,3)$ or $n\notin T^*(01)-1$ when $n$ is even.
\end{itemize}}
\end{proposition}
\begin{proof}
When $n$ is odd, $\Delta=nC_n$ is divisible by $12$ if and only if, $C_n$ is divisible by $4$ and either $n$ or $C_n$ is divisible by $3$. Similarly, when $n$ is even, $\Delta=nC_n$ is divisible by $24$ if and only if, $C_n$ is divisible by $4$ (since $n$ is even) and either $n$ or $C_n$ is divisible by $3$.
The result follows from \cite[Theorem 5.2]{ds} for the divisibility of $C_n$ by $3$ and from \cite[Theorem 2.3]{ely} for the divisibility of $C_n$ by $4$.
\end{proof}
| 3,654 | 29,488 |
en
|
train
|
0.96.10
|
Based upon the results in Proposition \ref{1-2_stems}, the next question is whether $[J_{4n+1}({\mathbb S}^1),\Omega{\mathbb S}^{2n}]$ is abelian when $n$ is not a power of $2$. Proposition \ref{1-2_stems} (1) depends on certain divisibility properties of certain types of binomial coefficients. In the next result, we answer
this question by exploring further such divisibility results concerning the Catalan numbers and thereby strengthen Proposition \ref{1-2_stems}.
Let $T^*(01)$ denote the set of natural numbers $n$ with $(n)_3=(n_i)$ and $n_i\in \{0,1\}$ for $i\ge 1$, where $(n)_3$ is the base $3$ expansion of $n$. Further, denote by $T^*(01)-1$ the set $\{n-1|n\in T^*(01)\}$.
\par Following Table (\ref{table}), we get $\Delta=\binom{2n}{n-1}$. Since $\binom{2n}{n-1}+\binom{2n}{n}=\binom{2n+1}{n}=\frac{2n+1}{n+1}\binom{2n}{n}$,
it follows that
\begin{equation}\label{catalan}
\Delta=\binom{2n}{n-1}=\binom{2n}{n}\left(\frac{2n+1}{n+1}-1\right)=\frac{n}{n+1}\binom{2n}{n}=nC_n,
\end{equation}
where $C_n$ is the $n$-th Catalan number.
\par Write $\nu_{2n}$ for a generator of $\pi_{2n+3}({\mathbb S}^{2n})$ with $n\ge 2$ and consider the Whitehead product $[\iota_{2n},\nu_{2n}]$.
Suppose $n$ is not a power of $2$. According to \cite[p.\ 405]{GM}, the order of $[\iota_{2n},\nu_{2n}]$ is $12$ if $n$ is odd or of order $24$ if $n$ is even.
\begin{proposition}\label{3_stem}{\em
Suppose $n\ne 2^{\ell}$ for any $\ell>0$. Then $[J_{4n+1}({\mathbb S}^1),\Omega {\mathbb S}^{2n}]$ is abelian if and only if,
\begin{itemize}
\item[(1)] \mbox{\em (i)} $n\ne 2^a-1$ and $n\ne 2^a+2^b-1$ for some $b>a\ge 0$;
\noindent
and
\noindent
\mbox{\em (ii)} $n\equiv 0\, (\bmod\, 3)$ or $n\notin T^*(01)-1$ when $n$ is odd;
\item[(2)] $n\equiv 0\, (\bmod \,3)$ or $n\notin T^*(01)-1$ when $n$ is even.
\end{itemize}}
\end{proposition}
\begin{proof}
When $n$ is odd, $\Delta=nC_n$ is divisible by $12$ if and only if, $C_n$ is divisible by $4$ and either $n$ or $C_n$ is divisible by $3$. Similarly, when $n$ is even, $\Delta=nC_n$ is divisible by $24$ if and only if, $C_n$ is divisible by $4$ (since $n$ is even) and either $n$ or $C_n$ is divisible by $3$.
The result follows from \cite[Theorem 5.2]{ds} for the divisibility of $C_n$ by $3$ and from \cite[Theorem 2.3]{ely} for the divisibility of $C_n$ by $4$.
\end{proof}
\begin{remark}
Let $n=29$. Then, $C_{29}$ is divisible by $4$ but $29$ is not divisible by $3$ and $C_{29}$ is not divisible by $3$ since $29\in T^*(01)-1$. Thus, in case
(1) in Proposition \ref{3_stem}, (i) is satisfied but (ii) is not.
Let $n=34$. Then, $C_{34}$ is divisible by $4$ and by $3$ but $34$ is not divisible by $3$.
\end{remark}
The following result generalizes Example \ref{ex2}.
\begin{proposition}{\em
The group $[J_{4n-1}({\mathbb S}^1),\Omega({\mathbb S}^{2n}\vee{\mathbb S}^{2n+1})]$ is non-abelian.}
\end{proposition}
\begin{proof}
Consider the inclusion maps $i_1 : {\mathbb S}^{2n}\hookrightarrow{\mathbb S}^{2n}\vee{\mathbb S}^{2n+1}$ and $i_2 :{\mathbb S}^{2n+1}\hookrightarrow{\mathbb S}^{2n}\vee{\mathbb S}^{2n+1}$.
Then, Hilton's result \cite{H} asserts that for every positive integer $k$, there is an isomorphism
$$\Theta :\bigoplus_{l=1}^\infty\pi_k({\mathbb S}^{n_l})\stackrel{\cong}{\longrightarrow} \pi_k({\mathbb S}^{2n}\vee{\mathbb S}^{2n+1}),$$
where the restriction $\Theta|_{\pi_k({\mathbb S}^{n_l})}=\omegaega_{l\ast} :\pi_k({\mathbb S}^{n_l})\to \pi_k({\mathbb S}^{2n}\vee{\mathbb S}^{2n+1})$
is determined by the iterated Whitehead product of the maps $i_1$ and $i_2$.
In particular, the Whitehead product $[i_1,i_2] : {\mathbb S}^{4n}\to {\mathbb S}^{2n}\vee{\mathbb S}^{2n+1}$ is non-trivial.
Furthermore, $$(0,\ldots,0,i_1,0,\ldots)\#_{4n-1}(0,\ldots,0,i_2,0,\ldots)=(0,\ldots,0,i_1,0,\ldots,0,i_2,0,\ldots)$$ and
$$(0,\ldots,0,i_2,0,\ldots)\#_{4n-1}(0,\ldots,0,i_1,0,\ldots)=(0,\ldots,0,i_1,0,\ldots,0,i_2,0,\ldots,0,[i_1,i_2],0\ldots).$$
The above implies that the group $[J_{4n-1}({\mathbb S}^1),\Omega({\mathbb S}^{2n}\vee{\mathbb S}^{2n+1})]$ is non-abelian.
\end{proof}
To close the paper, we derive few simple properties about the torsion elements in $[J({\mathbb S}^1), \Omega Y]$.
\begin{proposition} {\em Let $\alpha, \beta\in [J({\mathbb S}^1), \Omega Y]$ be two elements which correspond to
homogeneous sequences with $\alpha\in \pi_m(Y)$ and $\beta\in \pi_n(Y)$. Then:
\mbox{\em (1)} if $\alpha$ has order $k$ in $\pi_m(Y)$, then $\alpha$, regarded as an element
of $[J({\mathbb S}^1), \Omega Y]$, has order $k$ or $k^2$;
\mbox{\em (2)} if $\alpha,\beta$ are torsion elements of order $|\alpha|$ and $|\beta|$, respectively such that ${\rm gcd}(|\alpha|,|\beta|)=1$, then $\alpha$ and $\beta$ commute in $[J({\mathbb S}^1), \Omega Y]$.}
\end{proposition}
\begin{proof} (1): By \cite[Chapter XI, Section 8, Theorem 8.8]{Whi}, all Whitehead products of weight $\geq 3$ of an element of odd dimension and all Whitehead products of weight $\geq 4$ of an element of even dimension, vanish. Therefore, using our formula and the result above,
we obtain that $\alpha^k$ (as an element of $[J({\mathbb S}^1), \Omega Y]$) is a sequence of the form $(0,\ldots,0, k\alpha, \lambda_1[\alpha, \alpha], \lambda_2[\alpha, [\alpha, \alpha]],0,\ldots)=
(0,\ldots,0, \lambda_1[\alpha, \alpha], \lambda_2[\alpha, [\alpha, \alpha]],0,\ldots)$. Again, by the result cited above, we obtain that
\begin{equation*}
\begin{aligned}
(0,\ldots,0,\lambda_1[\alpha, \alpha], \lambda_2[\alpha, [\alpha, \alpha]],0,\ldots)^k&=(0,\ldots,0, k\lambda_1[\alpha, \alpha], k\lambda_2[\alpha, [\alpha, \alpha]],0,\ldots) \\
&=(0,\ldots,0, \lambda_1[k\alpha, \alpha], \lambda_2[k\alpha, [\alpha, \alpha]],0,\ldots) \\
&=0
\end{aligned}
\end{equation*} and (1) follows.
(2): It suffices to observe that the Whitehead product $[\alpha, \beta]$ vanishes. Since $|\alpha|[\alpha, \beta]=[|\alpha|\alpha, \beta]=0$,
$|\beta|[\alpha, \beta]=[\alpha, |\beta|\beta]=0$ and ${\rm gcd}(|\alpha|,|\beta|)=1$, it follows that $[\alpha, \beta]=0$ and
the proof is complete.
\end{proof}
\end{document}
| 2,364 | 29,488 |
en
|
train
|
0.97.0
|
\begin{document}
\title{Generalized Laplacian decomposition of vector fields on fractal surfaces}
\author{Daniel Gonz\'alez-Campos$^{(1)}$, Marco Antonio P\'erez-de la Rosa$^{(2)}$\\and\\ Juan Bory-Reyes$^{(3)}$}
\date{ \small $^{(1)}$ Escuela Superior de F\'isica y Matem\'aticas. Instituto Polit\'ecnico Nacional. CDMX. 07738. M\'exico. \\ E-mail: daniel\[email protected] \\
$^{(2)}$ Department of Actuarial Sciences, Physics and Mathematics, Universidad de las Am\'ericas Puebla.
San Andr\'es Cholula, Puebla. 72810. M\'exico. \\ Email: [email protected] \\
$^{(3)}$ ESIME-Zacatenco. Instituto Polit\'ecnico Nacional. CDMX. 07738. M\'exico. \\ E-mail: [email protected] }
\maketitle
\begin{abstract}
We consider the behavior of generalized Laplacian vector fields on a Jordan domain of $\mathbb{R}^{3}$ with fractal boundary. Our approach is based on properties of the Teodorescu transform and suitable extension of the vector fields. Specifically, the present article addresses the decomposition problem of a H\"older continuous vector field on the boundary (also called reconstruction problem) into the sum of two generalized Laplacian vector fields in the domain and in the complement of its closure, respectively. In addition, conditions on a H\"older continuous vector field on the boundary to be the trace of a generalized Laplacian vector field in the domain are also established.
\end{abstract}
\small{
\noindent
\textbf{Keywords.} Quaternionic analysis; vector field theory; fractals.\\
\noindent
\textbf{Mathematics Subject Classification (2020).} 30G35, 32A30, 28A80.}
\section{Introduction}
Quaternionic analysis is regarded as a broadly accepted branch of classical analysis referring to many different types of extensions of the Cauchy-Riemann equations to the quaternion skew field $\mathbb{H}$, which would somehow resemble the classical complex one-dimensional function theory.
An ordered set of quaternions $\psi:=(\psi_1, \psi_2, \psi_3)\in \mathbb{H}^{3}$, which form an orthonormal (in the usual Euclidean sense) basis in $\mathbb{R}^{3}$ is called a structural $\mathbb{H}$-vector.
The foundation of the so-called $\psi$-hyperholomorphic quaternion valued function theory, see \cite{NM, VSMV, MS} and elsewhere, is that the structural $\mathbb{H}$-vector $\psi$ must be chosen in a way that the factorization of the quaternionic Laplacian holds for $\psi$-Cauchy-Riemann operators. This question goes back at least as far as N\^{o}no's work \cite{Nono1, Nono2}.
The use of a general orthonormal basis introducing a generalized Moisil-Teodorescu system is the cornerstone of a generalized quaternionic analysis, where the generalized Cauchy-Riemann operator with respect to the standard basis in $\mathbb{R}^3$ is submitted to an orthogonal transformation. Despite the fact that some of the results in the present work can be obtained after the action of an orthogonal transformation on the standard basis, we keep their proofs in the work for the sake of completeness.
The $\psi$-hyperholomorphic functions theory by itself is not much of a novelty since it can be reduced by an orthogonal transformation to the standard case. In the face of this, the picture changes entirely by studying some important operators involving a pair of different orthonormal basis.
Moreover, the possibility to study simultaneously several conventional known theories, which can be embedded into a corresponding version of $\psi$-hyperholomorphic functions theory, again cannot be reduced to the standard context and reveal indeed the relevance of the $\psi$-hyperholomorphic functions theory.
The advantageous idea behind the unified study of particular cases of a generalized Moisil-Teodorescu system in $\psi$-hyperholomorphic functions theory simultaneously is considered in the present work.
The special case of structural $\mathbb{H}$-vector $\psi^\theta:=\{\textbf{i},\, \textbf{i}e^{\textbf{i}\theta}\textbf{j},\, e^{\textbf{i}\theta}\textbf{j}\}$ for $\theta\in[0,2\pi)$ fixed and its associated $\psi^\theta$-Cauchy-Riemann operator
\begin{equation*}
{^{\psi^{\theta}}}D:=\displaystyle\frac{\partial}{\partial x_{1}}\textbf{i}+\frac{\partial}{\partial x_{2}}\textbf{i}e^{\textbf{i}\theta}\textbf{j}+\frac{\partial}{\partial x_{3}} e^{\textbf{i}\theta}\textbf{j},
\end{equation*}
are used in \cite{BAPS} to give a quaternionic treatment of the inhomogeneous case of the system
\begin{equation}\label{sedi}
\left\{
\begin{array}{rcl}
-\displaystyle \frac{\partial f_{1}}{\partial x_{1}}+\left(\frac{\partial f_{2}}{\partial x_{2}}-\frac{\partial f_{3}}{\partial x_{3}}\right)\sin\theta-\left(\frac{\partial f_{3}}{\partial x_{2}}+\frac{\partial f_{2}}{\partial x_{3}}\right)\cos\theta & = & 0,
\\ {}\\ \displaystyle {\left(\frac{\partial f_{3}}{\partial x_{3}}-\frac{\partial f_{2}}{\partial x_{2}}\right)}\cos\theta-\left(\frac{\partial f_{3}}{\partial x_{2}}+\frac{\partial f_{2}}{\partial x_{3}}\right)\sin\theta & = & 0,
\\ {}\\ \displaystyle {-\frac{\partial f_{3}}{\partial x_{1}}+\frac{\partial f_{1}}{\partial x_{3}}\sin\theta+\frac{\partial f_{1}}{\partial x_{2}}\cos\theta} & = & 0, \\ {}\\
\displaystyle {\frac{\partial f_{2}}{\partial x_{1}}-\frac{\partial f_{1}}{\partial x_{3}}\cos\theta+\frac{\partial f_{1}}{\partial x_{2}}\sin\theta} & = & 0,
\end{array}
\right.
\end{equation}
wherein the unknown well-behaved functions $f_m: \Omega \rightarrow \mathbb{C}, m=1,2,3$ are prescribed in a smooth domain $\Omega\subset\mathbb{R}^{3}$.
From now on, a smooth vector field $\vec{f}=(f_{1}, f_{2}, f_{3})$ that satisfies \eqref{sedi} will be said to be a generalized Laplacian vector field.
We will consider complex quaternionic valued functions (a detailed exposition of notations and definitions will be given in Section 2) to be expressed by
\begin{equation}
\notag
f=f_{0}+f_{1}\textbf{i}+f_{2}\textbf{j}+f_{3}\textbf{k},
\end{equation}
where $\textbf{i}$, $\textbf{j}$ and $\textbf{k}$ denote the quaternionic imaginary units.
On the other hand, the one-to-one correspondence
\begin{equation}\label{corre}
\mathbf{f}=f_1\mathbf{i}+f_2\mathbf{j}+f_3\mathbf{k}\, \longleftrightarrow \vec{f}=(f_{1}, f_{2}, f_{3})
\end{equation}
makes it obvious that \eqref{sedi} can be obtained from the classical Moisil-Theodorescu system after the action of some element in $O(3)$ as:
$${^{\psi^{\theta}}}D[\mathbf{f}]= 0.$$
System \eqref{sedi} contains as a particular case the well-known solenoidal and irrotational, or harmonic system of vector fields (see \cite{ABS, ABMP} and the references given there). Indeed, under the correspondence $\mathbf{f}=f_1\mathbf{i}+f_3\mathbf{j}+f_2\mathbf{k}\, \longleftrightarrow \vec{f}=(f_{1}, f_{2}, f_{3})\,$ we have for $\theta=0$:
\begin{equation}\label{equi}
{}{^{\psi^{0}}}D[\mathbf{f}]=0\,\Longleftrightarrow \,
\begin{cases}
\text{div} \vec{f}=0,\cr
\text{rot} \vec{f}=0.
\end{cases}
\end{equation}
Besides, the system \eqref{sedi} includes other partial differential equations systems (see \cite{BAPS} for more details): A particular case of the inhomogeneous Cimmino system (\cite{C}) when one looks for a solution $(f_1,f_2,f_3)$, where each $f_m,\,m=1,2,3$ does not depend on $x_0$. This system is obtained from \eqref{sedi} for $\theta=\frac{\pi}{2}$. Also, an equivalent system to the so-called Riesz system \cite{Riesz} studied in \cite{Gur, Gur2}, which can be obtained from \eqref{sedi} for $\theta=\pi$ and the convenient embedding in $\mathbb{R}^3$.
In order to get more generalized results than those of \cite{ABMP}, it is assumed in this paper that $\Omega\subset \mathbb{R}^{3}$ is a Jordan domain (\cite{HN}) with fractal boundary $\Gamma$ in the Mandelbrot sense, see \cite{FKJ, FJ}.
Let us introduce the temporary notations $\Omega_{+}:=\Omega$ and $\Omega_{-}:=\mathbb{R}^{3}\setminus \{\Omega_{+}\cup\Gamma\}$. We are interested in the following problems: Given a continuous three-dimensional vector field $\vec{f}: \Gamma \rightarrow \mathbb{C}^{3}$:
\begin{itemize}
\item [$(I)$]
(Problem of reconstruction) Under which conditions can $\vec{f}$ be decomposed on $\Gamma$ into the sum:
\begin{equation} \label{des}
\vec{f}(t)=\vec{f}^{+}(t)+\vec{f}^{-}(t), \quad \forall \, t\in\Gamma,
\end{equation}
where $\vec{f}^{\pm}$ are extendable to generalized Laplacian vector fields $\vec{F}^{\pm}$ in $\Omega_{\pm}$, with $\vec{F}^{-}(\infty)=0$?
\item [$(II)$] When $\vec{f}$ is the trace on $\Gamma$ of a generalized Laplacian vector field $\vec{F}^{\pm}$ in $\Omega_{\pm}\cup\Gamma$?
\end{itemize}
In what follows, we deal with problems $(I)$ and $(II)$ using the quaternionic analysis tools and working with $\mathbf{f}$ instead of $\vec{f}$ under the one-to-one correspondence (\ref{corre}). It will cause no confusion if we call $\mathbf{f}$ also vector field.
In the case of a rectifiable surface $\Gamma$ (the Lipschitz image of some bounded subset of $\mathbb{R}^{2}$) these problems have been investigated in \cite{GPB}.
| 2,795 | 20,490 |
en
|
train
|
0.97.1
|
\section{Preliminaries.}
\subsection{Basics of $\psi^{\theta}$-hyperholomorphic function theory.}
Let $\mathbb{H}:=\mathbb{H(\mathbb{R})}$ and $\mathbb{H(\mathbb{C})}$ denote the sets of real and complex quaternions respectively. If $a\in\mathbb{H}$ or $a\in\mathbb{H(\mathbb{C})}$, then $a=a_{0}+a_{1}\textbf{i}+a_{2}\textbf{j}+a_{3}\textbf{k}$, where the coefficients $a_{k}\in\mathbb{R}$ if $a\in\mathbb{H}$ and $a_{k}\in\mathbb{C}$ if $a\in\mathbb{H(\mathbb{C})}$. The symbols
$\textbf{i}$, $\textbf{j}$ and $\textbf{k}$ denote different imaginary units, i.e., $\textbf{i}^{2}=\textbf{j}^{2}=\textbf{k}^{2}=-1$ and they satisfy the following multiplication rules $\textbf{i}\textbf{j}=-\textbf{j}\textbf{i}=\textbf{k}$; $\textbf{j}\textbf{k}=-\textbf{k}\textbf{j}=\textbf{i}$; $\textbf{k}\textbf{i}=-\textbf{i}\textbf{k}=\textbf{j}$. The unit imaginary $i\in\mathbb{C}$ commutes with every quaternionic unit imaginary.
It is known that $\mathbb{H}$ is a skew-field and $\mathbb{H(\mathbb{C})}$ is an associative, non-commutative complex algebra with zero divisors.
If $a\in\mathbb{H}$ or $a\in\mathbb{H(\mathbb{C})}$, $a$ can be represented as $a=a_{0}+\vec{a}$, with $\vec{a}=a_{1}\textbf{i}+a_{2}\textbf{j}+a_{3}\textbf{k}$,
$\text{Sc}(a):=a_{0}$ is called the scalar part and $\text{Vec}(a):=\vec{a}$ is called the vector part of the quaternion $a$.
Also, if $a\in\mathbb{H(\mathbb{C})}$, $a$ can be represented as $a=\alpha_{1}+i\alpha_{2}$ with $\alpha_{1},\,\alpha_{2}\in\mathbb{H}$.
Let $a,\,b\in\mathbb{H(\mathbb{C})}$, the product between these quaternions can be calculated by the formula:
\begin{equation} \label{pc2}
ab=a_{0}b_{0}-\langle\vec{a},\vec{b}\rangle+a_{0}\vec{b}+b_{0}\vec{a}+[\vec{a},\vec{b}],
\end{equation}
where
\begin{equation} \label{proint}
\langle\vec{a},\vec{b}\rangle:=\sum_{k=1}^{3} a_{k}b_{k}, \quad
[\vec{a},\vec{b}]:= \left|\begin{matrix}
\textbf{i} & \textbf{j} & \textbf{k}\\
a_{1} & a_{2} & a_{3}\\
b_{1} & b_{2} & b_{3}
\end{matrix}\right|.
\end{equation}
We define the conjugate of $a=a_{0}+\vec{a}\in\mathbb{H(\mathbb{C})}$ by $\overline{a}:=a_{0}-\vec{a}$.
The Euclidean norm of a quaternion $a\in\mathbb{H}$ is the number $\abs{a}$ given by:
\begin{equation}\label{normar}
\abs{a}=\sqrt{a\overline{a}}=\sqrt{\overline{a}a}.
\end{equation}
We define the quaternionic norm of $a\in\mathbb{H(\mathbb{C})} $ by:
\begin{equation}
\abs{a}_{c}:=\sqrt{{{\abs {a_{0}}}_{\mathbb{C}}}^{2}+{{\abs {a_{1}}}_{\mathbb{C}}}^{2}+{{\abs {a_{2}}}_{\mathbb{C}}}^{2}+{{\abs {a_{3}}}_{\mathbb{C}}}^{2}},
\end{equation}
where ${\abs {a_{k}}}_{\mathbb{C}}$ denotes the complex norm of each component of the quaternion $a$. The norm of a complex quaternion $a=\alpha_{1}+i\alpha_{2}$ with $\alpha_{1}, \alpha_{2} \in \mathbb{H}$ can be rewritten in the form
\begin{equation} \label{nc2}
{\abs{a}_{c}}=\sqrt{\abs{\alpha_{1}}^2+\abs{\alpha_{2}}^2}.
\end{equation}
If $a \in \mathbb{H}$, $b \in \mathbb{H(\mathbb{C})}$, then
\begin{equation}
{\abs{ab}}_{c}=\abs{a}{\abs{b}}_{c}.
\end{equation}
If $a\in\mathbb{H(\mathbb{C})}$ is not a zero divisor then $\displaystyle a^{-1}:=\frac{\overline{a}}{a\overline{a}}$ is the inverse of the complex quaternion $a$.
\begin{subsection}{Notations}
\begin{itemize}
\item We say that $f:\Omega \rightarrow \mathbb{H(\mathbb{C}})$ has properties in $\Omega$ such as continuity and real differentiability of order $p$ whenever all $f_{j}$ have these properties. These spaces are usually denoted by $C^{p}(\Omega,\, \mathbb{H(\mathbb{C})})$ with $p\in \mathbb{N}\cup\{0\}$.
\item Throughout this work, $\text{Lip}_{\mu}(\Omega,\, \mathbb{H(\mathbb{C})})$, $0<\mu\leq 1$, denotes the set of H\"older continuous functions $f:\Omega \rightarrow \mathbb{H(\mathbb{C}})$ with H\"older exponent $\mu$. By abuse of notation, when $f_{0}=0$ we write $\mathbf{Lip}_{\mu}(\Omega,\, \mathbb{C}^{3})$ instead of $\text{Lip}_{\mu}(\Omega,\, \mathbb{H(\mathbb{C})})$.
\end{itemize}
\end{subsection}
In this paper, we consider the structural set $\psi^\theta:=\{\textbf{i},\, \textbf{i}e^{\textbf{i}\theta}\textbf{j},\, e^{\textbf{i}\theta}\textbf{j}\}$ for $\theta\in[0,2\pi)$ fixed, and the associated operators ${^{\psi^\theta}}D$ and $D{^{\psi^\theta}}$ on $C^{1}(\Omega,\, \mathbb{H(\mathbb{C})})$ defined by
\begin{equation}
{^{\psi^{\theta}}}D[f]:=\textbf{i}\frac{\partial f}{\partial x_{1}}+\textbf{i}e^{\textbf{i}\theta}\textbf{j}\frac{\partial f}{\partial x_{2}}+e^{\textbf{i}\theta}\textbf{j}\frac{\partial f}{\partial x_{3}},
\end{equation}
\begin{equation}
D{^{\psi^\theta}}[f]:=\frac{\partial f}{\partial x_{1}}\textbf{i}+\frac{\partial f}{\partial x_{2}}\textbf{i}e^{\textbf{i}\theta}\textbf{j}+\frac{\partial f}{\partial x_{3}} e^{\textbf{i}\theta}\textbf{j},
\end{equation}
which linearize the Laplace operator $\Delta_{\mathbb{R}^{3}}$ in the sense that
\begin{equation}
{^{\psi^{\theta}}}D^{2}= \left[D{^{\psi^\theta}}\right]^{2}=-\Delta_{\mathbb{R}^{3}}.
\end{equation}
All functions belonging to $\ker \left({^{\psi^{\theta}}}D\right) := \left\{f : {^{\psi^{\theta}}}D[f]=0\right\}$ are called left-$\psi^{\theta}$-hyperholomorphic in $\Omega$. Similarly, those functions which belong to $\ker \left(D{^{\psi^{\theta}}}\right):= \left\{f : D{^{\psi^{\theta}}}[f]=0\right\}$ will be called right-$\psi^{\theta}$-hyperholomorphic in $\Omega$. For a deeper discussion of the hyperholomorphic function theory we refer the reader to \cite{KVS}.
The function
\begin{equation} \label{kernel}
\mathscr{K}_{\psi^{\theta}}(x):=-\frac{1}{4\pi}\frac{(x)_{\psi^{\theta}}}{\abs{x}^3}, \quad x\in\mathbb{R}^{3}\setminus\{0\},
\end{equation}
where
\begin{equation}
(x)_{\psi^{\theta}}:=x_{1}\textbf{i}+x_{2} \textbf{i}e^{\textbf{i}\theta}\textbf{j}+x_{3}e^{\textbf{i}\theta}\textbf{j},
\end{equation}
is a both-side-$\psi^{\theta}$-hyperholomorphic fundamental solution of $^{\psi^{\theta}}D$. Observe that $\abs{(x)_{\psi^{\theta}}}=\abs{x}$ for all $ x \in \mathbb{R}^{3}$.
For $f=f_{0}+\mathbf{f}\in C^1(\Omega,\mathbb{H(\mathbb{C})})$ let us define
\begin{equation}
{^{\psi^{\theta}}}\text{div}[\mathbf{f}]:=\frac{\partial f_{1}}{\partial x_{1}}+\left({\frac{\partial f_{2}}{\partial x_{2}}-\frac{\partial f_{3}}{\partial x_{3}}}\right)\textbf{i}e^{\textbf{i}\theta},
\end{equation}
\begin{equation}
{^{\psi^{\theta}}}\text{grad}[f_{0}]:=\frac{\partial f_{0}}{\partial x_{1}}\textbf{i}+\frac{\partial f_{0}}{\partial x_{2}}\textbf{i}e^{\textbf{i}\theta}\textbf{j}+\frac{\partial f_{0}}{\partial x_{3}}e^{\textbf{i}\theta}\textbf{j},
\end{equation}
\begin{equation}
\begin{split}
{^{\psi^{\theta}}}\text{rot}[\mathbf{f}]:=\left({-\frac{\partial f_{3}}{\partial x_{2}}-\frac{\partial f_{2}}{\partial x_{3}}}\right)e^{\textbf{i}\theta}+\left({-\frac{\partial f_{1}}{\partial x_{3}}\textbf{i}e^{\textbf{i}\theta}-\frac{\partial f_{3}}{\partial x_{1}}}\right)\textbf{j} +\left({\frac{\partial f_{2}}{\partial x_{1}}-\frac{\partial f_{1}}{\partial x_{2}}\textbf{i}e^{\textbf{i}\theta}}\right)\textbf{k}.
\end{split}
\end{equation}
The action of ${^{\psi^{\theta}}}D$ on $f\in C^1(\Omega, \, \mathbb{H(\mathbb{C})})$ yields
\begin{equation}
{^{\psi^{\theta}}}D[f]=-{^{\psi^{\theta}}}\text{div}[\mathbf{f}]+{^{\psi^{\theta}}}\text{grad}[f_{0}]+ {^{\psi^{\theta}}}\text{rot}[\mathbf{f}],
\end{equation}
which implies that $f \in \ker ({^{\psi^{\theta}}}D) $ is equivalent to
\begin{equation} \label{eq1}
-{^{\psi^{\theta}}}\text{div}[\mathbf{f}]+{^{\psi^{\theta}}}\text{grad}[f_{0}]+ {^{\psi^{\theta}}}\text{rot}[\mathbf{f}]=0.
\end{equation}
If $f_{0}=0$, \eqref{eq1} reduces to
\begin{equation} \label{eq2}
-{^{\psi^{\theta}}}\text{div}[\mathbf{f}]+{^{\psi^{\theta}}}\text{rot}[\mathbf{f}]=0.
\end{equation}
We check at once that \eqref{sedi} is equivalent to \eqref{eq2}.
Similar considerations apply to $D^{\psi^{\theta}}$, for this case one obtains
\begin{equation} \label{eq3}
D^{\psi^{\theta}}[f]=-{^{\overline{\psi^{\theta}}}}\text{div}[\mathbf{f}]+{^{\psi^{\theta}}}\text{grad}[f_{0}]+{^{\overline{\psi^{\theta}}}}\text{rot}[\mathbf{f}],
\end{equation}
where
\begin{equation}
{^{\overline{\psi^{\theta}}}}\text{div}[\mathbf{f}]:=\frac{\partial f_{1}}{\partial x_{1}}+\left({\frac{\partial f_{2}}{\partial x_{2}}-\frac{\partial f_{3}}{\partial x_{3}}}\right)\overline{\textbf{i}e^{\textbf{i}\theta}},
\end{equation}
\begin{equation}
\begin{split}
{^{\overline{\psi^{\theta}}}}\text{rot}[\mathbf{f}]:=\left({-\frac{\partial f_{3} }{\partial x_{2}}-\frac{\partial f_{2}}{\partial x_{3}}}\right) \overline{e^{\textbf{i}\theta}}-{\frac{\partial f_{1}}{\partial x_{3}}\overline{\textbf{i}e^{\textbf{i}\theta}\textbf{j}}+\frac{\partial f_{3}}{\partial x_{1}}}\textbf{j} -\frac{\partial f_{2}}{\partial x_{1}}\textbf{k}-\frac{\partial f_{1}}{\partial x_{2}}\overline{\textbf{i}e^{\textbf{i}\theta}\textbf{k}}.
\end{split}
\end{equation}
If $f_{0}=0$, \eqref{eq3} reduces to
\begin{equation} \label{eq4}
D^{\psi^{\theta}}[f]=-{^{\overline{\psi^{\theta}}}}\text{div}[\mathbf{f}]+{^{\overline{\psi^{\theta}}}}\text{rot}[\mathbf{f}].
\end{equation}
It follows easily that
\begin{equation} \label{eq5}
-{^{\overline{\psi^{\theta}}}}\text{div}[\mathbf{f}]+{^{\overline{\psi^{\theta}}}}\text{rot}[\mathbf{f}]=0,
\end{equation}
is also equivalent to \eqref{sedi}.
\begin{lemma} \label{two-sided} Let $f=f_{0}+\mathbf{f}\in C^{1}(\Omega, \, \mathbb{H(\mathbb{C})})$. Then $f$ is both-side-$\psi^\theta$-hyperholomorphic in $\Omega$ if and only if ${^{\psi^{\theta}}}\text{grad}[f_{0}](x)\equiv 0$ in $\Omega$ and $\mathbf{f}$ is a generalized Laplacian vector field in $\Omega$.
\begin{proof}
The proof is based on the fact that \eqref{eq2} and \eqref{eq5} are equivalent to \eqref{sedi}.
\end{proof}
\end{lemma}
| 3,599 | 20,490 |
% --- extraction artifact (dataset metadata rows) removed ---
\subsection{Fractal dimension and the Whitney operator}
Let $E$ be a subset of $\mathbb{R}^{3}$; we denote by $\mathcal{H}_{\lambda}(E)$ the $\lambda$-Hausdorff measure of $E$ (\cite{GJ}).
Assume that $E$ is a bounded set; the Hausdorff dimension of $E$ (denoted by $\lambda(E)$) is the infimum of the $\lambda$ such that $\mathcal{H}_{\lambda}(E)<\infty$.
Frequently, the Minkowski dimension of $E$ (also called box dimension and denoted by $\alpha(E)$) is more appropriate than the Hausdorff dimension to measure the roughness of $E$ (\cite{ABMP,ABS}).
It is known that Minkowski and Hausdorff dimensions can be equal, for example, for rectifiable surfaces (the Lipschitz image of some bounded subset of $\mathbb{R}^{2}$). But in general, if $E$ is a two-dimensional set in $\mathbb{R}^{3}$
\begin{equation}
2\leq \lambda(E)\leq \alpha(E)\leq3.
\end{equation}
If $2<\lambda(E)$, $E$ is called a fractal set in the Mandelbrot sense. For more information about the Hausdorff and Minkowski dimension, see \cite{FKJ,FJ}.
Let $f\in \text{Lip}_{\mu}(\Gamma, \mathbb{H(\mathbb{C})})$, then $f=f_{1}+if_{2}$ with $f_{k}\in \text{Lip}_{\mu}(\Gamma, \mathbb{H(\mathbb{R})})$ and $\mathcal{E}_{0}(f):=\mathcal{E}_{0}(f_{1})+i\mathcal{E}_{0}(f_{2})$. Write
\begin{equation}
f^{w}:=\mathcal{X}\mathcal{E}_{0}(f),
\end{equation}
where $\mathcal{E}_{0}$ is the Whitney operator and $\mathcal{X}$ denotes the characteristic function in $\Omega_{+}\cup\Gamma$.
For completeness, we recall the main lines in the construction of the Whitney decomposition $\mathcal W$ of the Jordan domain $\Omega$ with boundary $\Gamma$ by cubes $Q$ of diameter $||Q||_{\mathbb{R}^{3}}$ and the notion of Whitney operator. This can be found in \cite[Ch VI]{SEM}.
Consider the lattice $\mathbb Z^{3}$ in $\mathbb R^{3}$ and the collection of closed unit cubes defined by it; let $\mathcal{M}_1$ be the mesh consisting of those unit cubes having a non-empty intersection with $\Omega$. Then, we recursively define the meshes $\mathcal{M}_k$, $k=2,3,\ldots$, each time bisecting the sides of the cubes of the previous one. The cubes in $\mathcal{M}_k$ thus have side length $2^{-k+1}$ and diameter $||Q||_{\mathbb{R}^{3}} = (\sqrt{3})\, 2^{-k+1}$. Define, for $k=2,3,\ldots$,
\begin{eqnarray*}
\mathcal{W}^1 & := & \left \{ Q\in \mathcal{M}_1 \, | \, \mbox{$Q$ and every cube of $\mathcal{M}_1$ touching $Q$ are contained in $\Omega$} \right \}, \\
\mathcal{W}^k & := & \left \{ Q\in \mathcal{M}_k \, | \, \mbox{$Q$ and every cube of $\mathcal{M}_k$ touching $Q$ are contained in $\Omega$} \right .\\
& & \hspace*{50mm} \left . \mbox{and}\,\not \exists \, Q^\ast \in \mathcal{W}^{k-1}: Q \subset Q^\ast \right \},
\end{eqnarray*}
for which it can be proven that
$$
\Omega = \bigcup_{k=1}^{+\infty} \mathcal{W}^k = \bigcup_{k=1}^{+\infty} \bigcup_{Q \in \mathcal{W}^k} Q \equiv \bigcup_{Q \in \mathcal{W}} Q,
$$
all cubes $Q$ in the Whitney decomposition $\mathcal{W}$ of $\Omega$ having disjoint interiors.
We denote by $Q_{0}$ the unit cube with center at the origin and fix a $C^{\infty}$ function with the properties: $0\leq \varphi \leq 1$; $\varphi(x)=1$ if $x\in Q_{0}$; and $\varphi(x)=0 $ if $x\notin Q^*_{0}$.
Let $\varphi_{k}$ be the function $\varphi(x)$ adjusted to the cube $Q_{k}\in\mathcal{W}$, that is
\begin{equation}
\varphi_{k}(x):=\varphi\bigg(\frac{x-x^{k}}{l_{k}}\bigg),
\end{equation}
where $x^{k}$ is the center of $Q_{k}$ and $l_{k}$ the common length of its sides.
Function $\varphi_{k}$ satisfies that $0\leq \varphi_{k} \leq 1$, $\varphi_{k}(x)=1$ if $x\in Q_{k}$ and $\varphi_{k}(x)=0 $ if $x\notin Q^*_{k}$. Let ${\varphi_{k}^*}(x)$ be defined for $x\in \Omega$ by
\begin{equation} \label{pdu}
{\varphi_{k}^*}(x):=\frac{\varphi_{k}(x)}{\Phi (x)},
\end{equation}
with
\begin{equation}
\Phi(x):=\sum_{k}^{}\varphi_{k}(x)
\end{equation}
and
$\sum_{k}^{}\varphi_{k}^{*}(x)=1$ for $x\in \Omega$.
For each cube $Q_{k}$ let $p_{k}$ be a fixed point in $\Gamma$ such that $dist(Q_{k}, \Gamma)=dist(Q_{k}, p_{k})$. Then the Whitney operator is defined as follows
\begin{equation}
\mathcal{E}_{0}(f)(x):=f(x), \quad \text{if} \quad x\in \Gamma,
\end{equation}
\begin{equation} \label{suma}
\mathcal{E}_{0}(f)(x):=\sum_{k}f(p_{k})\varphi_{k}^{*}(x), \quad \text{if} \quad x\in \Omega.
\end{equation}
A similar construction may be made for the domain $\mathbb{R}^{3}\setminus (\Omega\cup\Gamma)$.
The operator $\mathcal{E}_{0}$ extends functions $f$ defined in $\Gamma$ to functions defined in $\mathbb{R}^{3}$. Its main properties are given as follows:
\begin{itemize}
\item Assume $f\in\text{Lip}_{\mu}(\Omega\cup\Gamma, \mathbb{H(\mathbb{C})})$. Then $\mathcal{E}_{0}(f) \in \text{Lip}_{\mu}(\mathbb{R}^{3}, \mathbb{H(\mathbb{C})})$ and in fact is $C^{\infty}$ in $\mathbb{R}^{3}\setminus\Gamma$, see \cite[Proposition, pag. 172]{SEM}.
\item The following quantitative estimate holds (see \cite[(14), pag. 174]{SEM})
\begin{equation}
\absol{\frac{\partial{\mathcal{E}_{0}(f)}}{ { \partial x_{i} } } (x)}\leq c (dist(x, \Gamma))^{\mu-1}, \, \text{for}\, x \in \mathbb{R}^{3}\setminus\Gamma.
\end{equation}
\end{itemize}
It is necessary to go further and to express the essential fact that under some specific relation between $\mu$ and $\alpha(\Gamma)$ we have that
\begin{equation}\label{integrability}
{^{\psi^{\theta}}D}[f^{w}]\in L_{p}(\mathbb{R}^{3}, \mathbb{H(\mathbb{R})})\ \mbox{for}\; \displaystyle p<\frac{3-\alpha(\Gamma)}{1-\mu}.
\end{equation}
This follows by much the same methods as \cite[Proposition 4.1]{AB}.
| 1,952 | 20,490 |
% --- extraction artifact (dataset metadata rows) removed ---
\section{Auxiliary results on $\psi^{\theta}$-hyperholomorphic function theory.}
It is a well-known fact that in proving the existence of the boundary value of the Cauchy transform via the Plemelj-Sokhotski formulas, the solvability of the jump problem is an easy task whenever the data is a H\"older continuous function and the boundary of the considered domain is assumed sufficiently smooth. But by far much more subtle is the case where it can be thought of as a fractal surface. Then the standard method is no longer applicable, and it is necessary to introduce an alternative way of defining Cauchy transform, where a central role is played by the Teodorescu operator involving fractal dimensions. This is the idea behind the proofs of the following auxiliary results.
\begin{theorem} \label{thm 6} Let $f\in \text{Lip}_{\mu}(\Gamma,\, \mathbb{H(\mathbb{C})})$, $\displaystyle \frac{\alpha(\Gamma)}{3}<\mu \leq 1$. Then the function $f$ can be represented as $f=\left.F^{+}\right|_{\Gamma}-\left.F^{-}\right|_{\Gamma}$, where $F^{\pm}\in \text{Lip}_{\nu}(\Omega_{\pm}\cup\Gamma)\cap \ker\left( {^{\psi^{\theta}}}D\right)$ for some $\nu<\mu$, $F^{\pm}$ are given by
\begin{equation}\label{tc}
F^{\pm}(x):=-{^{\psi^{\theta}}}T\left[{^{\psi^{\theta}}}D[f^{w}]\right](x)+ f^{w}(x), \quad x\in\big({\Omega}_{\pm}\cup\Gamma\big),
\end{equation}
where
\begin{equation}
{^{\psi^{\theta}}}T[v](x):=\int_{\Omega_{+}}{\mathscr{K}_{\psi^{\theta}}(x-\xi) \,v(\xi) }\,dm(\xi), \quad x\in \mathbb{R}^{3}.
\end{equation}
is the well-defined Teodorescu transform for the $\mathbb{H(\mathbb{C})}$-valued function $v$, see \cite{KVS}.
\end{theorem}
\begin{proof}
Since ${f}^{w}={f}_{1}^{w}+i {f}_{2}^{w}$ with ${f}_{k}^{w}:\Omega\cup\Gamma\to\mathbb{H}$ and $\displaystyle \mu>\frac{\alpha(\Gamma)}{3}$, by (\ref{integrability}) we have ${^{\psi^{\theta}}}D[{f}_{k}^{w}]\in L_{p}(\Omega,\, \mathbb{H})$ for some $p\in\left( 3, \,\displaystyle\frac{3-\alpha(\Gamma)}{1-\mu}\right)$. Then the integral on the right side of \eqref{tc} exists and represents a continuous function in the whole $\mathbb{R}^{3}$ (see \cite[Theorem 2.8]{GPB}). Hence, the functions $F^{\pm}$ possess continuous extensions to the closures of the domains $\Omega_{\pm}$ and they satisfy $\left.F^{+}\right|_{\Gamma}-\left.F^{-}\right|_{\Gamma}=f$. By the property of the Teodorescu operator of still being a right inverse to the Cauchy--Riemann operator (see \cite{KVS}, p. 73), we get ${^{\psi^{\theta}}}D[F^{+}]=0$ and ${^{\psi^{\theta}}}D[F^{-}]=0$ in the domains $\Omega_{\pm}$, respectively.
\end{proof}
\begin{remark}
Uniqueness in the statement of Theorem~\ref{thm 6} could be ensured by introducing an additional requirement analogous to that in \cite[Theorem 6.6]{ABJ}.
\end{remark}
In the remainder of this section we assume that $\displaystyle \frac{\alpha(\Gamma)}{3}<\mu \leq 1$.
The following results are related to the problem of extending $\psi^{\theta}$-hyperholomorphically a $\mathbb{H(\mathbb{C})}$-valued H\"older continuous function.
\begin{theorem} \label{t1}
Let $f\in \text{Lip}_{\mu}(\Gamma,\mathbb{H(\mathbb{C})})$ be the trace of $F\in \text{Lip}_{\mu}(\Omega_{+}\cup\Gamma,\mathbb{H(\mathbb{C})})\cap \ker\left(\left.^{\psi^{\theta}}D\right|_{\Omega_{+}}\right).$ Then
\begin{equation}\label{c1}
{^{\psi^{\theta}}}T\left.\left[{^{\psi^{\theta}}}D[f^{w}]\right]\right|_{\Gamma}=0.
\end{equation}
Conversely, if \eqref{c1} is satisfied, then $f$ is the trace of $F\in \text{Lip}_{\nu}(\Omega_{+}\cup\Gamma,\mathbb{H(\mathbb{C})})\cap \ker\left(\left.^{\psi^{\theta}}D\right|_{\Omega_{+}}\right)$ for some $\nu<\mu$.
\begin{proof}
Sufficiency. As we can write $f=f_{1}+if_{2}$ and $F=F_{1}+iF_{2}$ with $f_{r}\in \text{Lip}_{\mu}(\Gamma,\mathbb{H(\mathbb{R})}), r=1,2$ and $F_{r}\in \text{Lip}_{\nu}(\Omega_{+}\cup\Gamma,\mathbb{H(\mathbb{R})})\cap \ker\left(\left.^{\psi^{\theta}}D\right|_{\Omega_{+}}\right)$. Then $f^{w}=f^{w}_{1}+if^{w}_{2}$ and
\begin{equation}
{^{\psi^{\theta}}}T\left[{^{\psi^{\theta}}}D[f^{w}]\right]={^{\psi^{\theta}}}T\left[{^{\psi^{\theta}}}D[f_{1}^{w}]\right]+i\;{^{\psi^{\theta}}}T\left[{^{\psi^{\theta}}}D[f_{2}^{w}]\right].
\end{equation}
Following \cite[Theorem 3.1]{ABMT}, let $F_{r}^*=f_{r}^{w}-F_{r}$, $\tilde{Q}_{k}$ the union of cubes of the mesh $\mathcal{M}_{k}$ intersecting $\Gamma$, $\Omega_{k}=\Omega_{+}\setminus \tilde{Q}_{k}$, $\Delta_{k}=\Omega_{+}\setminus\Omega_{k}$ and denote by $\Gamma_{k}$ the boundary of $\Omega_{k}$. Applying the definition of $\alpha(\Gamma)$, given $\varepsilon>0$ there is a constant $C(\varepsilon)$ such that $\mathcal{H}^{2}(\Gamma_{k})$ (the Hausdorff measure of $\Gamma_{k}$) is less than or equal to $6C(\varepsilon)2^{k(\alpha(\Gamma)-2+\varepsilon)}$.
Since $F_{r}^*\in\text{Lip}_{\mu}(\Gamma,\mathbb{H(\mathbb{C})})$, $F_{r}^*|_{\Gamma}=0$ and any point of $\Gamma_{k}$ is at distance no more than $C_{1}2^{-k}$ from $\Gamma$, then
\begin{equation*}
\max_{\xi\in\Gamma_{k}}\abs{F_{r}^*(\xi)}\leq C_{2}2^{-\mu k},
\end{equation*}
where $C_{1}$, $C_{2}$ denote absolute constants.
Therefore, for $x\in\Omega_{-}$, setting $s=dist(x,\Gamma)$, we have
\begin{equation*}
\abso{\int_{\Gamma_{k}}\mathscr{K}_{\psi^{\theta}}(\xi-x){^{\psi^{\theta}}}D[F_{r}^{*}](\xi)dS(\xi)}\leq C_{2}C(\varepsilon)\frac{6}{s^{2}}2^{k(\alpha(\Gamma)-2-\mu+\varepsilon)}.
\end{equation*}
As $\displaystyle \frac{\alpha(\Gamma)}{3}<\mu \leq 1$, the right-hand side of the previous inequality tends to zero as $k\to \infty$. By the Stokes formula, we have that
\begin{equation*}
\begin{split}
&\int_{\Omega_{+}}\mathscr{K}_{\psi^{\theta}}(\xi-x){^{\psi^{\theta}}}D[F_{r}^{*}](\xi)dm(\xi)=\lim_{k\to\infty}\bigg( \int_{\Delta_{k}}+\int_{\Omega_{k}}\bigg)\mathscr{K}_{\psi^{\theta}}(\xi-x){^{\psi^{\theta}}}D[F_{r}^{*}](\xi)dm(\xi)\\ &=\lim_{k\to\infty}\bigg( \int_{\Delta_{k}}\mathscr{K}_{\psi^{\theta}}(\xi-x){^{\psi^{\theta}}}D[F_{r}^{*}](\xi)dm(\xi)-\int_{\Gamma_{k}}\mathscr{K}_{\psi^{\theta}}(\xi-x){^{\psi^{\theta}}}D[F_{r}^{*}](\xi)dS(\xi)\bigg)=0.
\end{split}
\end{equation*}
Then
\begin{equation}
{^{\psi^{\theta}}}T\left.\left[{^{\psi^{\theta}}}D[f_{r}^{w}]\right]\right|_{\Gamma}={^{\psi^{\theta}}}T\left.\left[{^{\psi^{\theta}}}D[F_{r}]\right]\right|_{\Gamma}=0.
\end{equation}
Necessity. If \eqref{c1} is satisfied we have
\begin{equation}
{^{\psi^{\theta}}}T\left.\left[{^{\psi^{\theta}}}D[f^{w}]\right]\right|_{\Gamma}={^{\psi^{\theta}}}T\left.\left[{^{\psi^{\theta}}}D[f_{1}^{w}]\right]\right|_{\Gamma}+i\;{^{\psi^{\theta}}}T\left.\left[{^{\psi^{\theta}}}D[f_{2}^{w}]\right]\right|_{\Gamma}=0,
\end{equation}
and we take
\begin{equation}
\begin{split}
F(x):=-{^{\psi^{\theta}}}T\left[{^{\psi^{\theta}}}D[f^{w}]\right](x)+ f^{w}(x), \quad x\in \Omega_{+}\cup\Gamma.
\end{split}
\end{equation}
\end{proof}
\end{theorem}
In the same manner, the next theorem can be verified.
\begin{theorem}
Let $f\in \text{Lip}_{\mu}(\Gamma,\mathbb{H(\mathbb{C})})$. If $f$ is the trace of a function $F\in \text{Lip}_{\mu}(\Omega_{-}\cup\Gamma,\mathbb{H(\mathbb{C})})\cap \ker\left(\left.^{\psi^{\theta}}D\right|_{\Omega_{-}}\right)$, then
\begin{equation}\label{c2}
{^{\psi^{\theta}}}T\left.\left[{^{\psi^{\theta}}}D[f^{w}]\right]\right|_{\Gamma}=-f.
\end{equation}
Conversely, if \eqref{c2} is satisfied, then $f$ is the trace of a function $F\in \text{Lip}_{\nu}(\Omega_{-}\cup\Gamma,\mathbb{H(\mathbb{C})})\cap \ker\left(\left.^{\psi^{\theta}}D\right|_{\Omega_{-}}\right)$ for some $\nu<\mu$.
\end{theorem}
These two results generalize those of \cite[Theorem 3.1, Theorem 3.2]{ABMT}.
\begin{remark}
Similar results can be drawn for the case of right $\psi^{\theta}$-hyperholomorphic extensions. The only necessity being to replace in both theorems $\ker\left(\left.^{\psi^{\theta}}D\right|_{\Omega_{\pm}}\right)$ by $\ker\left(\left.D^{\psi^{\theta}}\right|_{\Omega_{\pm}}\right)$ and ${^{\psi^{\theta}}}T\left.\left[{^{\psi^{\theta}}}D[f^{w}]\right]\right|_{\Gamma}$ by $\left[D^{\psi^{\theta}}[f^{w}]\right]\, \left.{{^{\psi^{\theta}}}T}\right|_{\Gamma}$, where for every $\mathbb{H(\mathbb{C})}$-valued function $v$ we have set
\begin{equation}
[v]\, {{^{\psi^{\theta}}}T}=\int_{\Omega_{+}}{ v(\xi)\, \mathscr{K}_{\psi^{\theta}}(x-\xi) }\,dm(\xi), \quad x\in \mathbb{R}^{3}.
\end{equation}
The following theorem presents a result connecting two-sided $\psi^{\theta}$-hyperholomorphicity in the domain $\Omega_{+}$; it is obtained by applying the previous results.
\begin{theorem}
If $F\in \text{Lip}_{\mu}(\Gamma,\mathbb{H(\mathbb{C})})\cap \ker\left(\left.^{\psi^{\theta}}D\right|_{\Omega_{+}}\right)$ has trace $\left.F\right|_{\Gamma}=f$, then the following assertions are equivalent:
\begin{itemize}
\item [1.] F is left and right $\psi^{\theta}$-hyperholomorphic in $\Omega_{+}$,
\item [2.] ${^{\psi^{\theta}}}T\left.\left[{^{\psi^{\theta}}}D[f^{w}]\right]\right|_{\Gamma}= \left[D^{\psi^{\theta}}[f^{w}]\right]\, \left.{{^{\psi^{\theta}}}T}\right|_{\Gamma}$.
\end{itemize}
\begin{proof}
The proof is obtained reasoning as in \cite[Theorem 3.3]{ABMP}.
\end{proof}
| 3,373 | 20,490 |
% --- extraction artifact (dataset metadata rows) removed ---
\end{theorem}
In the same manner, the next theorem can be verified.
\begin{theorem}
Let $f\in \text{Lip}_{\mu}(\Gamma,\mathbb{H(\mathbb{C})})$. If $f$ is the trace of a function $F\in \text{Lip}_{\mu}(\Omega_{-}\cup\Gamma,\mathbb{H(\mathbb{C})})\cap \ker\left(\left.^{\psi^{\theta}}D\right|_{\Omega_{-}}\right)$, then
\begin{equation}\label{c2}
{^{\psi^{\theta}}}T\left.\left[{^{\psi^{\theta}}}D[f^{w}]\right]\right|_{\Gamma}=-f.
\end{equation}
Conversely, if \eqref{c2} is satisfied, then $f$ is the trace of a function $F\in \text{Lip}_{\nu}(\Omega_{-}\cup\Gamma,\mathbb{H(\mathbb{C})})\cap \ker\left(\left.^{\psi^{\theta}}D\right|_{\Omega_{-}}\right)$ for some $\nu<\mu$.
\end{theorem}
These two results generalize those of \cite[Theorem 3.1, Theorem 3.2]{ABMT}.
\begin{remark}
Similar results can be drawn for the case of right $\psi^{\theta}$-hyperholomorphic extensions. The only necessity being to replace in both theorems $\ker\left(\left.^{\psi^{\theta}}D\right|_{\Omega_{\pm}}\right)$ by $\ker\left(\left.D^{\psi^{\theta}}\right|_{\Omega_{\pm}}\right)$ and ${^{\psi^{\theta}}}T\left.\left[{^{\psi^{\theta}}}D[f^{w}]\right]\right|_{\Gamma}$ by $\left[D^{\psi^{\theta}}[f^{w}]\right]\, \left.{{^{\psi^{\theta}}}T}\right|_{\Gamma}$, where for every $\mathbb{H(\mathbb{C})}$-valued function $v$ we have set
\begin{equation}
[v]\, {{^{\psi^{\theta}}}T}=\int_{\Omega_{+}}{ v(\xi)\, \mathscr{K}_{\psi^{\theta}}(x-\xi) }\,dm(\xi), \quad x\in \mathbb{R}^{3}.
\end{equation}
The following theorem presents a result connecting two-sided $\psi^{\theta}$-hyperholomorphicity in the domain $\Omega_{+}$; it is obtained by applying the previous results.
\begin{theorem}
If $F\in \text{Lip}_{\mu}(\Gamma,\mathbb{H(\mathbb{C})})\cap \ker\left(\left.^{\psi^{\theta}}D\right|_{\Omega_{+}}\right)$ has trace $\left.F\right|_{\Gamma}=f$, then the following assertions are equivalent:
\begin{itemize}
\item [1.] F is left and right $\psi^{\theta}$-hyperholomorphic in $\Omega_{+}$,
\item [2.] ${^{\psi^{\theta}}}T\left.\left[{^{\psi^{\theta}}}D[f^{w}]\right]\right|_{\Gamma}= \left[D^{\psi^{\theta}}[f^{w}]\right]\, \left.{{^{\psi^{\theta}}}T}\right|_{\Gamma}$.
\end{itemize}
\begin{proof}
The proof is obtained reasoning as in \cite[Theorem 3.3]{ABMP}.
\end{proof}
\end{theorem}
\end{remark}
\section{Main results}
In this section our main results are stated and proved. They give sufficient conditions for solving the Problems $(I)$ and $(II)$.
Let $\mathscr{M}_{\psi^{\theta}}^{*}$ be the subclass of vector fields $\mathbf{f}\in C^{1}(\Omega, \mathbb{C}^{3})\cap\mathbf{Lip}_{\mu}(\Gamma,\, \mathbb{C}^{3})$ defined by
\begin{equation} \label{set}
\mathscr{M}_{\psi^{\theta}}^{*}:=\left\{\mathbf{f}: \int_{\Omega_{+}}{\left\langle\mathscr{K}_{\psi^{\theta}}(x-\xi)\,,\,\mathbf{f}(\xi)\right\rangle}\,dm(\xi)=0, \; x\in\Gamma \right\},
\end{equation}
where $m$ denotes the Lebesgue measure in $\mathbb{R}^{3}$. The set $\mathscr{M}_{\psi^{\theta}}^{*}$ can be seen as a fractal version of the corresponding class in \cite{ZMS}, which can be described in purely physical terms.
\begin{theorem} \label{TH1}
Let $\mathbf{f}\in \mathbf{Lip}_{\mu}(\Gamma,\, \mathbb{C}^{3})$ such that $\displaystyle \mu>\frac{\alpha(\Gamma)}{3}$. Then the problem (I) is solvable if
\begin{equation}
\begin{split}
\text{Vec}\left(-{^{\psi^{\theta}}}\text{div}[\mathbf{f}^{w}]+{^{\psi^{\theta}}}\text{rot}[\mathbf{f}^{w}]\right)&:=\left({\left(\frac{\partial \mathbf{f}^{w}_{3}}{\partial x_{3}}-\frac{\partial \mathbf{f}^{w}_{2}}{\partial x_{2}}\right)}\cos\theta-\left(\frac{\partial \mathbf{f}^{w}_{3}}{\partial x_{2}}+\frac{\partial \mathbf{f}^{w}_{2}}{\partial x_{3}}\right)\sin\theta\right)\textbf{i}\\ & +\left(\displaystyle {-\frac{\partial \mathbf{f}^{w}_{3}}{\partial x_{1}}+\frac{\partial \mathbf{f}^{w}_{1}}{\partial x_{3}}\sin\theta+\frac{\partial \mathbf{f}^{w}_{1}}{\partial x_{2}}\cos\theta}\right)\textbf{j}\\ & +\left(\displaystyle {\frac{\partial \mathbf{f}^{w}_{2}}{\partial x_{1}}-\frac{\partial \mathbf{f}^{w}_{1}}{\partial x_{3}}\cos\theta+\frac{\partial \mathbf{f}^{w}_{1}}{\partial x_{2}}\sin\theta}\right)\textbf{k}\in\mathscr{M}_{\psi^{\theta}}^{*}.
\end{split}
\end{equation}
\begin{proof}
It is enough to prove that
\begin{equation}
\mathbf{F^{\pm}}(x):=-{^{\psi^{\theta}}}T\left[{^{\psi^{\theta}}}D[\mathbf{f}^{w}]\right](x)+ \mathbf{f}^{w}(x), \quad x\in\big({\Omega}_{\pm}\cup\Gamma\big),
\end{equation}
are vector fields.
Observe that
\begin{equation}
\notag
\text{Sc}\left({^{\psi^{\theta}}}T\left[{^{\psi^{\theta}}}D[\mathbf{f}^{w}]\right]\right)(x)=-\int_{\Omega_{+}}{\left\langle \mathscr{K}_{\psi^{\theta}}(x-\xi),\text{Vec}\left( -{^{\psi^{\theta}}}\text{div}[\mathbf{f}^{w}]+{^{\psi^{\theta}}}\text{rot}[\mathbf{f}^{w}]\right) \right\rangle }\,dm(\xi), \quad x\in \Omega_{\pm},
\end{equation}
\begin{equation}
\notag
\Delta\left(\text{Sc}\left({^{\psi^{\theta}}}T\left[{^{\psi^{\theta}}}D[\mathbf{f}^{w}]\right]\right)\right)(x)=0, \quad x\in \Omega_{\pm}
\end{equation}
and
\begin{equation}
\notag
\text{Sc}\left.\left({^{\psi^{\theta}}}T\left[{^{\psi^{\theta}}}D[\mathbf{f}^{w}]\right]\right)\right|_{\Gamma}=0,
\end{equation}
because $\text{Vec}\left( -{^{\psi^{\theta}}}\text{div}[\mathbf{f}^{w}]+{^{\psi^{\theta}}}\text{rot}[\mathbf{f}^{w}]\right)\in\mathscr{M}_{\psi^{\theta}}^{*}$. Therefore $\text{Sc}\left({^{\psi^{\theta}}}T\left[{^{\psi^{\theta}}}D[\mathbf{f}^{w}]\right]\right)\equiv 0$ in $\Omega_{\pm}$. Then $\mathbf{F^{\pm}}(x)$
are vector fields.
\end{proof}
\end{theorem}
\begin{theorem} \label{TH2}
Let $\mathbf{f}\in \mathbf{Lip}_{\mu}(\Gamma,\, \mathbb{C}^{3})$ such that $\displaystyle \mu>\frac{\alpha(\Gamma)}{3}$ and suppose that\\ $\text{Vec}\left( -{^{\psi^{\theta}}}\text{div}[\mathbf{f}^{w}]+{^{\psi^{\theta}}}\text{rot}[\mathbf{f}^{w}]\right)\in\mathscr{M}_{\psi^{\theta}}^{*}$. If $\mathbf{f}$ is the trace of a generalized Laplacian vector field in $\mathbf{Lip}_{\mu}(\Omega_{+}\cup\Gamma,\, \mathbb{C}^{3})$, then
\begin{equation}\label{c3}
\begin{split}
&\int_{\Omega_{+}}{\mathscr{K}_{\psi^{\theta}}(t-\xi)\; \text{Sc}\left(-{^{\psi^{\theta}}}\text{div}[\mathbf{f}^{w}]+{^{\psi^{\theta}}}\text{rot}[\mathbf{f}^{w}]\right) }dm(\xi)\\ &=\int_{\Omega_{+}}{\left[\mathscr{K}_{\psi^{\theta}}(t-\xi)\, ,\,\text{Vec}\left(-{^{\psi^{\theta}}}\text{div}[\mathbf{f}^{w}]+{^{\psi^{\theta}}}\text{rot}[\mathbf{f}^{w}]\right)\right] }dm(\xi), \quad t\in \Gamma,
\end{split}
\end{equation}
where
\begin{equation}
\begin{split}
\text{Sc}\left(-{^{\psi^{\theta}}}\text{div}[\mathbf{f}^{w}]+{^{\psi^{\theta}}}\text{rot}[\mathbf{f}^{w}]\right)&=-\frac{\partial \mathbf{f}^{w}_{1}}{\partial x_{1}}+\left(\frac{\partial \mathbf{f}^{w}_{2}}{\partial x_{2}}-\frac{\partial \mathbf{f}^{w}_{3}}{\partial x_{3}}\right)\sin\theta-\left(\frac{\partial \mathbf{f}^{w}_{3}}{\partial x_{2}}+\frac{\partial \mathbf{f}^{w}_{2}}{\partial x_{3}}\right)\cos\theta.
\end{split}
\end{equation}
Conversely, if \eqref{c3} is satisfied, then $\mathbf{f}$ is the trace of a generalized Laplacian vector field in $\mathbf{Lip}_{\nu}(\Omega_{+}\cup\Gamma,\, \mathbb{C}^{3})$ for some $\nu<\mu$.
\begin{proof}
Suppose that $\mathbf{f}\in \mathbf{Lip}_{\mu}(\Gamma,\, \mathbb{C}^{3})$ is the trace of a generalized Laplacian vector field in $\mathbf{Lip}_{\mu}(\Omega_{+}\cup\Gamma,\, \mathbb{C}^{3})$. Therefore
\begin{equation*}
{^{\psi^{\theta}}}T\left.\left[{^{\psi^{\theta}}}D[\mathbf{f}^{w}]\right]\right|_{\Gamma}=0,
\end{equation*}
by Theorem \ref{t1}.
Of course
\begin{equation*}
\begin{split}
&\int_{\Omega_{+}}{\mathscr{K}_{\psi^{\theta}}(t-\xi)\; \text{Sc}\left(-{^{\psi^{\theta}}}\text{div}[\mathbf{f}^{w}]+{^{\psi^{\theta}}}\text{rot}[\mathbf{f}^{w}]\right) }\,dm(\xi)\\ &=\int_{\Omega_{+}}{\left[\mathscr{K}_{\psi^{\theta}}(t-\xi)\, ,\,\text{Vec}\left(-{^{\psi^{\theta}}}\text{div}[\mathbf{f}^{w}]+{^{\psi^{\theta}}}\text{rot}[\mathbf{f}^{w}]\right)\right] }\,dm(\xi), \quad t\in \Gamma,
\end{split}
\end{equation*}
as is easy to check.
Now suppose that \eqref{c3} is satisfied. Set
\begin{equation}
\mathbf{F^{+}}(x):=-{^{\psi^{\theta}}}T\left[{^{\psi^{\theta}}}D[\mathbf{f}^{w}]\right](x)+ \mathbf{f}^{w}(x), \quad x\in\big(\Omega_{+}\cup\Gamma\big).
\end{equation}
As $\text{Vec}\left( -{^{\psi^{\theta}}}\text{div}[\mathbf{f}^{w}]+{^{\psi^{\theta}}}\text{rot}[\mathbf{f}^{w}]\right)\in\mathscr{M}_{\psi^{\theta}}^{*}$, $\mathbf{F^{+}}$ is a generalized Laplacian vector field in $\Omega_{+}$. By Theorem \ref{thm 6}, $\left.\mathbf{F^{+}}\right|_{\Gamma}=\mathbf{f}$, which completes the proof.
\end{proof}
| 3,312 | 20,490 |
% --- extraction artifact (dataset metadata rows) removed ---
\end{theorem}
The method of proof carries over to the domain $\Omega_{-}$. Indeed, we have
\begin{theorem} \label{TH3}
Let $\mathbf{f}\in \mathbf{Lip}_{\mu}(\Gamma,\, \mathbb{C}^{3})$ such that $\displaystyle \mu>\frac{\alpha(\Gamma)}{3}$ and suppose that\\ $\text{Vec}\left( -{^{\psi^{\theta}}}\text{div}[\mathbf{f}^{w}]+{^{\psi^{\theta}}}\text{rot}[\mathbf{f}^{w}]\right)\in\mathscr{M}_{\psi^{\theta}}^{*}$. If $\mathbf{f}$ is the trace of a generalized Laplacian vector field in $\mathbf{Lip}_{\mu}(\Omega_{-}\cup\Gamma,\, \mathbb{C}^{3})$ which vanishes at infinity, then
\begin{equation}\label{c4}
\begin{split}
&\int_{\Omega_{+}}{\mathscr{K}_{\psi^{\theta}}(t-\xi)\; \text{Sc}\left(-{^{\psi^{\theta}}}\text{div}[\mathbf{f}^{w}]+{^{\psi^{\theta}}}\text{rot}[\mathbf{f}^{w}]\right) }\,dm(\xi)\\ &-\int_{\Omega_{+}}{\left[\mathscr{K}_{\psi^{\theta}}(t-\xi)\, ,\,\text{Vec}\left(-{^{\psi^{\theta}}}\text{div}[\mathbf{f}^{w}]+{^{\psi^{\theta}}}\text{rot}[\mathbf{f}^{w}]\right)\right] }\,dm(\xi)=-\mathbf{f}(t), \quad t\in \Gamma.
\end{split}
\end{equation}
Conversely, if \eqref{c4} is satisfied, then $\mathbf{f}$ is the trace of a generalized Laplacian vector field in $\mathbf{Lip}_{\nu}(\Omega_{-}\cup\Gamma,\, \mathbb{C}^{3})$ for some $\nu<\mu$, which vanishes at infinity.
\end{theorem}
\begin{remark} The main results of this paper are generalizations of those in \cite{ABMP}, where the Moisil--Teodorescu operator is considered
\begin{equation}
D_{MT}:=\textbf{i}\frac{\partial }{\partial x_{1}}+\textbf{j}\frac{\partial }{\partial x_{2}}+\textbf{k}\frac{\partial }{\partial x_{3}}.
\end{equation}
Applying the operator $D_{MT}$ to $\mathbf{h}^{w}:=\mathbf{f}^{w}_{1}\textbf{i}+\mathbf{f}^{w}_{2}\textbf{j}+\mathbf{f}^{w}_{3}\textbf{k}\in C^{1}(\Omega, \mathbb{C}^{3})\cap\mathbf{Lip}_{\mu}(\Gamma,\, \mathbb{C}^{3})$ we get
\begin{equation}
\begin{split}
D_{MT}[\mathbf{h}^{w}]&=-\text{div}[\mathbf{h}^{w}]+\text{rot}[\mathbf{h}^{w}]\\ &=-\frac{\partial \mathbf{f}^{w}_{1}}{\partial x_{1}}-\frac{\partial \mathbf{f}^{w}_{2}}{\partial x_{2}}-\frac{\partial \mathbf{f}^{w}_{3}}{\partial x_{3}}+\left(\frac{\partial \mathbf{f}^{w}_{3}}{\partial x_{2}}-\frac{\partial \mathbf{f}^{w}_{2}}{\partial x_{3}}\right)\textbf{i}\\ & +\left(\displaystyle {\frac{\partial \mathbf{f}^{w}_{1}}{\partial x_{3}}-\frac{\partial \mathbf{f}^{w}_{3}}{\partial x_{1}}}\right)\textbf{j}+\left(\displaystyle {\frac{\partial \mathbf{f}^{w}_{2}}{\partial x_{1}}-\frac{\partial \mathbf{f}^{w}_{1}}{\partial x_{2}}}\right)\textbf{k}.
\end{split}
\end{equation}
For abbreviation, we let $D_{MT}[\mathbf{h}^{w}]$ stand for
\begin{equation} \label{2}
\begin{split}
D_{MT}[\mathbf{h}^{w}]=\left[D_{MT}[\mathbf{h}^{w}]\right]_{0}+\left[D_{MT}[\mathbf{h}^{w}]\right]_{1}\textbf{i} +\left[D_{MT}[\mathbf{h}^{w}]\right]_{2}\textbf{j}+\left[D_{MT}[\mathbf{h}^{w}]\right]_{3}\textbf{k}.
\end{split}
\end{equation}
On the other hand, setting $\mathbf{f}^{w}:=\mathbf{f}^{w}_{1}\textbf{i}+\mathbf{f}^{w}_{3}\textbf{j}+\mathbf{f}^{w}_{2}\textbf{k}\in C^{1}(\Omega, \mathbb{C}^{3})\cap\mathbf{Lip}_{\mu}(\Gamma,\, \mathbb{C}^{3})$ it follows that
\begin{equation}
\begin{split}
{^{\psi^{0}}D}[\mathbf{f}^{w}]&=-\frac{\partial \mathbf{f}^{w}_{1}}{\partial x_{1}}-\frac{\partial \mathbf{f}^{w}_{2}}{\partial x_{2}}-\frac{\partial \mathbf{f}^{w}_{3}}{\partial x_{3}}+\left({\frac{\partial \mathbf{f}^{w}_{2}}{\partial x_{3}}-\frac{\partial \mathbf{f}^{w}_{3}}{\partial x_{2}}}\right)\textbf{i}\\ & +\left(\displaystyle {\frac{\partial \mathbf{f}^{w}_{1}}{\partial x_{2}}-\frac{\partial \mathbf{f}^{w}_{2}}{\partial x_{1}}}\right)\textbf{j}+\left(\displaystyle {\frac{\partial \mathbf{f}^{w}_{3}}{\partial x_{1}}-\frac{\partial \mathbf{f}^{w}_{1}}{\partial x_{3}}}\right)\textbf{k}.
\end{split}
\end{equation}
The above expression may be written as
\begin{equation} \label{1}
\begin{split}
{^{\psi^{0}}D}[\mathbf{f}^{w}]=\left[{^{\psi^{0}}D}[\mathbf{f}^{w}]\right]_{0}+\left[{^{\psi^{0}}D}[\mathbf{f}^{w}]\right]_{1}\textbf{i}+\left[{^{\psi^{0}}D}[\mathbf{f}^{w}]\right]_{2}\textbf{j}+\left[{^{\psi^{0}}D}[\mathbf{f}^{w}]\right]_{3}\textbf{k}.
\end{split}
\end{equation}
It is worth noting that under the correspondence $\left(\mathbf{f}^{w}_{1},\,\mathbf{f}^{w}_{2},\,\mathbf{f}^{w}_{3}\right)\, \leftrightarrow \, \left(\mathbf{f}^{w}_{1},\,\mathbf{f}^{w}_{3},\,\mathbf{f}^{w}_{2}\right)$ we can assert that
\begin{equation}\label{equiv}
D_{MT}[\mathbf{h}^{w}]=0\,\Longleftrightarrow \, {}{^{\psi^{0}}D}[\mathbf{f}^{w}]=0,
\end{equation}
which follows from
\begin{align*}
\left[D_{MT}[\mathbf{h}^{w}]\right]_{0} &=\left[{^{\psi^{0}}D}[\mathbf{f}^{w}]\right]_{0} ,\\
\left[D_{MT}[\mathbf{h}^{w}]\right]_{1} & =- \left[{^{\psi^{0}}D}[\mathbf{f}^{w}]\right]_{1},\\
\left[D_{MT}[\mathbf{h}^{w}]\right]_{2} & =-\left[{^{\psi^{0}}D}[\mathbf{f}^{w}]\right]_{3}, \\
\left[D_{MT}[\mathbf{h}^{w}]\right]_{3} & =-\left[{^{\psi^{0}}D}[\mathbf{f}^{w}]\right]_{2}.
\end{align*}
\end{remark}
\begin{remark}
In \cite{ABMP} the following set is defined:
\begin{equation}
\mathscr{M}^{*}:=\left\{\mathbf{f}: \frac{1}{4\pi}\int_{\Omega_{+}}{\left\langle \text{grad}\;\frac{1}{\abs{t-\xi}}\, ,\,\mathbf{f}(\xi)\right\rangle}\,dm(\xi)=0, \, t\in\Gamma \right\}.
\end{equation}
For $\mathbf{h}:=\mathbf{f_{1}}\textbf{i}+\mathbf{f_{2}}\textbf{j}+\mathbf{f_{3}}\textbf{k} \in \mathscr{M}^{*}$ it is clear that
\begin{equation}
\begin{split}
\frac{1}{4\pi}\int_{\Omega_{+}}{\left\langle \text{grad}\;\frac{1}{\abs{t-\xi}}\, ,\,\mathbf{h}(\xi)\right\rangle}\,dm(\xi)=\int_{\Omega_{+}}{\left\langle\mathscr{K}_{\psi^{0}}(t-\xi)\, ,\,\mathbf{f}(\xi)\right\rangle}\,dm(\xi)=0,
\end{split}
\end{equation}
where $\mathbf{f}:=\mathbf{f}_{1}\textbf{i}+\mathbf{f}_{3}\textbf{j}+\mathbf{f}_{2}\textbf{k} \in \mathscr{M}^{*}_{\psi^{0}}$. Hence
$$\mathbf{h}:=\mathbf{f}_{1}\textbf{i}+\mathbf{f}_{2}\textbf{j}+\mathbf{f}_{3}\textbf{k}\in \mathscr{M}^{*} \iff \mathbf{f}:=\mathbf{f}_{1}\textbf{i}+\mathbf{f}_{3}\textbf{j}+\mathbf{f}_{2}\textbf{k} \in \mathscr{M}^{*}_{\psi^{0}}.$$
\end{remark}
From Theorems \ref{TH1}, \ref{TH2}, \ref{TH3} and the previous remarks the following corollaries are obtained.
\begin{corollary} \cite[Theorem 2.2]{ABMP}.
Let $\mathbf{f}\in \mathbf{Lip}_{\mu}(\Gamma,\, \mathbb{C}^{3})$ such that $\displaystyle \mu>\frac{\alpha(\Gamma)}{3}$. Then the reconstruction problem for the div-rot system is solvable if $\text{rot}[\mathbf{f}^{w}]\in\mathscr{M}^{*}$.
\end{corollary}
\begin{corollary} \cite[Theorem 2.3]{ABMP}.
Let $\mathbf{f}\in \mathbf{Lip}_{\mu}(\Gamma,\, \mathbb{C}^{3})$ such that $\displaystyle \mu>\frac{\alpha(\Gamma)}{3}$ and suppose that $\text{rot}[\mathbf{f}^{w}]\in\mathscr{M}^{*}$. If $\bf{f}$ is the trace of a Laplacian vector field in $\mathbf{Lip}_{\mu}(\Omega_{+}\cup\Gamma,\, \mathbb{C}^{3})$, then
\begin{equation}\label{c31}
\begin{split}
&\frac{1}{4\pi}\int_{\Omega_{+}}{ \text{grad}\;\frac{1}{\abs{t-\xi}}\; \text{div}[\mathbf{f}^{w}]}\,dm(\xi)\\ &=\frac{1}{4\pi}\int_{\Omega_{+}}{\left[ \text{grad}\;\frac{1}{\abs{t-\xi}}\,,\, \text{rot}[\mathbf{f}^{w}]\right] }\,dm(\xi), \quad t\in \Gamma.
\end{split}
\end{equation}
Conversely, if \eqref{c31} is satisfied, then $\bf{f}$ is the trace of a Laplacian vector field in $\mathbf{Lip}_{\nu}(\Omega_{+}\cup\Gamma,\, \mathbb{C}^{3})$ for some $\nu<\mu$.
\end{corollary}
\begin{corollary} \cite[Theorem 2.4]{ABMP}.
Let $\mathbf{f}\in\mathbf{Lip}_{\mu}(\Gamma,\, \mathbb{C}^{3})$ such that $\displaystyle \mu>\frac{\alpha(\Gamma)}{3}$ and suppose that $\text{rot}[\mathbf{f}^{w}]\in\mathscr{M}^{*}$. If $\bf{f}$ is the trace of a Laplacian vector field in $\mathbf{Lip}_{\mu}(\Omega_{-}\cup\Gamma, \, \mathbb{C}^{3})$ which vanishes at infinity, then
\begin{equation}\label{c42}
\begin{split}
&\frac{1}{4\pi}\int_{\Omega_{+}}{ \text{grad}\;\frac{1}{\abs{t-\xi}}\;\text{div}[\mathbf{f}^{w}]}\,dm(\xi)\\ &-\frac{1}{4\pi}\int_{\Omega_{+}}{\left[ \text{grad}\;\frac{1}{\abs{t-\xi}}\,,\,\text{rot}[\mathbf{f}^{w}]\right] }\,dm(\xi)=-\mathbf{f}(t), \quad t\in \Gamma.
\end{split}
\end{equation}
Conversely, if \eqref{c42} is satisfied, then $\bf{f}$ is the trace of a Laplacian vector field in $\mathbf{Lip}_{\nu}(\Omega_{-}\cup\Gamma,\, \mathbb{C}^{3})$ for some $\nu<\mu$, which vanishes at infinity.
\end{corollary}
| 3,230 | 20,490 |
% (non-LaTeX extraction artifact removed)
\begin{corollary} \cite[Theorem 2.3]{ABMP}.
Let $\mathbf{f}\in \mathbf{Lip}_{\mu}(\Gamma,\, \mathbb{C}^{3})$ such that $\displaystyle \mu>\frac{\alpha(\Gamma)}{3}$ and suppose that $\text{rot}[\mathbf{f}^{w}]\in\mathscr{M}^{*}$. If $\bf{f}$ is the trace of a Laplacian vector field in $\mathbf{Lip}_{\mu}(\Omega_{+}\cup\Gamma,\, \mathbb{C}^{3})$, then
\begin{equation}\label{c31}
\begin{split}
&\frac{1}{4\pi}\int_{\Omega_{+}}{ \text{grad}\;\frac{1}{\abs{t-\xi}}\; \text{div}[\mathbf{f}^{w}]}\,dm(\xi)\\ &=\frac{1}{4\pi}\int_{\Omega_{+}}{\left[ \text{grad}\;\frac{1}{\abs{t-\xi}}\,,\, \text{rot}[\mathbf{f}^{w}]\right] }\,dm(\xi), \quad t\in \Gamma.
\end{split}
\end{equation}
Conversely, if \eqref{c31} is satisfied, then $\bf{f}$ is the trace of a Laplacian vector field in $\mathbf{Lip}_{\nu}(\Omega_{+}\cup\Gamma,\, \mathbb{C}^{3})$ for some $\nu<\mu$.
\end{corollary}
\begin{corollary} \cite[Theorem 2.4]{ABMP}.
Let $\mathbf{f}\in\mathbf{Lip}_{\mu}(\Gamma,\, \mathbb{C}^{3})$ such that $\displaystyle \mu>\frac{\alpha(\Gamma)}{3}$ and suppose that $\text{rot}[\mathbf{f}^{w}]\in\mathscr{M}^{*}$. If $\bf{f}$ is the trace of a Laplacian vector field in $\mathbf{Lip}_{\mu}(\Omega_{-}\cup\Gamma, \, \mathbb{C}^{3})$ which vanishes at infinity, then
\begin{equation}\label{c42}
\begin{split}
&\frac{1}{4\pi}\int_{\Omega_{+}}{ \text{grad}\;\frac{1}{\abs{t-\xi}}\;\text{div}[\mathbf{f}^{w}]}\,dm(\xi)\\ &-\frac{1}{4\pi}\int_{\Omega_{+}}{\left[ \text{grad}\;\frac{1}{\abs{t-\xi}}\,,\,\text{rot}[\mathbf{f}^{w}]\right] }\,dm(\xi)=-\mathbf{f}(t), \quad t\in \Gamma.
\end{split}
\end{equation}
Conversely, if \eqref{c42} is satisfied, then $\bf{f}$ is the trace of a Laplacian vector field in $\mathbf{Lip}_{\nu}(\Omega_{-}\cup\Gamma,\, \mathbb{C}^{3})$ for some $\nu<\mu$, which vanishes at infinity.
\end{corollary}
\section*{Appendix. Criteria for the generalized Laplacianness of a vector field}
We continue to assume that $\Omega\subset \mathbb{R}^{3}$ is a Jordan domain with a fractal boundary $\Gamma$. Our interest here is to find necessary and sufficient conditions for the generalized Laplacianness of a vector field $\mathbf{F}\in\mathbf{Lip}_{\nu}(\Omega\cup\Gamma,\, \mathbb{C}^{3})$ in terms of its boundary value $\mathbf {f}:=\left.\mathbf {F}\right|_\Gamma$.
The inspiration for the following definition is that in \cite[Definition 2.1]{ARBR}.
\begin{defi} \label{dtc1}
Let $\Omega$ be a Jordan domain with fractal boundary $\Gamma$. Then we define the Cauchy transform of $\mathbf{f}\in \mathbf{Lip}_{\mu}(\Gamma,\,\mathbb{C}^{3})$ by
\begin{equation}\label{tc2}
K_{\Gamma}^{*}[\mathbf{f}](x):=-{^{\psi^{\theta}}}T\left[{^{\psi^{\theta}}}D[\mathbf{f}^{w}]\right](x)+ \mathbf{f}^{w}(x), \quad x\in \mathbb{R}^{3}\setminus\Gamma.
\end{equation}
\end{defi}
Under the condition $\displaystyle \frac{\alpha(\Gamma)}{3}<\mu \leq 1$ the Cauchy transform $K_{\Gamma}^{*}[\mathbf{f}]$ has a continuous extension to $\Omega\cup\Gamma$ for every vector field $\mathbf {f}\in \mathbf{Lip}_{\mu}(\Gamma,\,\mathbb{C}^{3})$ (take a fresh look at Theorem \ref{thm 6}). On the other hand, using the properties of the Theodorescu operator (see \cite{KVS}, p. 73) we obtain that $K_{\Gamma}^{*}[\mathbf{f}]$ is left-$\psi^{\theta}$-hyperholomorphic in $\mathbb{R}^{3}\setminus\Gamma$. Note that $K_{\Gamma}^{*}[\mathbf{f}](x)$ vanishes at infinity.
Let us introduce the following fractal version of the Cauchy singular integral operator
\begin{equation*}
\mathcal{S}_{\Gamma}^{*}[\mathbf{f}](x):=2K^{*}_{\Gamma}[\mathbf{f}]^{+}(x)-\mathbf{f}(x), \quad x\in\Gamma.
\end{equation*}
Here and subsequently, $K^{*}_{\Gamma}[\mathbf{f}]^{+}$ denotes the trace on $\Gamma$ of the continuous extension of $K^{*}_{\Gamma}[\mathbf{f}]$ to $\Omega\cup\Gamma$.
Let us now establish and prove the main result of this appendix, which gives necessary and sufficient conditions for the generalized Laplacianness of a vector field in terms of its boundary value.
\begin{theorem}
Let $\mathbf{F}\in\mathbf{Lip}_{\mu}(\Omega\cup\Gamma,\mathbb{C}^{3})$ with trace $\mathbf{f}=\left.\mathbf{F}\right|_{\Gamma}$. Then the following statements are equivalent:
\begin{itemize}
\item [(i)] $\mathbf{F}$ is a generalized Laplacian vector field.
\item [(ii)] $\mathbf{F}$ is harmonic in $\Omega$ and $\mathcal{S}_{\Gamma}^{*}[\mathbf{f}]=\mathbf{f}$.
\end{itemize}
\begin{proof} Let $\mathbf{F}^{w}$ be the Whitney extension of $\mathbf{F}$ in $\mathbf{Lip}_{\mu}(\Omega\cup\Gamma,\mathbb{C}^{3})$. Suppose that $\mathbf{F}$ is a generalized Laplacian vector field in $\Omega$. Since ${^{\psi^{\theta}}}D[\mathbf{F}]=0$ in $\Omega$, it follows that $\mathbf{F}$ is harmonic. Also $\mathbf{F}^{w}$ is a Whitney extension of $\mathbf{f},$ i.e. $\mathbf{f}=\left.\mathbf{F}^{w}\right|_{\Gamma}$.
According to Definition \ref{dtc1}, with $\mathbf{f}^{w}$ replaced by $\mathbf{F}^{w}$, we get
\begin{equation*}
K_{\Gamma}^{*}[\mathbf{f}](x)=-\int_{\Omega}{\mathscr{K}_{\psi^{\theta}}(x-\xi)\, {{^{\psi^{\theta}}}D}[\mathbf{F}^{w}](\xi) }\,dm(\xi)+ \mathbf{F}^{w}(x)=\mathbf{F}(x), \quad x\in\Omega,
\end{equation*}
which implies that $K_{\Gamma}^{*}[\mathbf{f}]^{+}=\mathbf{f}$ and $\mathcal{S}_{\Gamma}^{*}[\mathbf{f}]=\mathbf{f}$.
Conversely, assume that $(ii)$ holds and define
\begin{equation}
\Psi(x):= \left\{
\begin{array}{ll}
K_{\Gamma}^{*}[\mathbf{f}](x), & x \in\Omega, \\
\mathbf{f}(x), & x \in \Gamma.
\end{array}
\right.
\end{equation}
Note that $\Psi(x)$ is a left-$\psi^{\theta}$-hyperholomorphic function, hence harmonic in $\Omega$. Since $\mathcal{S}_{\Gamma}^{*}[\mathbf{f}]=\mathbf{f}$ in $\Gamma$, it follows that $K_{\Gamma}^{*}[\mathbf{f}]^{+}=\mathbf{f}$. Therefore $K_{\Gamma}^{*}[\mathbf{f}]$ is also continuous on $\Omega\cup\Gamma$.
As $\mathbf{F}-\Psi$ is harmonic in $\Omega$ and $\left.(\mathbf{F}-\Psi)\right|_{\Gamma}=0$ we have that $\mathbf{F}(x)=K_{\Gamma}^{*}[\mathbf{f}](x)$ for all $x \in\Omega,$ which follows from the harmonic maximum principle. Lemma \ref{two-sided} now forces $\mathbf{F}$ to be a generalized Laplacian vector field in $\Omega,$ and the proof is complete.
\end{proof}
| 2,213 | 20,490 |
% (non-LaTeX extraction artifact removed)
\end{theorem}
\end{document}
| 16 | 20,490 |
% (non-LaTeX extraction artifact removed)
\begin{document}
\title{The role of coherence on two-particle quantum walks}
\author{Li-Hua Lu$^1$, Shan Zhu and You-Quan Li$^{1,2}$}
\affiliation{1. Zhejiang Institute of Modern Physics and Department of Physics,\\ Zhejiang
University, Hangzhou 310027, P. R. China\\
2. Collaborative Innovation Center of Advanced Microstructures, Nanjing, P. R. China
}
\begin{abstract}
We investigate the dynamical properties of the two-boson quantum walk in systems with different degrees of coherence, where the effect of the coherence on the two-boson quantum walk can be naturally introduced. A general analytical expression of the two-boson correlation function for both pure states and mixed states is given. We propose a possible two-photon quantum-walk scheme with a mixed initial state and find that the two-photon correlation function and the average distance between two photons can be influenced by either the initial photon distribution, or the relative phase, or the degree of coherence. The propagation features of our numerical results can be explained by our analytical two-photon correlation function.
Keywords: two-particle quantum walk, degree of coherence, two-photon correlation function, pure state, mixed state
\end{abstract}
\pacs{03.67.Ac, 03.67.Lx, 05.40.Fb, 05.90.+m}
\received{\today}
\maketitle
\section{Introduction}
As the quantum mechanical counterparts of the classical random walk~\cite{Ahar},
the quantum random walk has been increasingly receiving attentions
because of their potential applications range from quantum information to simulation of physical phenomena. For example, the quantum walk offers an advanced tool for building quantum algorithm~\cite{Moh,Shen,Sal}
that is shown to be of the primitive for universal quantum computations~\cite{und,Lov,chi,and}.
We know that quantum walks include two main classes, namely the discrete-time quantum walk and the continuous-time quantum walk~\cite{Jwa,Far}.
The continuous-time quantum walk can evolve continuously with time through tunneling between neighbors sites and does not require quantum coin to generate superposition of states.
This means the continuous-time quantum walk can be implemented
via a constant tunneling of quantum particles in several possible lattice sites.
So far, the quantum walks of single particles have been studied in experiments by using either classical waves~\cite{peret}, single photons~\cite{Sch,broo}, or single atoms~\cite{Kars,Wei}.
Additionally, quantum walks of two correlated photons trapped in waveguide lattices
were also studied in experiments~\cite{Yar,Alberto}.
Note that many-particle quantum walks can exhibit more fascinating quantum features in contrast to single-particle quantum walks. The reason is that single-particle quantum walks can be exactly mapped to classical wave phenomena~\cite{Knight} but for quantum walks of more than one indistinguishable particle, the classical theory can not provide sufficient descriptions. In Ref.~\cite{omar}, the authors theoretically studied the discrete-time quantum walk of two particles and demonstrated the distinctly nonclassical correlations. Meanwhile, the effect of interactions between particles on quantum walk of two indistinguishable particles was theoretically studied in Ref.~\cite{lahini,Qin}, where the system was assumed to be completely coherent and the influence of the degree of coherence was not studied. We know that except for the interaction between particles, the other factors, {\it e.g.}, the initial states, the quantum-walk parameters and the degree of coherence of the system, can also affect the features of two-particle quantum walks. Especially, we know that the major challenge to experimentally realize quantum walks of correlated particles is to find a low-decoherence system that can preserve the nonclassical features of quantum walks~\cite{Alberto}, which implies that the influence of the degree of coherence of the system on two-particle quantum walks is important. Then it is worthwhile to investigate the properties of two-particle quantum walks with attention to different degrees of coherence since the decoherence effects in quantum walks have potential algorithmic applications~\cite{kendon}.
In this paper, we propose a density matrix formulism to study the two-particle quantum walk
where the degrees of coherence can be naturally introduced.
With the help of Heisenberg equation of motion, we derive a general analytical expression
of the two-particle correlation function.
As a concrete example, we propose a possible two-photon quantum-walk scheme with a mixed initial state to exhibit the quantum features of the two-particle quantum walk
via the two-particle correlation and the average distance between the two particles.
Our result exhibits that the propagation of the two particles depends
not only on the initial distribution of the two particles
but also on the relative phase and the degree of coherence of the system.
Such a propagation feature can be
explained by our analytical two-particle correlation function. In the next section, we present the model and derive the analytical expression of the two-particle correlation. In Sec.~\ref{sec:two-photon}, we propose a concrete scheme to show some dynamical features of two-particle quantum walks. Our main conclusions are summarized in Sec.~\ref{sec:conc}.
\section{A general formulation }
We consider a two-particle quantum walk in a one-dimensional lattice space.
The propagation of the two particles is described by the evolution of the state
of a tight-binding model,
\begin{equation}
H=-\sum_{q}T_{q,q+1}(\hat{a}_q^\dagger\hat{a}_{q+1} + \mathrm{h.c.})
+\sum_q \beta_q\hat{a}_q^\dagger\hat{a}_q,
\end{equation}
where the operators $a_{q}^\dag$ and $a_{q}$ create and annihilate a bosonic particle at site $q$, respectively.
Here the parameter $T_{q,q+1}$ refers to the tunneling strength of particles between the nearest neighbor sites, and
\begin{equation}
\beta_q=T_{q+1,q}+T_{q-1,q}.
\end{equation}
Note that the above form of $\beta_q$ was chosen to ensure probability conservation in the proposal of the continuous-time quantum walk via decision trees~\cite{Far}. Now, more generally, the value of $\beta_q$ can be arbitrary because probability conservation is naturally satisfied in quantum mechanics.
If the tunneling strength $T_{q,q+1}$ is a constant, $\beta_q$ will become a constant for the periodical boundary condition. In this case, the value of $\beta_q$ does not affect the dynamical properties of the quantum-walk system. Whereas, for the open boundary condition, the values of $\beta_q$ for the two boundary sites are different from that for the other sites. This can naturally introduce two defects to the quantum-walk system. Note that the effect of defects on single-particle quantum walks was studied in Ref.~\cite{Li}.
Since we consider a two-boson system,
the Fock bases describing the system are
\begin{equation}
\label{eq:fockstate}
\ket{1}_q\ket{1}_r=\frac{1}{\sqrt{1+\delta_{q,r}}}\hat{a}_q^\dagger\hat{a}_r^\dagger\ket{\mathrm{vac}},
\end{equation}
where $\delta_{q,r}$ denotes the Kronecker delta.
Equation (\ref{eq:fockstate}) represents a two-particle state with one
on the $q$th site and the other one on the $r$th site.
Note that $\ket{1}_q\ket{1}_r$ is regarded as identical to $\ket{1}_r\ket{1}_q$
for indistinguishable particles that we considered.
Meanwhile, the two particles can be in the same site ({\it i.e.}, $q=r$)
for bosonic particle that we considered.
Thus the Hilbert space expanded by the aforementioned Fock bases is of $D=L(L+1)/2$ dimension.
Here $L$ denotes the number of the sites.
We know that the propagation of the two particles is determined not only by the property of the waveguide lattice but also by the two-particle input state.
If the two particles are in a pure state at the initial time,
the two-particle input state can be expressed as a wavefunction, namely, a coherent superposition of the Fock bases,
\begin{equation}\label{eq:purstates}
\ket{\psi}=\sum_{q,r}c_{q,r}\ket{1}_q\ket{1}_r,
\end{equation}
where $\sum_{q,r}|c_{q,r}|^2=1$.
However, if the two particles are in a mixed state at the initial time,
the two-particle input state needs to be described by a density matrix
rather than wavefunction.
Such a density matrix is given by
\begin{equation}
\label{eq:densitymatix}
\rho=\sum_{qr,q'r'}\rho_{qr,q'r'}\bigl(\ket{1}_q\ket{1}_r\bigr)\bigl(\bra{1}_{q'}\bra{1}_{r'}\bigr),
\end{equation}
which is a $D\times D$ matrix.
We know that $\textrm{Tr}\rho^2\leq(\textrm{Tr}\rho)^2$
where the equal sign holds only for pure states.
In the following,
we will focus on the two-particle quantum walk for the mixed input states.
Now we are in the position to study the propagation of the two particles
with the help of Heisenberg equation of motion for the creation operators,
namely,
\begin{equation}
\label{eq:dye}
i\frac{\partial \hat{a}_q^\dagger}{\partial t}=\beta_q\hat{a}_q^\dagger+T_{q,q+1}\hat{a}_{q+1}^\dagger
+T_{q,q-1}\hat{a}_{q-1}^\dagger,
\end{equation}
where we set $\hbar=1$ for simplicity in calculation.
The creation operator $\hat{a}_q^\dagger$ at any time can be obtained with the help of Eq.~(\ref{eq:dye}),
\begin{equation}
\label{eq:creation}
\hat{a}_q^\dagger(t)=\sum_r U_{q,r}(t)\hat{a}_r^\dagger(0),\quad
U(t)=e^{-iHt},
\end{equation}
where $U_{q,r}(t)$ is the probability amplitude of a single particle transiting
from the $q$th waveguide to the $r$th one.
To exhibit the quantum behaviors of the two-particle quantum walk,
let us firstly evaluate the two-particle correlation function $\Gamma_{k,l}(t)=\ave{\hat{a}_k^\dagger(t)\hat{a}_l^\dagger(t)\hat{a}_l(t)\hat{a}_k(t)}$
which manifests the probability that the two particles are coincident in the $k$th and the $l$th waveguide~\cite{Yar,Mattle}.
Since the two-particle input state can be described by the density matrix given
in Eq.~(\ref{eq:densitymatix}), the expectation value of any observable of the system
can be calculated via $\ave{\hat{O}(t)}= \textrm{Tr}(\hat{O}(t)\rho)$.
Then we obtain an expression of
two-particle correlation function
\begin{widetext}
\begin{eqnarray}\label{eq:twocorre}
&&\Gamma_{k,l}(t)=\sum_{q\neq r,q'\neq r'}\rho_{qr,q'r'}\Bigl(U_{kq'}U_{lr'}U_{lq}^*U_{kr}^*+U_{kq'}U_{lr'}U_{lr}^*U_{kq}^*
+U_{kr'}U_{lq'}U_{lq}^*U_{kr}^*+U_{kr'}U_{lq'}U_{lr}^*U_{kq}\Bigr)
\nonumber\\
&&+\sum_{q,q'\neq r'}\sqrt{2}\rho_{qq,q'r'}\Bigl(U_{kq'}U_{lr'}U_{lq}^*U_{kq}^*+U_{kr'}U_{lq'}U_{lq}^*U_{kq}^*\Bigr)
+\sum_{q\neq r,q'}\sqrt{2}\rho_{qr,q'q'}\Bigl(U_{kq'}U_{lq'}U_{lq}^*U_{kr}^*+U_{kq'}U_{lq'}U_{lr}^*U_{kq}^*\Bigr)
\nonumber\\
&&+\sum_{q,q'}2\rho_{qq,q'q'}U_{kq'}U_{lq'}U_{lq}^*U_{kq}^*,
\end{eqnarray}
\end{widetext}
which presents a general form for either pure initial input states or mixed ones.
One can obtain the two-particle correlation at any time
as long as the density matrix corresponding to the input state is given. With the help of such an expression of the two-particle correlation function, many dynamical features of two-particle quantum walks can be explained.
| 3,271 | 7,189 |
% (non-LaTeX extraction artifact removed)
\section{Two-photon quantum walk for a concrete input state}\label{sec:two-photon}
In order to expose the quantum properties of two-particle quantum walks more clearly, we turn to a concrete example where the particles are assumed to be photons. We know that each beam can become two coherent beams after propagating through a grating~\cite{sza} and the pure two-photon input states can be experimentally realized via injecting two coherent beams into waveguide lattice~\cite{Yar}. Then we suppose that there are two incoherent light beams and the relation of their intensity is $\cos^2{\delta}$:$\sin^2{\delta}$. The two incoherent beams propagate through two gratings, respectively, and then simultaneously inject into the waveguide arrays. The two incoherent beams will create two pure two-photon states $\psi_1$ and $\psi_2$, respectively~\cite{Yar,sza}. Because the initial two beams are not coherent, the initial state of the system needs to be described by the density matrix
\begin{equation}
\rho=\cos^2\delta \ket{\psi_1}\bra{\psi_1}
+\sin^2\delta \ket{\psi_2}\bra{\psi_2}.
\end{equation}
As an example, we take $\psi_1=\cos{\frac{\theta}{2}}\ket{2}_1+\sin{\frac{\theta}{2}}e^{i\phi}\ket{2}_0$
and $\psi_2=\ket{2}_1$, then the initial density matrix is
\begin{align}
\label{eq:exrho}
\rho = \rho_{00,11}\ket{2}_0\bra{2}_1 + \rho_{11,00}\ket{2}_1\bra{2}_0
+ \rho_{11,11}\ket{2}_1\bra{2}_1 + \rho_{00,00}\ket{2}_0\bra{2}_0
\end{align}
with $\rho_{00,11}=\cos^2\delta\cos{\frac{\theta}{2}}\sin{\frac{\theta}{2}}e^{i\phi}$,
$\rho_{11,00}=\rho_{00,11}^*$,
$\rho_{11,11}=\cos^2\delta\cos^2{\frac{\theta}{2}}+\sin^2\delta$,
and
$\rho_{00,00}=\cos^2\delta\sin^2{\frac{\theta}{2}}$.
Here $\ket{2}_q$ stands for
$\ket{1}_q\ket{1}_q$ whose definition has been given
in Eq.~(\ref{eq:fockstate}). We can find that the other matrix elements of $\rho$
are zeros except for the above four elements. Since the above four elements can be changed via $\delta$, $\theta$ and $\phi$,
without losing the generality, we redefine them as
$\rho_{00,00}=\alpha$, $\rho_{11,11}=1-\alpha$ and $\rho_{00,11}=\rho_{11,00}^*=e^{i\phi}\sqrt{\eta-1-4\alpha^2+4\alpha}/2$ with $0\leq\alpha\leq 1$. Here the parameter $\eta=2\textrm{Tr}(\rho^2)-1$ $(0\leq\eta\leq1)$ is introduced to characterize the degree of coherence~\cite{lhlu}.
We have $\eta=1$ when the system is in a pure state, otherwise $\eta<1$. With the help of Eq.~(\ref{eq:twocorre}),
the two-photon correlation of the system for the mixed state shown in Eq.~(\ref{eq:exrho}) yields
\begin{align}
\label{eq:extwocre}
\Gamma_{q,r} = 2\gamma\textrm{Re}(e^{i\phi}U_{q0}U_{r0}U^*_{r1}U^*_{q1})
+2\alpha |U_{r0}U_{q0}|^2+2(1-\alpha)|U_{r1}U_{q1}|^2,
\end{align}
where $\gamma=\sqrt{\eta-1+4\alpha(1-\alpha)}$.
This implies that the two-photon correlation depends not only on the initial probability distribution of the two photons but also on the degree of coherence and the relative phase of the system at the initial time. The first term in Eq.~(\ref{eq:extwocre}) is a coherent one that reveals well the quantum nature of two-photon quantum walk. Taking a pure initial state ({\it i.e.}, $\eta=1$) as an example, the two-photon correlation function~(\ref{eq:extwocre}) becomes $\Gamma=|\sqrt{2\alpha}e^{i\phi}U_{r0}U_{q0}+\sqrt{2(1-\alpha)}U_{r1}U_{q1}|^2$, which implies that the two-photon correlation can take place when the two photons from the 0th site propagate to the $q$th and the $r$th sites, respectively, or when the two photons from the 1th site to the $q$th and the $r$th sites, respectively. Due to the two photons are indistinguishable, the two paths can interfere, which is essentially the Hanbury Brown Twiss(HBT) interference~\cite{Han}.
Now we investigate the quantum features of a two-photon quantum walk
by considering a waveguide arrays consisting of (2$l$+1) identical waveguides.
In this case, the tunneling strengths between nearest-neighbor arrays are all the same, {\it i.e.}, $T_{q,r}=C$ with $C$ being a constant, and $\beta_q$ becomes a constant $2C$ for the periodical boundary condition we considered.
Then $U_{q,r}(t)$ becomes $e^{i2Ct}i^{q-r}J_{q-r}(2Ct)$
where $J_q$ is the $q$th order Bessel function~\cite{Led,Yariv}.
With the help of Eq.~(\ref{eq:extwocre}),
we can write out the two-photon correlation function in terms of Bessel functions.
\begin{widetext}
\begin{align}
\label{eq:twocrebf}
\Gamma_{q,r}(\tau) =-2\gamma\cos{\phi} J_q(\tau)J_r(\tau)J_{r-1}(\tau)J_{q-1}(\tau)
+2\alpha [ J_q(\tau) J_r(\tau) ]^2
+2(1-\alpha)[ J_{r-1}(\tau) J_{q-1}(\tau) ]^2.
\end{align}
\end{widetext}
where $\tau=2Ct$.
\begin{figure}
\caption{(Color online) Two-photon correlation at time $t=4(1/C)$ for different initial conditions. The initial condition is (a) $\alpha=1$, $\eta=1$, $\phi=0$ (b) $\alpha=0.5$, $\eta=0$, $\phi=0$
(c) $\alpha=0.5$, $\eta=0.5$, $\phi=0$ (d) $\alpha=0.5$, $\eta=1$, $\phi=0$ (e) $\alpha=0.5$, $\eta=0.5$, $\phi=\pi$, and (f) $\alpha=0.5$, $\eta=1$, $\phi=\pi$. }
\label{fig:tcou}
\end{figure}
In Fig.~\ref{fig:tcou},
we plotted the two-photon correlation matrix at time $t=4({1}/{C})$ for different initial conditions, where $1/C$ is the unit of time ({\it i.e.}, the time $t$ is in the unit of the inverse of tunneling strength). We can see that each particle can be found on either side of origin after propagation, which is reflected in the four symmetric peaks in Fig.~\ref{fig:tcou} (a). For this case ({\it i.e.}, $\alpha=1$ and $\eta=1$), the system is in a pure state and Eq.~(\ref{eq:extwocre}) becomes $\Gamma_{q,r}=2|U_{r0}U_{q0}|^2$ which is the same as the result in Ref.~\cite{Yar}. Such a correlation function is just a product of the two classical probability distribution, so there is no interference and the photons propagate in the ballistic direction. From Fig.~\ref{fig:tcou} (b), we can find that just like Fig.~\ref{fig:tcou} (a), the two photons also favor to localize at the four corners of the correlation map. The reason is that there is no interference because the system is completely incoherent, which is confirmed by that the coherent term in Eq.~(\ref{eq:extwocre}) vanishes in the case of $\eta=0$. Whereas, with the increase of the degree of coherence, the coherent term emerges in the two-photon correlation function $\Gamma_{q,r}$, so the $\Gamma_{q,r}$ exhibits the properties of interference. Due to the existence of the Hanbury Brown-Twiss (HBT) interference, two local maximums emerge in the off-diagonal regions of the correlation matrix (see Fig.~\ref{fig:tcou} (c) and (d)). That implies that the two photons favor to far from each other which is in contrast to the case of Fig.~\ref{fig:tcou} (e) and (f) where except the initial relative phase $\phi$, the other parameters are the same as those in Fig.~\ref{fig:tcou} (c) and (d), respectively. This is reasonable because the coherent term in Eq.~(\ref{eq:twocrebf}) is in proportion to $\cos{\phi}$ for the periodical waveguide lattice we considered.
Additionally, comparing the values of the maximums in Fig.~\ref{fig:tcou} (c) and (d),
it is easy to find
that the larger the degree of coherence is,
the more distinct the interference effect of the system will be.
\begin{figure}
\caption{(Color online) The time evolution of the distance between two photons for different initial conditions. The parameters are $\alpha=0.5$, and $\eta=1$ (left panel), $\eta=0.5$ (right panel).}
\label{fig:distanceevo}
\end{figure}
\begin{figure}
\caption{(Color online) The dependence of the distance between two photons at time $t=4(1/C)$ on the degree of coherence (left panel) and the initial relative phase (right panel). The parameter is $\alpha=0.5$.}
\label{fig:distance}
\end{figure}
To exhibit the propagation properties of the two photons,
we also calculate the average distance between the two photons,
\begin{eqnarray}\label{eq:distance}
\displaystyle d&=&-2\gamma \cos{\phi}\sum_{q>r}(q-r)J_q(\tau)J_r(\tau)J_{r-1}(\tau)J_{q-1}(\tau)\nonumber\\
&&+\sum_{q>r}(q-r)\Bigl(2\alpha [ J_q(\tau) J_r(\tau) ]^2
+2(1-\alpha)[ J_{r-1}(\tau) J_{q-1}(\tau) ]^2\Bigr).
\end{eqnarray}
Here the first term is a coherence one that is affected by the degree of coherence of the system due to $\gamma=\sqrt{\eta-1+4\alpha(1-\alpha)}$.
We plot the time evolution of the distance between the two photons in Fig.~\ref{fig:distanceevo},
we can see that the distance between two photons is affected not only by the relative phase but also by the degree of coherence.
In Fig.~\ref{fig:distance}, we plot the dependence of the
distance $d$ at time $t=4(1/C)$ on the degree of coherence and the initial relative phase.
From the left panel of this figure, we can see that the distance between two photons becomes larger with the increase of the degree of coherence when $\phi=0$, which is contrast to the case of $\phi=\pi$.
The reason for this phenomenon is that the HBT interference makes the two photons move far from each other when $0\leq\phi<\pi/2$,
which can be confirmed by Fig.~\ref{fig:tcou} (c) and (d) where there are two maximums in the off-diagonal regions.
Therefore, the distance between the two photons becomes larger with the increase of the degree of coherence due to the fact that the increase of the degree of coherence makes the interference effect more significant.
Whereas, the case of $\pi/2\leq\phi<\pi$ is in contrast to that of $0\leq\phi<\pi/2$ because the two photons favor to stay together when $\pi/2\leq\phi<\pi$, which can be confirmed by Fig.~\ref{fig:tcou} (e) and (f).
Note that when $\phi=\pi/2$, the interference term in the two-photon correlation function becomes zero, so the degree of coherence does not affect the distance between the two particles (see the dot-symbol line in the left panel of Fig.~\ref{fig:distance}).
The right panel of Fig.~\ref{fig:distance} exhibits that
the relative phase of the system at the initial time can affect the distance between two photons in the case of $\eta>0$ but such an effect vanishes in the case of $\eta=0$.
That is reasonable because the distance between two photons is in proportion to $\cos{\phi}\sqrt{\eta-1-4\alpha^2+4\alpha}$ which can be found in Eq.~(\ref{eq:distance}). Additionally, we calculate the von Neumann entropy to show the evolution of the entanglement of the system. We split the system into two halves, $L$ and $R$, in the center of the system, and build the reduced density matrix $\rho_L$ of the subsystem $L$ at any time~\cite{Schach}. Then we can calculate the von Neumann entropy of $\rho_L$ as
\begin{equation}
S=-\sum_i\lambda_i\log_2\lambda_i,
\end{equation}
where $\lambda_i$ are the non-zero eigenvalues of the matrix $\rho_L$. In Fig.~\ref{fig:von}, we plot the time evolution of the von Neumann entropy of the left half of the system for different initial conditions.
\begin{figure}
\caption{(Color online) The time evolution of the von Neumann entropy of the set of sites on the left part of the system. The parameters are $\eta=1$, $\phi=0$, and $L=15$.}
\label{fig:von}
\end{figure}
\section{Conclusion}\label{sec:conc}
We proposed a density matrix formalism to study the properties of two-particle quantum walks where the effect of coherence was introduced naturally. We gave the general analytical expression of the two-particle correlation function which is correct for systems in both mixed states and pure states. We suggested a possible two-photon scheme to exhibit the more fascinating quantum features of two-particle random walks with mixed initial states. For such a concrete scheme, we calculated the two-photon correlation and the average distance between the two photons. The corresponding results manifested that the propagation of the two photons depends not only on the initial distribution of the two photons but also on the relative phase and the degree of coherence of the system. Such propagation features of the two photons were explained with the help of the analytical expression of the two-particle correlation function we obtained.
The work is supported by the NBRP of China (2014CB921201), the NSFC (11104244 and 11274272, 11434008), and by the Fundamental Research Funds for Central Universities.
\end{document}
| 3,918 | 7,189 |
en
|
train
|
0.99.0
|
\begin{document}
\title{\bf A Posteriori Error Estimates for Self-Similar Solutions to the Euler
Equations
}
\vskip 1em
\author{
Alberto Bressan and Wen Shen \\ \, \\
Department of Mathematics, Penn State University.\\
University Park, PA~16802, USA.\\
\\ e-mails:[email protected], [email protected]}
\date{Dec 15, 2019}
\maketitle
\begin{abstract}
The main goal of this paper is to analyze a family of ``simplest possible" initial data for which,
as shown by numerical simulations,
the incompressible Euler equations have multiple solutions.
We take here a first step toward a rigorous validation of these numerical results.
Namely, we consider the system of equations corresponding to a self-similar solution,
restricted to a bounded domain with smooth boundary.
Given an approximate solution
obtained via a finite dimensional Galerkin method,
we establish a posteriori error bounds on the distance between the numerical
approximation and the exact solution having the same boundary data.
\end{abstract}
\vskip 1em
| 342 | 27,931 |
en
|
train
|
0.99.1
|
\section{Introduction}
\setcounter{equation}{0}
The flow of a homogeneous, incompressible, non-viscous fluid
in ${\mathbb R}^2$ is modeled by the
Euler equations
\begin{equation}\label{E}
\left\{\begin{array}{rll}
u_t +(u\cdot\nabla) u&=~-\nabla p &\qquad \textrm{(balance of momentum),}\\
\textrm{div}\, u&= ~0 & \qquad\textrm{(incompressibility
condition).}
\end{array}\right.
\end{equation}
Here $u=u(t,x)$ denotes the velocity of the fluid, while the scalar function
$p$ is a pressure.
The condition $\hbox{div } u=0$ implies the existence of a stream
function $\psi$ such that
\begin{equation}\label{psi}
u~=~\nabla^\perp\psi,\qquad\qquad (u_1,u_2)~=~(-\psi_{x_2}, \, \psi_{x_1}).
\end{equation}
Denoting by $\omega = \mathrm{curl}\, u = (- u_{1, x_2}+ u_{2, x_1})$
the vorticity of the fluid,
it is well known that
the Euler equations (\ref{E}) can be reformulated
in terms of the system
\begin{equation}\label{E2}
\left\{\begin{array}{rl} \omega_t + \nabla^\perp \psi\cdot\nabla\omega
&=~0,\\[3mm]
\Delta\psi&=~\omega.\end{array}\right.
\end{equation}
The velocity $u$ is then recovered from the vorticity
by the Biot-Savart formula
\begin{equation}\label{BS}
u(x)~=~\frac{1}{2\pi}\int_{{\mathbb R}^2} \frac{(x-y)^\perp}{|x-y|^2}
\,\omega(y)\, dy.
\end{equation}
Our eventual goal is to construct ``simplest possible" initial data
for which the equations (\ref{E}) have multiple solutions. Numerical
simulations, shown in Figures~\ref{f:EC1spi} and \ref{f:EC2spi},
indicate that two distinct solutions can be achieved
for initial data where the vorticity
\begin{equation}\label{om0}\overline \omega(x)~=~ \omega(0,x)~=~\mathrm{curl}\, u(0,x)\end{equation} has the form
\begin{equation}\label{ssv}
\overline \omega(x)~=~r^{-{1/\mu}} \,\overline \Omega(\theta),
\qquad\qquad x= (x_1, x_2) = (r\cos \theta, \,r\sin\theta).
\end{equation}
Here $\frac{1}{2}<\mu<+\infty$, while $\overline\Omega\in {\mathcal C}^\infty({\mathbb R})$ is a non-negative, smooth, periodic function
which satisfies
\begin{equation}\label{ovo}
\overline\Omega(\theta) ~=~\overline\Omega(\pi+\theta),\qquad\qquad
\overline\Omega(\theta)~=~0\quad\hbox{if}~~\theta\in \left[\frac{\pi}{4}\,,\, \pi\right].
\end{equation}
As shown in Figure~\ref{f:e51}, left, the initial vorticity $\overline\omega$ is supported on two wedges,
and becomes arbitrarily large as $|x|\to 0$.
\begin{figure}[htbp]
\centering
\includegraphics[scale=0.38]{e51}
\caption{\small The supports of the initial vorticity considered in (\ref{ssv})--(\ref{ID2}).}
\label{f:e51}
\end{figure}
One can approximate $\overline\omega$ by two families of initial data
$\overline \omega_\varepsilon, \overline\omega_\varepsilon^\dagger\in {\bf L}^\infty({\mathbb R}^2)$, taking
\begin{equation}\label{ID2}\overline \omega_\varepsilon(x)~=~\left\{\begin{array}{cl} \omega(x)\quad&\hbox{if} ~~|x|>\varepsilon,\\
\varepsilon^{-1/\mu}\quad&\hbox{if} ~~|x|\leq\varepsilon,\end{array}\right.\qquad\qquad
\overline \omega^\dagger_\varepsilon(x)~=~\left\{\begin{array}{cl} \omega(x)\quad&\hbox{if} ~~|x|>\varepsilon,\\
0\quad&\hbox{if} ~~|x|\leq\varepsilon.\end{array}\right.
\end{equation}
As $\varepsilon\to 0$, we have $\overline \omega_\varepsilon, \overline\omega_\varepsilon^\dagger \to \overline\omega$
in ${\bf L}^p_{loc}$, for a suitable $p$ depending on the parameter $\mu$ in (\ref{ssv}).
By Yudovich's theorem \cite{Y}, for every $\varepsilon>0$ these initial data yield a unique solution. However, the numerical simulations indicate that, as $\varepsilon\to 0$,
two distinct limit solutions are obtained. In the first solution, shown in Figure~\ref{f:EC1spi}, both wedges wind up together in a single spiral. In the second solution, shown in Figure~\ref{f:EC2spi},
each wedge curls up on itself, and two distinct spirals are observed.
\begin{remark} {\rm
The fact that all approximate solutions $\omega_\varepsilon, \omega_\varepsilon^\dagger$ are uniquely determined by their initial data
implies that the ill-posedness exhibited by this example is ``uncurable''. Namely, there is no way to
select one solution with initial datum as in (\ref{ssv})--(\ref{ovo}), preserving the continuous dependence on initial data. }
\end{remark}
\begin{figure}[htbp]
\centering
\includegraphics[scale=0.8]{EC1spi.pdf}
\caption{\small
The vorticity distribution at time $t=1$, for a solution to (\ref{E2})
with initial vorticity $\overline \omega_\varepsilon$.}
\label{f:EC1spi}
\end{figure}
\vskip 1em
We observe that both of these limit solutions are self-similar,
i.e.~they have the form
\begin{equation}\label{SS3}
\left\{\begin{array}{rl} u(t,x)&=~t^{\mu-1} U\left(\frac{x}{t^\mu}\right),\\[4mm]
\omega(t,x)&=~t^{-1} \Omega\left(\frac{x}{t^\mu}\right),\\[4mm]
\psi(t,x)&=~t^{2\mu-1} \Psi \left(\frac{x}{t^\mu}\right).
\end{array}\right.
\end{equation}
Notice that, by self-similarity, these solutions are completely determined as soon as
we know their values at time $t=1$. Indeed, these are given by $U, \Omega,\Psi$.
Inserting (\ref{SS3}) in (\ref{E2}), one obtains the equations
\begin{equation}\label{SSE}\left\{
\begin{array}{rl}\Big(\nabla^\perp \Psi -\mu y\Big)\cdot \nabla\Omega &=~ \Omega\,,\\[3mm]
\Delta \Psi&=~\Omega\,,
\end{array}
\right.
\end{equation}
while the velocity is recovered by
\begin{equation}\label{U}U~=~\nabla^\perp \Psi.\end{equation}
\begin{figure}[htbp]
\centering
\includegraphics[scale=0.8]{EC2spi.pdf}
\caption{\small
The vorticity distribution at time $t=1$, for a solution to (\ref{E2})
with initial vorticity $\overline\omega_\varepsilon^\dagger$.}
\label{f:EC2spi}
\end{figure}
Constructing two distinct self-similar solutions of (\ref{E2}) with the same initial data (\ref{ssv})
amounts to finding two distinct solutions $(\Omega,\Psi)$, $(\Omega^\dagger,
\Psi^\dagger)$ of (\ref{SSE}) with the same
asymptotic behavior as $|x|\to + \infty$. More precisely, writing the vorticity $\Omega$ in polar coordinates, this
means
\begin{equation}\label{asy1}\lim_{r\to +\infty} r^{\frac{1}{\mu}}\,\Omega(r,\theta)~=~\lim_{r\to +\infty} r^{\frac{1}{\mu}}\,\Omega^\dagger(r,\theta)
~\doteq~\overline\Omega(\theta),
\end{equation}
for some smooth function $\overline\Omega$ as in (\ref{ovo}).
Since the two solutions in Figures~\ref{f:EC1spi} and \ref{f:EC2spi}
are produced by numerical computations,
a natural question is whether an exact self-similar solution of the
Euler equations exists, close to each computed one.
This requires suitable a posteriori error bounds.
Toward this goal, two difficulties arise:
\begin{itemize}
\item[(i)] The self-similar solution $(\Omega,\Psi)$ is defined on the entire plane
${\mathbb R}^2$, while a numerical solution is computed only on some bounded domain.
\item[(ii)] The solution is smooth, with the exception of one or two
points corresponding to the spirals' centers. In a neighborhood of these points
the standard error estimates break down.
\end{itemize}
To address these issues, we propose a domain decomposition method.
As shown in Fig.~\ref{f:e64},
the plane can be decomposed
into an outer domain ${\mathcal D}^\sharp\supseteq\{x\in{\mathbb R}^2\,;~ |x|>R\}$,
an inner domain ${\mathcal D}^\flat$ containing
a neighborhood of the spirals' centers where the solution has singularities, and a bounded intermediate domain ${\mathcal D}^\natural$ where the solution is smooth.
The solution is constructed analytically on ${\mathcal D}^\sharp$ and on ${\mathcal D}^\flat$,
and numerically on ${\mathcal D}^\natural$.
These three components are then patched together
by suitable matching conditions.
\begin{figure}[htbp]
\centering
\includegraphics[scale=0.22]{e64}
\caption{\small Decomposing the plane ${\mathbb R}^2={\mathcal D}^\sharp\cup{\mathcal D}^\natural\cup{\mathcal D}^\flat$
into an outer, a middle, and an inner domain. Left: the case of a single spiraling vortex, as in Fig.~\ref{f:EC1spi}. Right: the case of two spiraling vortices, as in Fig.~\ref{f:EC2spi}.}
\label{f:e64}
\end{figure}
A detailed analysis of the solution to (\ref{SSE}) in a neighborhood of
infinity and near the spirals' centers will appear in the companion paper
\cite{BM}, relying on the approach developed in \cite{E1, E2, E3}.
In the present paper we focus on the derivation of a posteriori
error estimates for a numerically computed solution to (\ref{SSE}), on
a bounded domain ${\mathcal D}\subset{\mathbb R}^2$ with smooth boundary $\partial{\mathcal D}$.
As shown in Fig.~\ref{f:e88}, we assume that this boundary
can be decomposed as the union of two closed, disjoint components:
\begin{equation}\label{dbd}\partial{\mathcal D}~=~\Sigma_1\cup\Sigma_2\,.\end{equation}
We seek a solution to (\ref{SSE}), satisfying boundary conditions of the form
\begin{equation}\label{BC}\left\{
\begin{array}{rl}\Psi(x)&=~g(x),\qquad\qquad x\in \partial {\mathcal D},\\[3mm]
\Omega(x)&=~h(x),\qquad\qquad x\in\Sigma_1\,,\end{array}\right.
\end{equation}
where $g,h$ are given smooth functions.
Given an approximate solution
$(\Omega_0,\Psi_0)\in {\mathcal C}^{0,\alpha}({\mathcal D})\times {\mathcal C}^{2,\alpha}({\mathcal D})$, computed
by a Galerkin finite dimensional approximation,
we want to prove the existence
of an exact solution close to the approximate one.
The remainder of the paper is organized as follows. Section~\ref{s:2} introduces the basic framework,
specifying the main assumptions on the numerical scheme and on the approximate solution.
Section~\ref{s:3} begins by analyzing the first equation in (\ref{lam1}), regarded as a linear
PDE for the vorticity function $\Omega$. In this direction, Lemma~\ref{l:42}
provides a detailed estimate on how the solution depends on the vector field $\nabla^\perp \Phi$.
In addition, Lemma~\ref{l:interp}
yields a sharper regularity estimate on the solutions, deriving an
a priori bound on their ${\mathcal C}^{0,\alpha}$ norm.
Finally, in Section~\ref{s:4} the exact solution $\Omega$ is constructed as the fixed point of a transformation
which is contractive w.r.t.~a norm equivalent to the ${\bf L}^2$ norm. We remark that,
in order to achieve this contractivity, a bound on the norms $\|\Omega\|_{{\bf L}^2}$ and $\|\Phi\|_{H^2}$
is not good enough. Indeed, we need an a priori bound on $\|\Omega\|_{{\mathcal C}^{0,\alpha}}$ and on
$\|\Phi\|_{{\mathcal C}^2}$.
This is achieved by means of Lemma~\ref{l:interp}.
At the end of Section~\ref{s:4} we collect all the various constants appearing in the estimates,
and summarize our analysis by stating a theorem on the existence of an exact solution, close to the
computed one.
| 3,992 | 27,931 |
en
|
train
|
0.99.2
|
Since the two solutions in Figures~\ref{f:EC1spi} and \ref{f:EC2spi}
are produced by numerical computations,
a natural question is whether an exact self-similar solution of the
Euler equations exists, close to each computed one.
This requires suitable a posteriori error bounds.
Toward this goal, two difficulties arise:
\begin{equation}gin{itemize}
\itemtem[(i)] The self similar solution $({\cal O}mega,\Psi)$ is defined on the entire plane
${\mathbb R}^2$, while a numerical solution is computed only on some bounded domain.
\itemtem[(ii)] The solution is smooth, with the exception of one or two
points corresponding to the spirals' centers. In a neighborhood of these points
the standard error estimates break down.
\epsilonndi
To address these issues, we propose a domain decomposition method.
As shown in Fig.~\ref{f:e64},
the plane can be decomposed
into an outer domain $D^\sharp\supseteq\{x\itemn{\mathbb R}^2\,; |x|>R\}$,
an inner domain ${\mathcal D}^\flat$ containing
a neighborhood of the spirals' centers where the solution has singularities, and a bounded intermediate domain ${\mathcal D}^\noindentatural$ where the solution is smooth.
The solution is constructed analytically on ${\mathcal D}^\sharp$ and on ${\mathcal D}^\flat$,
and numerically on ${\mathcal D}^\noindentatural$.
These three components are then patched together
by suitable matching conditions.
\begin{equation}gin{itemize}n{figure}[htbp]
\centerlineentering
\itemncludegraphics[scale=0.22]{e64}
\centerlineaption{\small Decomposing the plane ${\mathbb R}^2={\mathcal D}^\sharp\centerlineup{\mathcal D}^\noindentatural\centerlineup{\mathcal D}^\flat$
into an outer, a middle, and an inner domain. Left: the case of a single spiraling vortex, as in Fig.~\ref{f:EC1spi}. Right: the case of two spiraling vortices, as in Fig.~\ref{f:EC2spi}.}
\bigl\langlebel{f:e64}
\epsilonnd{figure}
A detailed analysis of the solution to (\ref{SSE}) in a neighborhood of
infinity and near the spirals' centers will appear in the companion paper
\centerlineite{BM}, relying on the approach developed in \centerlineite{E1, E2, E3}.
In the present paper we focus on the derivation of a posteriori
error estimates for a numerically computed solution to (\ref{SSE}), on
a bounded domain ${\mathcal D}\subset{\mathbb R}^2$ with smooth boundary $\partial{\mathcal D}$.
As shown in Fig.~\ref{f:e88}, we assume that this boundary
can be decomposed as the union of two closed, disjoint components:
\begin{equation}l{dbd}\partial{\mathcal D}~=~\Sigma_1\centerlineup\Sigma_2\,.\epsiloneq
We seek a solution to (\ref{SSE}), satisfying boundary conditions of the form
\begin{equation}l{BC}\left\{
\begin{equation}gin{itemize}n{array}{rl}\Psi(x)&=~g(x),\qquad\qquad x\itemn \partial {\mathcal D},\\[3mm]
{\cal O}mega(x)&=~h(x),\qquad\qquad x\itemn\Sigma_1\,,\epsilonnda\right.\epsiloneq
where $g,h$ are given smooth functions.
Given an approximate solution
$({\cal O}mega_0,\Psi_0)\itemn {\mathcal C}^{0,\alpha}({\mathcal D})\times {\mathcal C}^{2,\alpha}({\mathcal D})$, computed
by a Galerkin finite dimensional approximation,
we want to prove the existence
of an exact solution close to the approximate one.
The remainder of the paper is organized as follows. Section~\ref{s:2} introduces the basic framework,
specifying the main assumptions on the numerical scheme and on the approximate solution.
Section~\ref{s:3} begins by analyzing the first equation in (\ref{lam1}), regarded as a linear
PDE for the vorticity function ${\cal O}mega$. In this direction, Lemma~\ref{l:42}
provides a detailed estimate on how the solution depends on the vector field $\noindentabla^\perp \Phi$.
In addition, Lemma~\ref{l:interp}
yields a sharper regularity estimate on the solutions, deriving an
a priori bound on their ${\mathcal C}^{0,\alpha}$ norm.
Finally, in Section~\ref{s:4} the exact solution ${\cal O}mega$ is constructed as the fixed point of a transformation
which is contractive w.r.t.~a norm equivalent to the ${\bf L}^2$ norm. We remark that,
in order to achieve this contractivity, a bound on the norms $\|{\cal O}mega\|_{{\bf L}^2}$ and $\|\Phi\|_{H^2}$
is not good enough. Indeed, we need an a priori bound on $\|{\cal O}mega\|_{{\mathcal C}^{0,\alpha}}$ and on
$\|\Phi\|_{{\mathcal C}^2}$.
This is achieved by means of Lemma~\ref{l:interp}.
At the end of Section~\ref{s:4} we collect all the various constants appearing in the estimates,
and summarize our analysis by stating a theorem on the existence of an exact solution, close to the
computed one.
For results on the uniqueness of solutions to the incompressible Euler equations we refer to
\cite{BH, MP, Y}. Examples showing the non-uniqueness of solutions to the incompressible Euler equations
were first constructed in \cite{Sch, Shn}. See also
\cite{V} for a different approach, yielding more regular solutions.
Following the major breakthrough in
\cite{DS09} several examples
of multiple solutions for Euler's equations have recently been provided \cite{Dan, DRS, DanSz, DS10}.
These solutions are obtained by means of convex integration and a Baire category argument.
They have turbulent nature and their physical significance is unclear.
Our numerical simulations, on the other hand, suggest that ``uncurable" non-uniqueness
can arise from quite simple initial data. In our case, both solutions can be easily visualized;
they remain everywhere smooth (with the exception of one or two points), and conserve energy at all times.
| 1,578 | 27,931 |
en
|
train
|
0.99.3
|
\section{Setting of the problem}
\setcounter{equation}{0}
\label{s:2}
Throughout the following, we consider the boundary value problem (\ref{SSE}), (\ref{BC}) on
a bounded, open domain ${\mathcal D}\subset {\mathbb R}^2$, with smooth boundary $\partial{\mathcal D}$ decomposed as in (\ref{dbd}).
The unit outer normal
at the boundary point $y\itemn \partial {\mathcal D}$ will be denoted by ${\bf n}(y)$.
We call $\Psi^g$ the solution to the non-homogeneous boundary value problem
\begin{equation}\label{Pg}\left\{\begin{array}{rll}
\Delta\Psi(x)&=~0\qquad & x\in{\mathcal D},\\[3mm]
\Psi(x)&=~g(x)\qquad &x\in\partial{\mathcal D},\end{array}\right.
\end{equation}
and define the vector field
\begin{equation}\label{bbv}
{\bf v}(x)~\doteq~\nabla^\perp \Psi^g(x) -\mu x\,.
\end{equation}
Assuming that $g$ is smooth, the same is true of ${\bf v}$.
Given $\Omega\in {\bf L}^2({\mathcal D})$, we define
\begin{equation}\label{D-1}\Phi~=~\Delta^{-1}\Omega\end{equation}
to be the solution of
\begin{equation}\label{lam2}
\left\{\begin{array}{rll} \Delta\Phi (x)&=~\Omega(x),\qquad &x\in {\mathcal D},\\[3mm]
\Phi(x)&=~0,\qquad &x\in \partial {\mathcal D}.\end{array}\right.
\end{equation}
Our problem thus amounts to finding a function $\Omega$ such that
\begin{equation}\label{lam1}
\left\{\begin{array}{rll} \bigl(\nabla^\perp \Phi(x)+{\bf v}(x)\bigr)\cdot \nabla\Omega(x)&=~\Omega(x),\qquad\qquad
&x\in {\mathcal D},\\[3mm]
\Omega(x)&=~h(x), &x\in \Sigma_1\,,\end{array}\right.
\end{equation}
where $\Phi=\Delta^{-1}\Omega$ and ${\bf v}$ is the vector field at (\ref{bbv}).
In the following, for any given function $\Phi\in {\mathcal C}^{2}({\mathcal D})$,
we denote by $\Omega=\Gamma(\Phi)$ the solution to (\ref{lam1}).
We seek a fixed point of the composed map
\begin{equation}\label{ldef}
\Omega~\mapsto~\Lambda(\Omega)~\doteq~\Gamma(\Delta^{-1}\Omega).
\end{equation}
\vskip 1em
We shall consider approximate solutions of (\ref{lam1}) which are obtained
by finite dimensional Galerkin
approximations. More precisely, given a finite set of linearly independent functions
$\{\phi_j\,;~~1\leq j\leq N\}\subset{\bf L}^2({\mathcal D})$, consider the orthogonal decomposition
${\bf L}^2({\mathcal D})~=~U\times V$, where
\begin{equation}\label{UV} U~=~\hbox{span}\{\phi_1,\ldots, \phi_N\},\qquad \qquad V= U^\perp,\end{equation}
with orthogonal projections
\begin{equation}\label{proj}
P:{\bf L}^2({\mathcal D})~\mapsto~ U,\qquad\quad (I-P): {\bf L}^2({\mathcal D})~\mapsto ~V.
\end{equation}
\vskip 1em
{\bf Example 1.} We can choose the functions
$\phi_j$ to be piecewise affine, obtained from a
triangulation of the domain ${\mathcal D}$. This implies
\begin{equation}\label{pjp}
\phi_j\in W^{1,\infty}({\mathcal D})\subset {\mathcal C}^{0,\alpha}({\mathcal D})
\end{equation}
for every $0<\alpha\leq 1$. In this case, the gradient $\nabla\phi_j\in{\bf L}^\infty\cap BV$
is piecewise constant, with jumps
along finitely many segments.
\vskip 1em
{\bf Example 2.} In alternative, one can choose $\phi_1,\ldots,\phi_N\in {\bf L}^2({\mathcal D})$
to be the first $N$ normalized eigenfunctions of the Laplacian. More precisely,
for $j=1,\ldots,N$ we require that the functions $\phi_j$ satisfy
\begin{equation}\label{EL}\left\{\begin{array}{rl} \Delta\phi_j + \lambda_j \phi_j~=~0\qquad & x\in {\mathcal D},\\[3mm]
\phi_j~=~0\qquad & x\in \Sigma_1\,,\\[3mm]
{\bf n}\cdot \nabla\phi_j~=~0\qquad & x\in \Sigma_2\,,\end{array}\right.
\end{equation}
with eigenvalues $0<\lambda_1\leq \lambda_2\leq\cdots\leq \lambda_N$. Moreover, $\|\phi_j\|_{{\bf L}^2}=1$.
\vskip 1em
\begin{remark}\label{r:1}{\rm In both of the above cases, there may be no linear combination $\sum_j c_j \phi_j$
of the basis functions that matches the boundary data $h$ along $\Sigma_1$.
This issue can be addressed simply by adding to our basis an additional function $\phi_0\in {\mathcal C}^\infty({\mathcal D})$,
chosen so that $\phi_0=h$ on $\Sigma_1$.}
\end{remark}
Our basic question can be formulated as follows.
\begin{itemize}
\item[{\bf (Q)}]
{\it
Assume we can find a finite dimensional approximation
\begin{equation}\label{UU} \Omega_0 ~=~\sum_{j=1}^N c_j\phi_j\,,\qquad\qquad
\Phi_0~=~\Delta^{-1}\Omega_0\,,\end{equation}
with error
\begin{equation}\label{AA} \Big\| \Omega_0 -P\,\Lambda(\Omega_0)\Big\|_{{\bf L}^2({\mathcal D})}~=~\delta_0\,.\end{equation}
How small should $\delta_0$ be, to make sure that an exact solution
$(\Omega,\Phi)$ of (\ref{lam1}), (\ref{D-1}) exists,
close to $(\Omega_0,\Phi_0)$?}
\end{itemize}
\vskip 4emip 2emkip 1em
Given a function $\Phi\in {\mathcal C}^2({\mathcal D})$, the linear, first order PDE (\ref{lam1}) for $\Omega$ can be solved
by the method of characteristics.
Namely, consider the
vector field
\begin{equation}\label{qd}{\bf q}(x)~=~\nabla^\perp \Phi(x)+{\bf v}(x),
\end{equation}
whose divergence is
\begin{equation}\label{divq}
\hbox{div } {\bf q}~=~-2\mu\,.
\end{equation}
We shall denote by $t\mapsto \exp(t{\bf q})(y)$ the solution to the ODE
$$\dot x(t)~=~{\bf q}(x(t)),\qquad x(0)=y.$$
For convenience, we shall use the notation
\begin{equation}\label{xty}t~\mapsto ~x(t,y)~\doteq~\exp(-t{\bf q})(y)\end{equation}
for the solution to the ODE
$$\dot x(t)~=~-{\bf q}(x(t)),\qquad x(0)=y.$$
Consider the set
\begin{equation}\label{D*} {\mathcal D}^*~\doteq~\Big\{ y\in \overline{\mathcal D}\,;~~x(\tau,y)\in \Sigma_1\cap\hbox{Supp}(h)\quad\hbox{for some}~\tau\geq 0\Big\}.\end{equation}
In other words, $y\in {\mathcal D}^*$ if the characteristic through $y$ reaches a boundary point in the support of $h$ within
finite time. Calling
\begin{equation}\label{tauy}\tau(y)~\doteq~\min\,\bigl\{t\geq 0\,;~~ x(t,y)\in \Sigma_1\bigr\}\end{equation}
the first time when the characteristic starting at $y$ reaches the boundary $\Sigma_1$,
the solution to (\ref{lam1}) is computed by
\begin{equation}\label{OR}\Omega(y)~=~\left\{ \begin{array}{cl} e^{\tau(y)}\, h(x(\tau(y), y))\qquad &\hbox{if} ~~y\in {\mathcal D}^*,\\[3mm]
0\qquad&\hbox{if}~~y\notin{\mathcal D}^*.\end{array}\right.
\end{equation}
\begin{figure}[htbp]
\centering
\includegraphics[scale=0.45]{e88.pdf}
\caption{\small According to {\bf (A1)}, every characteristic starting at a point
$y\in \Sigma_1\cap {\rm Supp}(h)$ exits from the domain ${\mathcal D}$ at some boundary point
$z=z(y)\in \Sigma_2$, at a time $T(y)\leq T^*$. The shaded region represents the subdomain ${\mathcal D}^*$ in
(\ref{D*}).}
\label{f:e88}
\end{figure}
The following transversality assumption will play a key role in the sequel (see Fig.~\ref{f:e88}).
\vskip 1em
\begin{itemize}
\item[{\bf (A1)}] {\it There exist constants $T^*, c_1>0$ such that, for
every boundary point $y\in \Sigma_1\cap \hbox{Supp}(h)$, the following holds.
\begin{itemize}
\item[(i)] The vector ${\bf q}$
is strictly inward pointing:
\begin{equation}\label{OT3}\bigl\langle {\bf n}(y)\,,\, {\bf q}(y)\bigr\rangle~\leq~-c_1\,.
\end{equation}
\item[(ii)] The characteristic $t\mapsto \exp(t{\bf q})(y)$ remains inside ${\mathcal D}$
until it reaches a boundary point
\begin{equation}\label{exitp}z(y)\,=\, \exp\bigl(T(y){\bf q}\bigr)(y)~\in ~\Sigma_2\end{equation}
within a finite time $T(y)\leq T^*$, and exits transversally:
\begin{equation}\label{OT2}
\Big\langle {\bf n}(z(y))\,,\, {\bf q}(z(y))\Big\rangle~\geq ~c_1\,.\end{equation}
\end{itemize}
}
\end{itemize}
As in (\ref{UV})--(\ref{proj}), we consider the decomposition $H\doteq {\bf L}^2({\mathcal D})=U\times V$, with perpendicular projections
$P$ and $ I-P$, and write $\Omega= (u,v)$. The partial derivatives of the map
$\Lambda=\Lambda(u,v)$ introduced at (\ref{ldef}) will be denoted by
$D_u\Lambda$, $D_v\Lambda$.
Let a finite dimensional approximate solution $\Omega_0= (u_0,0)\in U$ of (\ref{lam1}) be given,
with $\Phi_0=\Delta^{-1}\Omega_0$. Throughout the following, we denote by
\begin{equation}\label{Adef} A~\doteq~P\circ D_u\Lambda(\Omega_0)\end{equation}
the partial differential w.r.t.~$u$ of the map $(u,v)\mapsto P\Lambda(u,v)$, computed at the point
$\Omega_0$.
Notice that $A$ is a linear map from the finite dimensional space $U$ into itself.
Since we are seeking a fixed point, in the same
spirit of the Newton-Kantorovich theorem~\cite{Berger, Ciarlet, Deimling}
we shall assume the invertibility of the
Jacobian map, restricted to the finite dimensional subspace $U$.
\begin{itemize}
\item[{\bf (A2)}] {\it The operator $I - A$
from $U$ into itself has a bounded inverse, with }
\begin{equation}\label{IA}\big\|(I-A)^{-1}\big\|_{{\cal L}(U)}~\leq~\gamma~<~+\infty\end{equation}
for some $\gamma\geq 1$.
\end{itemize}
Here the left hand side refers to the operator norm, in the space of linear operators on $U\subset{\bf L}^2({\mathcal D})$.
Notice that (\ref{IA}) implies that the operator $I-AP:H\mapsto H$ is also invertible,
with
\begin{equation}\label{IA2}
\big\|(I-A\,P)^{-1}\big\|_{{\cal L}(H)}~\leq~\gamma.
\end{equation}
| 3,674 | 27,931 |
en
|
train
|
0.99.4
|
Consider the set
\begin{equation}l{D*} {\mathcal D}^*~\doteq~\Big\{ y\itemn \overline{\mathcal D}\,;~~x(\tau,y)\itemn \Sigma_1\centerlineap\hbox{Supp}(h)\quad\hbox{for some}~\tau\geq 0\Big\}.\epsiloneq
In other words, $y\itemn {\mathcal D}^*$ if the characteristic through $y$ reaches a boundary point in the support of $h$ within
finite time. Calling
\begin{equation}l{tauy}\tau(y)~\doteq~\min\,\bigl\{t\geq 0\,;~~ x(t,y)\itemn \Sigma_1\bigr\}\epsiloneq
the first time when the characteristic starting at $y$ reaches the boundary $\Sigma_1$,
the solution to (\ref{lam1}) is computed by
\begin{equation}l{OR}{\cal O}mega(y)~=~\left\{ \begin{equation}gin{itemize}n{array}{cl} e^{\tau(y)}\, h(x(\tau(y), y))\qquad &\hbox{if} ~~x\itemn {\mathcal D}^*,\\[3mm]
0\qquad&\hbox{if}~~x\noindentotin{\mathcal D}^*.\epsilonnda\right.\epsiloneq
\begin{equation}gin{itemize}n{figure}[htbp]
\centerlineentering
\itemncludegraphics[scale=0.45]{e88.pdf}
\centerlineaption{\small According to {\bf (A1)}, every characteristic starting at a point
$y\itemn \Sigma_1\centerlineap {\rm Supp }(h)$ exits from the domain ${\mathcal D}$ at some boundary point
$z=z(y)\itemn \Sigma_2$, at a time $T(y)\leq T^*$. The shaded region represents the subdomain ${\mathcal D}^*$ in
(\ref{D*}).}
\bigl\langlebel{f:e88}
\epsilonnd{figure}
The following transversality assumption will play a key role in the sequel (see Fig.~\ref{f:e88}).
\vskip 4emip 2emkip 1em
\begin{equation}gin{itemize}
\itemtem[{\bf (A1)}] {\itemt There exists constants $T^*, c_1>0$ such that, for
every boundary point $y\itemn \Sigma_1\centerlineap \hbox{Supp}(h)$, the following holds.
\begin{equation}gin{itemize}
\itemtem[(i)] The vector ${\bf q}$
is strictly inward pointing:
\begin{equation}l{OT3}\bigl\bigl\langlengle {\bf n}(y)\,,\, {\bf q}(y)\bigr\bigr\ranglengle~\leq~-c_1\,.
\epsiloneq
\itemtem[(ii)] The characteristic $t\mapsto \epsilonxp(t{\bf q})(y)$ remains inside ${\mathcal D}$
until it reaches a boundary point
\begin{equation}l{exitp}z(y)\,=\, \epsilonxp\bigl(T(y){\bf q}\bigr)(y)~\itemn ~\Sigma_2\epsiloneq
within a finite time $T(y)\leq T^*$, and exits transversally:
\begin{equation}l{OT2}
\Big\bigl\langlengle {\bf n}(z(y))\,,\, {\bf q}(z(y))\Big\bigr\ranglengle~\geq ~c_1\,.\epsiloneq
\epsilonndi
}
\epsilonndi
As in (\ref{UV})-(\ref{proj}), we consider the decomposition $H\doteq {\bf L}^2({\mathcal D})=U\times V$, with perpendicular projections
$P$ and $ I-P$, and write $\Omega= (u,v)$. The partial derivatives of the map
$\Lambda=\Lambda(u,v)$ introduced at (\ref{ldef}) will be denoted by
$D_u\Lambda$, $D_v\Lambda$.
Let a finite dimensional approximate solution $\Omega_0= (u_0,0)\in U$ of (\ref{lam1}) be given,
with $\Phi_0=\Delta^{-1}\Omega_0$. Throughout the following, we denote by
\begin{equation}\label{Adef} A~\doteq~P\circ D_u\Lambda(\Omega_0)\end{equation}
the partial differential w.r.t.~$u$ of the map $(u,v)\mapsto P\Lambda(u,v)$, computed at the point
$\Omega_0$.
Notice that $A$ is a linear map from the finite dimensional space $U$ into itself.
Since we are seeking a fixed point, in the same
spirit of the Newton-Kantorovich theorem~\cite{Berger, Ciarlet, Deimling}
we shall assume the invertibility of the
Jacobian map, restricted to the finite dimensional subspace $U$.
\begin{itemize}
\item[{\bf (A2)}] {\it The operator $I - A$
from $U$ into itself has a bounded inverse, with }
\begin{equation}\label{IA}\bigl\|(I-A)^{-1}\bigr\|_{{\mathcal L}(U)}~\leq~\gamma~<~+\infty\end{equation}
for some $\gamma\geq 1$.
\end{itemize}
Here the left hand side refers to the operator norm, in the space of linear operators on $U\subset{\bf L}^2({\mathcal D})$.
Notice that (\ref{IA}) implies that the operator $I-AP:H\mapsto H$ is also invertible,
with
\begin{equation}\label{IA2}
\bigl\|(I-A\,P)^{-1}\bigr\|_{{\mathcal L}(H)}~\leq~\gamma.\end{equation}
Concerning the $V$-component, a key assumption used in our analysis will be
\begin{itemize}
\item[{\bf (A3)}] {\it The orthogonal spaces $U,V$ in (\ref{UV}) are chosen so that
\begin{equation}\label{LVsmall}
\| \Delta^{-1}\circ (I-P)\|~<~\varepsilon_0~\ll~1\,.\end{equation}
}
\end{itemize}
Intuitively this means that, in the decomposition $\Omega = (u,v)\in U\times V$, the
component $v\in V$ captures the high frequency modes,
which are heavily damped by the inverse Laplace operator \cite{BRS, Cle, KS}.
\vskip 1em
{}From an abstract point of view, proving the existence of a fixed point of the map $\Omega\mapsto
\Lambda(\Omega)$ in (\ref{ldef}) is a simple matter.
Using the decomposition $\Omega = (u,v)\in U\times V$ with orthogonal projections as in (\ref{proj}),
we start with an initial guess $\Omega_0 = (u_0,0)$.
Assuming (\ref{IA}), we write (\ref{lam1}) in the form
$$\Omega-AP\,\Omega~=~\Lambda(\Omega)- AP\,\Omega\,.$$
Equivalently,
\begin{equation}\label{eqeq}
\Omega~=~\Upsilon(\Omega)~\doteq~(I-AP)^{-1}\bigl(\Lambda(\Omega)- AP\,\Omega\bigr).\end{equation}
The heart of the matter is to show that, on a neighborhood ${\mathcal N}$ of $\Omega_0$,
the map $\Omega\mapsto\Upsilon(\Omega)$ is a strict contraction.
If the initial error $\|\Upsilon(\Omega_0)-\Omega_0\|$
is sufficiently small, the iterates $\Omega_n=\Upsilon^n(\Omega_0)$ will thus remain inside ${\mathcal N}$
and converge to a fixed point. The contraction property is proved as follows.
By (\ref{IA2}) one has $\|(I-AP)^{-1}\|\leq\gamma$. On the other hand, computing the differential of the map
$\Omega\mapsto \Lambda(\Omega)- AP\,\Omega$
w.r.t.~the components $(u,v)\in U\times V$, at the point $\Omega=\Omega_0= (u_0,0)$ one obtains
\begin{equation}\label{diff}
D\bigl(\Lambda(u,v)-Au\bigr)
~=~\left(
\begin{array}{ccc} 0 && P D_v\Lambda\\[4mm]
(I-P) D_u\Lambda && (I-P) D_v\Lambda\end{array}\right).\end{equation}
Because of (\ref{LVsmall}), we expect
\begin{equation}\label{dism}\|D_v\Lambda\|~\leq~\|D\Gamma\|\cdot \bigl\| \Delta^{-1} \circ(I-P)\bigr\|~\ll~1.\end{equation}
By possibly using an equivalent norm on the product space $U\times V$ (see (\ref{n*}) for details),
we thus achieve the strict contractivity of the map $\Upsilon$ in (\ref{eqeq}).
\vskip 4emip 2emkip 1em
In the next section, more careful estimates will be derived on the differentials of ${\mathcal G}amma$ and
${\bf L}ambda$ in (\ref{ldef}). In this direction we remark that,
while the operator ${\mathcal D}elta^{-1}$ is well defined on ${\bf L}^2({\mathcal D})$,
a difficulty arises in connection with the differential of ${\mathcal G}amma$.
Indeed, to compute this differential, we need to perturb the function $\Phi$ in (\ref{lam1})
and estimate the change in the corresponding solution ${\cal O}mega$.
This can be done assuming that $\Phi\itemn{\mathcal C}^2$,
hence $\noindentabla^\perp\Phi\itemn {\mathcal C}^1$. Unfortunately, the assumption ${\cal O}mega\itemn {\bf L}^2$
only implies $\Phi= {\mathcal D}elta^{-1}{\cal O}mega\itemn H^2$, which does not yield any bound on $\|\Phi\|_{{\mathcal C}^2}$.
In order to establish the desired a posteriori error bound,
an additional argument will thus be needed, showing that our approximate solutions actually
enjoy some additional regularity.
| 2,801 | 27,931 |
\section{Preliminary lemmas}
\setcounter{equation}{0}
\label{s:3}
\begin{figure}[htbp]
\centering
\includegraphics[scale=0.5]{e86.pdf}
\caption{\small Computing the perturbed solution $\Omega^\varepsilon(y)$ in (\ref{om4}), by estimating the change in
the characteristic through $y$.}
\label{f:e86}
\end{figure}
\subsection{Continuous dependence of solutions to a first order linear PDE.}
As remarked in Section~\ref{s:2}, solutions to the linear PDE (\ref{lam1}) can be found by the method of characteristics (\ref{OR}). Our present goal is to understand how the solution changes,
depending on the vector field ${\bf q}$ in (\ref{qd}). To fix the ideas,
consider the boundary value problem
\begin{equation}\label{lam3}
\left\{\begin{array}{rll} {\bf q}(x)\cdot \nabla\Omega&=~\Omega,\qquad\qquad
&x\in {\mathcal D},\\[3mm]
\Omega&=~h, &x\in \Sigma_1\,,\end{array}\right.\end{equation}
assuming that ${\bf q}:{\mathcal D}\mapsto{\mathbb R}^2$ is a ${\mathcal C}^1$ vector field
satisfying {\bf (A1)} together with
\begin{equation}\label{qass}\|{\bf q}\|_{{\mathcal C}^1({\mathcal D})}~\leq~M,\qquad\qquad \hbox{div } {\bf q}(x) ~=~ -2\mu\qquad\hbox{for all }~ x\in {\mathcal D}\,.\end{equation}
Given a second vector field $\widetilde{\bf q}\in {\mathcal C}^1$, consider the family of perturbations
\begin{equation}\label{qep}{\bf q}^\varepsilon(x)~=~{\bf q}(x) + \varepsilon \,\widetilde{\bf q} (x),\end{equation}
and let
\begin{equation}\label{om4}\Omega^\varepsilon(x)~=~\Omega(x) +\varepsilon\, \widetilde\Omega (x) + o(\varepsilon)\end{equation}
be the corresponding solutions of (\ref{lam3}).
Here and in the sequel, the notation $o(\varepsilon)$ indicates a higher order infinitesimal,
so that $\varepsilon^{-1}o(\varepsilon)\to 0$ as $\varepsilon\to 0$.
The next lemma provides an ${\bf L}^2$~estimate on the size of the first order perturbation $\widetilde\Omega$.
Setting $\Sigma^*_1\doteq\Sigma_1\cap\hbox{Supp}(h)$, we introduce the constant
\begin{equation}\label{TC}
\widetilde C~=~ \sup_{x\in \Sigma_1^*} {1\over | \langle {\bf n}(x), {\bf q}(x)\rangle|}
\cdot \|h\|_{{\mathcal C}^0}+\left( 1+\,\sup_{x\in \Sigma_1^*} {|{\bf q}(x)|\over | \langle {\bf n}(x), {\bf q}(x)\rangle|}\right) \|Dh\|_{{\mathcal C}^0} \,,
\end{equation}
and define
\begin{equation}\label{pera} K(M, t)~=~e^t\left({e^{(2M+1)t}- e^{-2\mu t}\over 4M+4\mu+2}\right)^{1/2}
.\end{equation}
\begin{lemma}\label{l:42} Recalling (\ref{D*}), assume that ${\bf q}\in {\mathcal C}^1({\mathcal D})$ satisfies (\ref{qass})
together with {\bf (A1)}.
In the setting considered at (\ref{qep})-(\ref{om4}), the first order perturbation $\widetilde \Omega$ satisfies
\begin{equation}\label{oper}
\|\widetilde\Omega \|_{{\bf L}^2({\mathcal D})}~\leq~
\widetilde C \cdot K\Big(\|D{\bf q}\|_{{\mathcal C}^0({\mathcal D}^*)},\,T^*\Big)\cdot \|\widetilde{\bf q}\|_{{\bf L}^2({\mathcal D}^*)}\,.\end{equation}
\end{lemma}
{\bf Proof.} {\bf 1.} In analogy with (\ref{xty}),
we denote by
$$ t\,\mapsto\, x^\varepsilon(t,y)\,=\,\exp(-t{\bf q}^\varepsilon)(y)$$
the solution to
\begin{equation}\label{od2}\dot x^\varepsilon~=~-{\bf q}(x)-\varepsilon \widetilde{\bf q}(x),\qquad x(0)=y.\end{equation}
For $0\leq t<\tau(y)$, the tangent vector
\begin{equation}\label{tanv}{\bf w}(t,y)~\doteq~\lim_{\varepsilon\to 0} {x^\varepsilon(t,y)-x(t,y)\over\varepsilon}\end{equation}
provides a solution to the linearized equation
\begin{equation}\label{leq}
\dot {\bf w}(t,y)~=~-D{\bf q}(x(t,y))\cdot {\bf w}(t,y)- \widetilde{\bf q}(x(t,y)),\qquad\qquad {\bf w}(0,y)=0.\end{equation}
For notational convenience,
we extend the definition of the vector ${\bf w}(t,y)$ by setting
\begin{equation}\label{tT}{\bf w}(t,y)~=~{\bf w}(\tau(y), y)\qquad\hbox{if}\qquad t\in [\tau(y), \,T^*].\end{equation}
We denote by
\begin{equation}\label{xie}\begin{array}{l} \xi(y)\,=\,x(\tau(y),y)\,=\,\exp\bigl(-\tau(y){\bf q}\bigr)(y),
\\[4mm] \xi^\varepsilon(y)\,=\,x^\varepsilon(\tau^\varepsilon(y),y)\, =\,\exp\bigl(-\tau^\varepsilon(y){\bf q}^\varepsilon\bigr)(y)\,,\end{array}\end{equation}
the points where the characteristic through $y$ crosses the boundary $\Sigma_1$,
and consider the expansions
\begin{equation}\label{txx}
\xi^\varepsilon(y)~=~\xi(y)+\varepsilon\tilde\xi(y)+o(\varepsilon),\qquad\qquad \tau^\varepsilon(y)~=~\tau(y)+\varepsilon \widetilde\tau(y)+o(\varepsilon).\end{equation}
Observing that
\begin{equation}\label{wwy}
\Big\langle {\bf n}\bigl(x(\tau(y),y)\bigr)~,~{\bf w}(\tau(y),y) -\widetilde\tau(y)\cdot {\bf q}\bigl(x(\tau(y),y)\bigr)
\Big\rangle~=~0,
\end{equation}
we obtain
\begin{equation}\label{ttau}\widetilde\tau(y)~=~{\Big\langle {\bf n}\bigl(\xi(y)\bigr)~,~{\bf w}(\tau(y),y)\Big\rangle \over
\Big\langle {\bf n}\bigl(\xi(y)\bigr)~,~{\bf q}\bigl(\xi(y)\bigr)\Big\rangle}\,,\end{equation}
\begin{equation}\label{txi}
\tilde \xi(y)~=~{\bf w}(\tau(y),y) -\widetilde\tau(y)\, {\bf q}\bigl(\xi(y)\bigr)
\,,\end{equation}
\begin{equation}\label{tx2}
|\tilde \xi(y)|~\leq~|{\bf w}(\tau(y),y)|\cdot \left( 1+ \left| { {\bf q}(\xi(y))\over \bigl\langle {\bf n}(\xi(y)),\,{\bf q}(\xi(y))
\bigr\rangle}\right| \,\right). \end{equation}
Finally, for $y\in {\mathcal D}^*$ we have
\begin{equation}\label{TO5}
\widetilde\Omega(y)~=~\widetilde \tau(y)\,e^{\tau(y)} h(\xi(y))
+ e^{\tau(y)}\,\nabla h(\xi(y))\cdot \tilde\xi(y) .\end{equation}
In view of (\ref{ttau})--(\ref{tx2}) and the definition of $\widetilde C$ at (\ref{TC}), this yields
\begin{equation}\label{TO6}\begin{array}{rl}\displaystyle
\bigl|\widetilde\Omega(y)\bigr|&\displaystyle\leq~e^{\tau(y)}\|h\|_{{\bf L}^\infty}
\cdot{\bigl|{\bf w}(\tau(y),y)\bigr|\over\left| \bigl\langle {\bf n}(\xi(y))\,,~{\bf q}(\xi(y)) \bigr\rangle\right|} \\[4mm]
&\displaystyle\qquad\qquad +
e^{\tau(y)}\|\nabla h\|_{{\bf L}^\infty}\cdot \left( 1+ { |{\bf q}(\xi(y))|\over \left| \bigl\langle {\bf n}(\xi(y)),\,{\bf q}(\xi(y))
\bigr\rangle\right| } \right)\,\bigl|{\bf w}(\tau(y),y)\bigr|\\[5mm]
&\leq~e^{\tau(y)}\widetilde C\,\bigl|{\bf w}(\tau(y),y)\bigr|~\leq~e^{T^*}\widetilde C\,\bigl|{\bf w}(\tau(y),y)\bigr|.\end{array}\end{equation}
\vskip 1em
{\bf 2.}
It now remains to derive a bound on the ${\bf L}^2$ norm of
${\bf w}$.
Keeping (\ref{tT}) in mind, define
$$Z(t)~\doteq~\int_{\mathcal D} |{\bf w}(t, y)|^2\, dy.$$
For any $t\in [0, T^*]$, by (\ref{qass})
the Jacobian determinant of the map $y\mapsto x(t,y)$ satisfies
\begin{equation}\label{det}\det \Big({\partial x(t,y)\over\partial y}\Big) ~=~e^{2\mu t}.\end{equation}
Using the above identity to change variables of integration,
setting
$${\mathcal D}^*_t~\doteq~\bigl\{ y\in {\mathcal D}^*\,;~~\tau(y)<t\bigr\},$$
we obtain
\begin{equation}\label{i3}\int_{{\mathcal D}^*_t} |\widetilde{\bf q} (x(t,y))|^2\, dy~\leq~e^{-2\mu t} \int_{{\mathcal D}^*} |\widetilde{\bf q} (x)|^2\, dx~
=~e^{-2\mu t}\,\|\widetilde{\bf q} \|_{{\bf L}^2({\mathcal D}^*)}^2\,.\end{equation}
In turn, by the elementary inequality $ab\leq {1\over 2} (a^2+b^2)$, this yields
$$\begin{array}{rl}\displaystyle {d\over dt} Z(t)&\displaystyle\leq~2\int_{{\mathcal D}^*_t} \Big|\bigl\langle {\bf w}(t,y), ~\dot{\bf w}(t,y)\bigr\rangle\Big|\, dy
\\[4mm]
&\displaystyle\leq~2\int_{{\mathcal D}^*_t}
\Big\{ \bigl|D{\bf q}(x(t,y))\bigr|\, |{\bf w}(t,y)|^2 + |{\bf w}(t,y)|\, |\widetilde{\bf q} (x(t,y))|\Big\}\, dy
\\[4mm]
&\displaystyle\leq~2 \Big\{ \|D{\bf q}\|_{{\mathcal C}^0} \cdot \|{\bf w}(t,\cdot)\|_{{\bf L}^2({\mathcal D}^*_t)}^2 + \|{\bf w}(t,\cdot)\|_{{\bf L}^2({\mathcal D}^*_t)}
e^{-\mu t}\|\widetilde {\bf q}\|_{{\bf L}^2({\mathcal D}^*)}\Big\}\\[4mm]
&\leq~\displaystyle 2 \Big\{ \|D{\bf q}\|_{{\mathcal C}^0}\, Z(t) + {Z(t)\over 2} +
{e^{-2\mu t}\over 2} \|\widetilde{\bf q} \|^2_{{\bf L}^2({\mathcal D}^*)}\Big\}
\,.
\end{array}$$
Hence
\begin{equation}\label{wl2}\|{\bf w}(t,\cdot)\|_{{\bf L}^2({\mathcal D})}^2~=~Z(t)~\leq~\kappa (t)\cdot \|\widetilde {\bf q}\|^2_{{\bf L}^2({\mathcal D})}, \end{equation}
where we set
\begin{equation}\label{kapdef} \kappa (t)~\doteq~{1\over 2}
\int_0^t e^{(2M+1)(t-s)} \,e^{-2\mu s}\, ds~=~{e^{(2M+1)t}- e^{-2\mu t}\over 4M+4\mu+2}\,.\end{equation}
Taking $t=T^*$ we conclude
\begin{equation}\label{wl3}\int_{{\mathcal D}^*} \bigl|{\bf w}(\tau(y),y)\bigr|^2\, dy~\leq~{e^{(2M+1)T^*}- e^{-2\mu T^*}\over 4M+4\mu+2}\cdot \|\widetilde {\bf q}\|^2_{{\bf L}^2({\mathcal D})}
\,.\end{equation}
Using this bound, from (\ref{TO6}) one obtains
(\ref{oper}).
\endproof
| 4,049 | 27,931 |
\subsection{A regularity estimate.}
Given a H\"older continuous function
$f:{\mathcal D}\mapsto{\mathbb R}$, for $0<\alpha,\delta<1$, we introduce the notation
\begin{equation}\label{39}
\|f\|_{\alpha,\delta}~\doteq~
\sup_{x,y\in{\mathcal D},~0<|x-y|<\delta} ~{|f(x)-f(y)|\over |x-y|^\alpha}\,.\end{equation}
Notice that, compared with the standard definition of the norm in the H\"older space ${\mathcal C}^{0,\alpha}$
(see for example \cite{BFA, Evans}), in (\ref{39}) the supremum is taken only over couples with $|x-y|<\delta$.
From the above definition it immediately follows that
\begin{equation}\label{ades}
\|f\|_{\alpha,\delta}~\leq~\|f\|_{{\mathcal C}^{0,\alpha}}\,,
\qquad\quad \|f\|_{\alpha,\delta}~\leq~
\delta^{1-\alpha} \|\nabla f\|_{{\bf L}^\infty}\,.
\end{equation}
The next lemma shows that the ${\mathcal C}^{0,\alpha}$ norm can be controlled in terms
of the seminorm
$\|\cdot\|_{\alpha,\delta}$ together with the ${\bf L}^2$ norm.
As a preliminary we observe that, since ${\mathcal D}$ is an open set with smooth boundary, it has a positive inner radius $\rho>0$.
Namely, every point $y\in {\mathcal D}$ lies in an open ball
of radius $\rho$, entirely contained inside ${\mathcal D}$.
\begin{lemma}\label{l:interp}
Let ${\mathcal D}\subset{\mathbb R}^2$ be a bounded open set with inner radius $\rho>0$, and let $\alpha,\delta\in \,]0,1[\,$ and $c>0$ be given. Then there exists $\delta_c>0$ such that the following holds.
If
\begin{equation}\label{40} \|f\|_{\alpha,\delta}~\leq ~c,\qquad\qquad \|f\|_{{\bf L}^2({\mathcal D})}~\leq~\delta_c\,,\end{equation}
then \begin{equation}\label{42}
\|f\|_{{\mathcal C}^0({\mathcal D})}~\leq~{c\over 2} \,\delta^\alpha.\end{equation}
In turn, this implies
\begin{equation}\label{41}\|f\|_{{\mathcal C}^{0,\alpha}({\mathcal D})}~\leq~c.
\end{equation}
\end{lemma}
{\bf Proof.}
{\bf 1.} We claim that, by choosing $\delta_c>0$ small enough, the inequalities
(\ref{40}) imply (\ref{42}).
Indeed, consider the function
$$\phi(x)~\doteq~\max\Big\{ 0,~{c\over 2} \delta^\alpha - c|x|^\alpha\Big\}$$
and the disc
$$B~=~\{(x_1,x_2)\,;~~(x_1-\rho)^2 + x_2^2<\rho^2\}.$$
Setting
\begin{equation}\label{doc}\delta_c~\doteq~\|\phi\|_{{\bf L}^2(B)} ~=~\left(\int_B\phi^2(x)\, dx\right)^{1/2},\end{equation}
we claim that the assumptions (\ref{40}) imply (\ref{42}).
Indeed, assume that $|f(y_0)|> c\delta^\alpha/2$ at some point $y_0\in {\mathcal D}$.
Let $B_0\subset{\mathcal D}$ be a disc of radius $\rho$ which contains $y_0$.
A comparison argument now yields
$$\begin{array}{rl}\displaystyle\|f\|^2_{{\bf L}^2({\mathcal D})}&\displaystyle\ge~\int_{B_0} f^2(x)\, dx~\geq~\int_{B_0}
\left(\max\Big\{ 0,~|f(y_0)|-c|x-y_0|^\alpha\Big\}\right)^2\, dx \\[4mm]
\displaystyle\qquad &\displaystyle >~
\int_B\phi^2(x)\, dx~=~\delta_c^2\,,\end{array}$$
reaching a contradiction. Hence (\ref{42}) holds.
\vskip 1em
{\bf 2.} By the assumptions, we already know that
\begin{equation}\label{44} {|f(x)-f(y)|\over |x-y|^\alpha}~\leq~c\end{equation}
when $0<|x-y|<\delta$. It remains to prove that the same holds when
$|x-y|\geq\delta$. But in this case by (\ref{42}) one trivially has
$${|f(x)-f(y)|\over |x-y|^\alpha}~\leq~{|f(x)|+|f(y)|\over \delta^\alpha}
~\leq~c\,.$$
Together with (\ref{42}), this yields
$$\|f\|_{{\mathcal C}^{0,\alpha}({\mathcal D})}~\doteq~\max\left\{ \sup_x~|f(x)|\,,~~\sup_{x\not= y} {|f(x)-f(y)|\over |x-y|^\alpha}\right\}
~\leq~c\,,$$
proving (\ref{41}).
\endproof
| 1,377 | 27,931 |
\section{Construction of an exact solution}
\setcounter{equation}{0}
\label{s:4}
As in (\ref{UV})-(\ref{proj}), we consider the decomposition $H\doteq {\bf L}^2({\mathcal D})=U\times V$, with perpendicular projections
$P$ and $ I-P$, and write $\Omega= (u,v)$. We recall that $D_u\Lambda$, $D_v\Lambda$ denote the partial derivatives of the map
$\Lambda=\Lambda(u,v)$ introduced at (\ref{ldef}).
As anticipated in Section~\ref{s:2}, we write the fixed point problem $\Omega = \Lambda(\Omega)$ in the
equivalent form
\begin{equation}\label{ups}
\Omega~=~\Upsilon(\Omega)~\doteq~(I-AP)^{-1}\bigl(\Lambda(\Omega)- AP\,\Omega\bigr).\end{equation}
In terms of the components $(u,v)$, at
$\Omega=\Omega_0= (u_0,0)$, the differential of the map $\Omega\mapsto \Lambda(\Omega)- AP\,\Omega$
has the form (\ref{diff}).
To achieve the contraction property, on the product space $H=U\times V$ we consider
the equivalent inner product
\begin{equation}\label{ip*}\bigl\langle(u,v),(u',v')\bigr\rangle_*~=~\bigl\langle u, u'\bigr\rangle + \eta_0 \bigl\langle v, v'\bigr\rangle,\end{equation}
for a suitable constant $0<\eta_0\leq 1$. The corresponding norm is
\begin{equation}\label{n*}\|(u,v)\|_*~=~\bigl( \|u\|^2 + \eta_0 \|v\|^2\bigr)^{1/2}.\end{equation}
Based on (\ref{dism}), we assume that a constant $\eta_0$ can be chosen so that, at the point
$\Omega_0$, the corresponding norm of the linear operator (\ref{diff}) is $\leq 1/4\gamma$.
By (\ref{IA2}) and the definition of $\Upsilon$ at (\ref{ups}), this implies
$$\|D\Upsilon(\Omega_0)\|_*~\leq~{1\over 4}\,.$$
Here and in the sequel, we also denote by $\|\cdot\|_*$ the norm of a linear operator, corresponding to the
norm (\ref{n*}) on the product space $H=U\times V$.
By continuity, we can determine a radius $r_0>0$ such that,
denoting by $B_*(\Omega_0, r_0)$ a ball centered at $\Omega_0$ with radius $r_0$
w.r.t.~the equivalent norm $\|\cdot\|_*$, one has the implication
\begin{equation}\label{c23}
\Omega,\Omega'\in B_*(\Omega_0, r_0)\qquad \implies\qquad \|\Upsilon(\Omega)-\Upsilon(\Omega')\|_*~\leq~
{1\over 2} \|\Omega-\Omega'\|_*\,.\end{equation}
If the approximate solution $\Omega_0$ satisfies
\begin{equation}\label{ig}
\|\Upsilon(\Omega_0)-\Omega_0\|_*~\leq~{r_0\over 2}\,,\end{equation}
we can then define the iterates
\begin{equation}\label{OPN}
\Omega_n~\doteq~\Upsilon(\Omega_{n-1}),\qquad\qquad \Phi_n~=~\Delta^{-1}\Omega_n\,.\end{equation}
Taking the limit
\begin{equation}\label{UPN}
\overline\Omega~=~\lim_{n\to\infty} \Omega_n\,,
\end{equation}
we thus obtain a fixed point
$\overline\Omega = \Upsilon(\overline\Omega)$, with
\begin{equation}\label{fixo}
\|\overline\Omega-\Omega_0\|_{{\bf L}^2({\mathcal D})}~\leq~\|\overline\Omega-\Omega_0\|_*~\leq~r_0\,.\end{equation}
While this approach is entirely straightforward, our main concern here is to
derive more precise estimates on the various constants, which guarantee that an exact solution actually exists.
Notice that the contraction property, in a norm equivalent to ${\bf L}^2$, implies that
$\|\Upsilon^n(\Omega_0)-\Omega_0\|_{{\bf L}^2}$ will be small, for every $n\geq 1$.
In turn, recalling (\ref{OPN}), we conclude that
$\|\Phi_n-\Phi_0\|_{H^2}$ also remains small. However, this estimate
is not enough to provide an a priori bound on $\|\nabla^\perp \Phi_n\|_{{\mathcal C}^1}$, which is needed to
estimate the differential $D\Lambda$. For this reason, an additional regularity estimate for the
iterates $\Omega_n=\Upsilon^n(\Omega_0)$ will be derived.
\subsection{Regularity estimates.}
Assuming that the ${\mathcal C}^1$ vector field ${\bf q}_0= \nabla^\perp \Phi_0+{\bf v}$ satisfies the transversality
assumptions {\bf (A1)}, we can find $0<\delta_1\leq 1$ with the following property.
\begin{itemize}
\item[{\bf (P1)}]
{\it If $\|\Phi-\Phi_0\|_{{\mathcal C}^2}\leq\delta_1$, then the vector field ${\bf q}= \nabla^\perp \Phi+{\bf v}$
still satisfies (\ref{OT3})--(\ref{OT2}), possibly with a smaller constant $c_1>0$ and with $T^*$ replaced by $T^*+1$.
}
\end{itemize}
In particular, all characteristics starting from a point $y\in\Sigma_1$ in the support of $h$ still exit from the domain ${\mathcal D}$
within time $T^*+1$.
Moreover, the right hand side of (\ref{TC}) remains uniformly bounded by some constant,
which we still denote by $\widetilde C$.
Our next goal is to ensure that all our approximations satisfy
\begin{equation}\label{pno}
\|\Phi_n-\Phi_0\|_{{\mathcal C}^2}~\leq~ \delta_1\qquad\qquad \hbox{for all }~ n\geq 1.\end{equation}
This bound will be achieved by an inductive argument, in several steps.
\vskip 1em
{\bf 1.}
If (\ref{pno}) holds,
then
\begin{equation}\label{qn}\|{\bf q}_n\|_{{\mathcal C}^1}~=~\bigl\|\nabla^\perp \Phi_n+ {\bf v}\bigr\|_{{\mathcal C}^1}~\leq~
\bigl\|\nabla^\perp \Phi_0+ {\bf v}\bigr\|_{{\mathcal C}^1} + \bigl\|\nabla^\perp \Phi_n- \nabla^\perp \Phi_0\bigr\|_{{\mathcal C}^1}
~\leq~M+1\,.\end{equation}
Assuming (\ref{pno}), the solution $\Omega = \Gamma(\Phi_n)$ of (\ref{lam1}) satisfies
\begin{equation}\label{Gn1}
\|\Omega\|_{{\mathcal C}^0}~\leq~e^{T^*+1}\, \|h\|_{{\mathcal C}^0}\,.\end{equation}
To provide a bound on the gradient $\nabla\Omega$, recalling the notation introduced at
(\ref{xty})--(\ref{tauy}),
fix any $x_0\in {\mathcal D}$ such that $x_0 = \exp(t{\bf q}_n)(y)$ for some $y\in \Sigma_1\cap \hbox{Supp}(h)$ and $t= \tau(x_0)>0$.
Using the notation $x(t, x_0) \doteq \exp(-t{\bf q}_n)(x_0)$, consider the vector
$${\bf w}(t)~\doteq~\lim_{\varepsilon\to 0} {x(t, x_0+\varepsilon {\bf e}) - x(t,x_0)\over\varepsilon}\,,$$
where ${\bf e}\in {\mathbb R}^2$ is any unit vector.
Then
$$\dot {\bf w}(t)~=~D{\bf q}_n(x(t, x_0))\cdot {\bf w}(t),\qquad\qquad {\bf w}(0)~=~{\bf e}.$$
Hence
\begin{equation}\label{wn}
|{\bf w}(t)|~\leq ~\exp\bigl\{ t\|D{\bf q}_n\|_{{\mathcal C}^0}\bigr\}~\leq~e^{ (T^*+1) (M+1)}.\end{equation}
By (\ref{OR}), the same computations performed at (\ref{wwy})--(\ref{TO6})
now yield
$$|\nabla \Omega(x_0)\cdot {\bf e}|~\leq~e^{T^*+1} \,\widetilde C\, \bigl|{\bf w}(\tau(x_0))\bigr|~\leq~\widetilde C\, e^{(T^*+1)(M+2)}.$$
Since the unit vector ${\bf e}$ was arbitrary, this implies that the gradient of $\Omega=\Gamma(\Phi_n)$ satisfies
\begin{equation}\label{nom}
\bigl\|\nabla \Omega\bigr\|_{{\mathcal C}^0}~\leq~\widetilde C\, e^{(T^*+1)(M+2)}.\end{equation}
\vskip 1em
{\bf 2.}
Recalling (\ref{ups}) and the definition of $\Gamma$ at (\ref{lam1})-(\ref{ldef}),
by (\ref{Gn1}) and (\ref{nom}) we now obtain
\begin{equation}\label{on1}
\bigl\|\Gamma(\Delta^{-1}\Omega_n)\bigr\|_{{\mathcal C}^1}~\leq~e^{T^*+1} \|h\|_{{\mathcal C}^0} +\widetilde C\, e^{(T^*+1)(M+2)},\end{equation}
as long as (\ref{pno}) holds. In turn, this yields a bound of the form
\begin{equation}\label{oc1}
\|\Omega_{n+1}\|_{{\mathcal C}^1}~=~\Big\| (I-AP)^{-1} \bigl(\Gamma(\Delta^{-1}\Omega_n)- AP \,\Omega_n\bigr)\Big\|_{{\mathcal C}^1}~\leq~C_1\,,\end{equation}
where the constant $C_1$ can be estimated in terms of (\ref{on1})
and the properties of the linear operator $A$.
\vskip 1em
{\bf 3.}
Since the domain ${\mathcal D}$ has smooth boundary, Schauder's regularity estimates \cite{Evans, GT} with $\alpha=1/2$
yield a bound of the form
\begin{equation}\label{pnes}
\|\Phi_n-\Phi_0\|_{{\mathcal C}^2}~=~\|\Delta^{-1}(\Omega_n-\Omega_0)\|_{{\mathcal C}^{2}} ~\leq~C_2\, \|\Omega_n-\Omega_0\|_{{\mathcal C}^{0,
1/2}}\,,\end{equation}
for some constant $C_2$ depending only on ${\mathcal D}$.
Assume we have the inductive estimate
\begin{equation}\label{ie2}
\|\Omega_n-\Omega_0\|_{{\mathcal C}^1} ~\leq~2C_1\,.
\end{equation}
Choosing $0<\delta<1$ so that $\delta\leq (2C_1 C_2)^{1\over\alpha-1} = (2 C_1C_2)^{-2}$,
we obtain
\begin{equation}\label{del}\|\Omega_n-\Omega_0\|_{\alpha,\delta} ~\leq~\delta^{1/2}\|\nabla\Omega_n-\nabla\Omega_0\|_{{\mathcal C}^0}
~\leq~2C_1\delta^{1/2}~\leq~{1\over C_2} \,.\end{equation}
\vskip 1em
{\bf 4.} Let $\delta_1>0$ be the constant introduced in {\bf (P1)}.
We now use Lemma~\ref{l:interp}, with $\delta>0$ as in (\ref{del}) and $c=\delta_1/C_2$.
This yields a constant $\delta_c\in \, ]0, \delta_1]$ such that
(\ref{40}) implies (\ref{42})-(\ref{41}). In the present setting, this means that the two inequalities
\begin{equation}\label{ndc}\|\Omega_n-\Omega_0\|_{{\mathcal C}^1} ~\leq~2C_1\,,\qquad\qquad
\|\Omega_n-\Omega_0\|_{{\bf L}^2} ~\leq~\delta_c\,,\end{equation}
together imply
\begin{equation}\label{onz}\|\Omega_n-\Omega_0\|_{{\mathcal C}^{0,1/2}}~\leq~2C_1\,\delta^{1/2} ~\leq~{\delta_1\over C_2}\,,
\qquad\qquad \|\Phi_n-\Phi_0\|_{{\mathcal C}^2}~\leq
~\delta_1.\end{equation}
In other words, if the ${\bf L}^2$ distance between $\Omega_0$ and every $\Omega_n$
remains small, then all these approximate solutions have uniformly bounded ${\mathcal C}^{0,1/2}$ norm.
In turn, property {\bf (P1)} applies.
\subsection{Convergence of the approximations.}
As long as $\|{\cal O}mega-{\cal O}mega_0\|_{{\bf L}^2}\leq \delta_c$ we have $\|\Phi\|_{{\mathcal C}^2}\leq \delta_1$, and hence
\begin{equation}l{kod}\|D{\mathcal G}amma({\cal O}mega)\|_{{\bf L}^2}~\leq~\widetilde C\centerlinedot K(M+\delta_1, \, T^*+1)~\doteq~\kappa_0\,.\epsiloneq
\begin{equation}l{kol}\|D{\bf L}ambda({\cal O}mega)\|_{{\bf L}^2}~\leq~\|D{\mathcal G}amma({\cal O}mega)\|_{{\bf L}^2}
\centerlinedot \|{\mathcal D}elta^{-1}\|_{{\bf L}^2} ~\leq~~\kappa_0/\bigl\langlembda_1\,,
\epsiloneq
where $\bigl\langlembda_1>0$ denotes the first eigenvalue of the Laplace operator on ${\mathcal D}$.
On the other hand, choosing a sufficiently large number $N$ of functions in the orthogonal basis
at (\ref{UV}), we achieve (\ref{LVsmall}). When $(u,v)=(u_0,0)\itemn U\times V$,
the four blocks in the matrix of partial derivatives (\ref{diff}) have norms which can be dominated
respectively by the entries of the matrix
$\left(\begin{equation}gin{itemize}n{array}{cc} 0 & \kappa_0\vskip 4emip 2emkip 1emarepsilon_0\centerliner\kappa_0/\bigl\langlembda_1 & \kappa_0\vskip 4emip 2emkip 1emarepsilon_0\epsilonnda\right)$.
More generally, we can determine $\delta_2\itemn \,]0, \delta_c]$ such that
\begin{equation}l{imp3}
\|{\cal O}mega-{\cal O}mega_0\|_{{\bf L}^2}~\leq~\delta_2\qquad\itemmplies\qquad \|P \,D_u {\bf L}ambda({\cal O}mega)- P\, D_u{\bf L}ambda({\cal O}mega_0)\|_{{\bf L}^2}
~\leq~\kappa_0\vskip 4emip 2emkip 1emarepsilon_0\,.\epsiloneq
Computing the matrix of partial derivatives at (\ref{diff}) at such a point ${\cal O}mega=(u,v)$, and recalling
that $A\doteq P\, D_u{\bf L}ambda({\cal O}mega_0)$, we obtain the relation
\begin{equation}l{cbo}
D\bigl({\bf L}ambda(u,v)-Au\bigr)
~=~\left(\begin{equation}gin{itemize}n{array}{ccc} P \,D_u {\bf L}ambda({\cal O}mega)- P\, D_u{\bf L}ambda({\cal O}mega_0) && P D_v{\bf L}ambda\\[4mm]
(I-P) D_u{\bf L}ambda && (I-P) D_v{\bf L}ambda\epsilonnda\right)~~\prec~~
\left(\begin{equation}gin{itemize}n{array}{cc} \kappa_0\vskip 4emip 2emkip 1emarepsilon_0 & \kappa_0\vskip 4emip 2emkip 1emarepsilon_0\centerliner\kappa_0/\bigl\langlembda_1 & \kappa_0\vskip 4emip 2emkip 1emarepsilon_0\epsilonnda\right).
\epsiloneq
Here we have used the notation $A\prec B$, meaning that every entry in the $2\times 2$ matrix $A$ has norm
bounded by the corresponding entry in the matrix $B$. Notice that the constant $\delta_2$ in (\ref{imp3})
involves only
the behavior of ${\mathcal G}amma\centerlineirc {\mathcal D}elta^{-1}$ on a neighborhood of ${\cal O}mega_0$ in the finite dimensional subspace
$U$, and can be directly estimated.
According to {\bf (A3)}, we now make the key assumption that the constant
$\vskip 4emip 2emkip 1emarepsilon_0>0$ in (\ref{LVsmall}) is small enough so that
\begin{equation}l{A3}\sqrt 2\, \kappa_0\left( \vskip 4emip 2emkip 1emarepsilon_0^2+{\vskip 4emip 2emkip 1emarepsilon_0\overlineer\bigl\langlembda_1}
\right)^{1/2}
\leq ~{1\overlineer 2\gamma}\,.\epsiloneq
This allows us to introduce on $U\times V$ the equivalent norm (\ref{n*}), choosing
$$\epsilonta_0~\doteq~\bigl\langlembda_1\vskip 4emip 2emkip 1emarepsilon_0\,.$$
Without loss of generality, we can assume that $0<\epsilonta_0\leq 1$, so that
\begin{equation}l{eqn}\|\centerlinedot\|_*~\leq~\|\centerlinedot\|_{{\bf L}^2}~\leq~{1\overlineer\sqrt{\epsilonta_0}} \|\centerlinedot\|_*\,.\epsiloneq
In terms of this new norm, a direct computation shows, at any point $(u,v)$ where (\ref{cbo}) holds,
the corresponding operator norm satisfies\footnote{Indeed, assume $u,v\in {\mathbb R}$, $u^2+\eta_0v^2\leq 1$.
Set $z=\sqrt{\eta_0}\, v$, so that $u^2+z^2\leq 1$.
This implies
$$\begin{array}{l}\displaystyle(\kappa_0 \varepsilon_0 u + \kappa_0\varepsilon_0 v)^2 + \eta_0\left({\kappa_0\over \lambda_1} u +\kappa_0\varepsilon_0 v
\right)^2~=~\left( \kappa_0 \varepsilon_0 u + {\kappa_0\varepsilon_0 \over\sqrt {\lambda_1\varepsilon_0 }}z\right)^2
+ \lambda_1\varepsilon_0\left({\kappa_0\over \lambda_1} u +{\kappa_0\varepsilon_0 \over\sqrt {\lambda_1\varepsilon_0 }}z
\right)^2\\[4mm]
\displaystyle\qquad =~\kappa_0^2\left[ \Big( \varepsilon_0 u + \sqrt{\varepsilon_0\over\lambda_1} z\Big)^2 +
\Big(\sqrt{\varepsilon_0\over\lambda_1}\, u
+ \varepsilon_0 z\Big)^2\right]~=~\kappa_0^2\left[ \varepsilon_0^2( u^2+z^2) + {\varepsilon_0\over\lambda_1}(u^2+z^2)
+ 4\varepsilon_0\sqrt{\varepsilon_0\over\lambda_1}\, uz \right]\\[4mm]
\qquad \leq~\displaystyle 2\kappa_0^2 \left( \varepsilon_0^2 + {\varepsilon_0\over\lambda_1}\right) (u^2+z^2).
\end{array}$$
An entirely similar computation, with the numbers $u,v$ replaced by $\|u\|$ and $\|v\|$ respectively, shows that
the corresponding norm of the linear operator (\ref{cbo}) is bounded by $\sqrt 2\, \kappa_0\left( \varepsilon_0^2+\displaystyle{\varepsilon_0\over\lambda_1}
\right)^{1/2}$. }
\begin{equation}\label{opn3}\Big\|
D\bigl(\Lambda(u,v)-Au\bigr)\Big\|_*~\leq~{1\over 2\gamma}\,.\end{equation}
Hence
\begin{equation}\label{co2}\|D\Upsilon\|_*~\leq~\|(I-PA)^{-1}\|\cdot \Big\|D\bigl(\Lambda(u,v)-Au\bigr)\Big\|_*~\leq~{1\over 2}\,.\end{equation}
By the previous analysis, restricted to the set
$${\cal S}~\doteq~\left\{ \Omega\,;~~\|\Omega-\Omega_0\|_{{\bf L}^2}~\leq~ \delta_2\,,\quad \|\Omega-\Omega_0
\|_{{\mathcal C}^{0,\alpha}}\,\leq\, {\delta_1\over C_2}
\right\},$$
the map $\Omega\mapsto \Upsilon(\Omega)$ is a strict contraction, w.r.t.~the equivalent norm $\|\cdot \|_*$ introduced at
(\ref{n*}). Indeed, by (\ref{co2}),
\begin{equation}\label{co1}
\Omega,\Omega'\,\in\, {\cal S}\qquad\implies\qquad \|\Upsilon(\Omega)-\Upsilon(\Omega')\|_*
~\leq~{1\over 2} \|\Omega-\Omega'\|_*\,.\end{equation}
Recalling that $\delta_1$ is the constant in {\bf (P1)}, we now have the chain of implications
$$\begin{array}{l}\displaystyle \|\Omega-\Omega_0\|_*~\leq~ {\delta_2\over\sqrt{\eta_0}} \quad\implies\quad
\|\Omega-\Omega_0\|_{{\bf L}^2}
~\leq~\delta_2\\[4mm]\displaystyle
\quad\implies\quad \|\Omega-\Omega_0
\|_{{\mathcal C}^{0,\alpha}}\,\leq\, {\delta_1\over C_2}\quad\implies\quad \|\Delta^{-1}(\Omega-\Omega_0)\|_{{\mathcal C}^2}
~\leq~\delta_1\,.\end{array}$$
If the initial guess $\Omega_0$ satisfies
\begin{equation}\label{igss}\|\Upsilon(\Omega_0) - \Omega_0\|_{{\bf L}^2}~\leq~{\delta_2\sqrt{\eta_0}\over 2}\,,\end{equation}
then by (\ref{eqn}) and (\ref{co1}) all the iterates $\Omega_n$ in (\ref{OPN}) will remain inside ${\cal S}$.
Namely,
$$\|\Omega_n-\Omega_0\|_{{\bf L}^2}~\leq~{1\over\sqrt{\eta_0}}\,\|\Omega_n-\Omega_0\|_*~\leq~
{1\over\sqrt{\eta_0}}\cdot 2\|\Upsilon(\Omega_0)-\Omega_0\|_*~\leq~\delta_2\,.$$
Letting $n\to \infty$ we have the convergence $\Omega_n\to \overline\Omega\in {\cal S}$.
This limit function $\overline\Omega$ provides a solution to the boundary value problem
(\ref{lam1}), (\ref{D-1}), with
\begin{equation}\label{cl1}\|\overline\Omega-\Omega_0\|_{{\bf L}^2}~\leq~\delta_2\,.\end{equation}
| 3,360 | 27,931 |
en
|
train
|
0.99.9
|
\subsection{An existence theorem.} For readers' convenience,
we summarize the previous analysis, recalling the various constants introduced along the way.
All these constants can be estimated in terms of the domain ${\mathcal D}$, the boundary data $g,h$ in (\ref{BC}),
and the finite dimensional
approximation $\Omega_0$ produced by the numerical algorithm.
\begin{itemize}
\item $\widetilde C$ is the constant at (\ref{TC}), related to the stability of the ODEs for the characteristics of the linear PDE
(\ref{lam3}). This applies to any vector field ${\bf q}= \nabla^\perp\Phi+{\bf v}$ with $\|\Phi-\Phi_0\|_{{\mathcal C}^2}\leq \delta_1$.
\item $K(M,t)$ is the function in (\ref{pera}). Together with $\widetilde C$, it provides an estimate (\ref{oper}) on
how the solution to the linear PDE (\ref{lam3}) varies in the ${\bf L}^2$ distance,
depending on the vector field $\nabla^\perp \Phi$.
\item $C_2$ is the Schauder regularity constant for the smooth domain ${\mathcal D}$, introduced at (\ref{pnes}).
\item $\lambda_1>0$ is the lowest eigenvalue of the Laplace operator on ${\mathcal D}$.
\item $\varepsilon_0$ is the small constant in (\ref{LVsmall}), determining the rate at which the components in the orthogonal
space $V$ are damped by the inverse Laplace operator $\Delta^{-1}$.
\item $\kappa_0$ is the constant in (\ref{kod}), estimating how the solution of the linear PDE (\ref{lam1})
varies, depending on the vector field $\nabla^\perp\Phi_1$, w.r.t.~the ${\bf L}^2$ norm.
\item $\delta_1$ is the constant in {\bf (P1)}, measuring by how much we can perturb the vector field
${\bf q}= \nabla^\perp\Phi+{\bf v}$, and still retain the transversality conditions at the boundary, and a finite exit time.
\item $\delta_c$ is the constant in (\ref{ndc}), providing the regularity estimate (\ref{onz}).
\item $\gamma$ is a bound on the norm $\|(I-PD_u\Lambda(\Omega_0))^{-1}\|$ of the inverse Jacobian matrix in (\ref{IA2}).
It gives a measure of stability for the finite dimensional fixed point problem $\Omega = P\circ
\Lambda(\Omega)$.
\item $\delta_2$ is the constant in (\ref{imp3}), determining a neighborhood of the initial approximation $\Omega_0$,
where the differential $D\Lambda(\Omega)$ remains close to $D\Lambda(\Omega_0)$.
\end{itemize}
Recalling the definition of the finite dimensional linear operator $A$ at (\ref{Adef}) and the equivalent formulation
of the boundary value problem (\ref{lam2})-(\ref{lam1}) as a fixed point problem (\ref{ups}),
we can now summarize all of the previous analysis as follows.
\begin{theorem} Let ${\mathcal D}\subset{\mathbb R}^2$ be a bounded open domain with smooth boundary, decomposed as
$\partial{\mathcal D}=\Sigma_1\cup\Sigma_2$. Given smooth boundary data $g,h$, consider the boundary value problem
(\ref{lam2})-(\ref{lam1}), where ${\bf v}$ is the vector field in (\ref{bbv}).
Assume that the properties {\bf (A1)-(A2)} hold, together with {\bf (P1)}.
Consider the orthogonal decomposition ${\bf L}^2({\mathcal D})= U\times V$ as in (\ref{UV})-(\ref{proj}),
and let
$\Omega_0= (u_0, 0)\in U\times V$ be an approximate solution such that
\begin{equation}\label{igs2}\|\Upsilon(\Omega_0) - \Omega_0\|_{{\bf L}^2({\mathcal D})}~\leq~{\delta_2\sqrt{\lambda_1\varepsilon_0}\over 2}\,.\end{equation}
Then an exact solution $\overline\Omega$ exists, with
\begin{equation}\label{OO}
\|\overline\Omega-\Omega_0\|_{{\bf L}^2({\mathcal D})}~\leq~\delta_2\,.
\end{equation}
\end{theorem}
\vskip 1em
{\bf Acknowledgment.} The authors would like to thank Ludmil Zikatanov for useful discussions.
\vskip 1em
\begin{thebibliography}{99}
\bibitem{Berger}
M.~S.~Berger,
{\it Nonlinearity and Functional Analysis.} Academic Press, New York, 1977.
\bibitem{BH}
F.~Bernicot and T.~Hmidi,
On the global well-posedness for Euler equations with unbounded vorticity.
{\it Dyn. Partial Differ. Equ.} {\bf 12} (2015), 127--155.
\bibitem{BRS} S.~C.~Brenner and L.~Ridgway Scott,
{\it The Mathematical Theory of Finite Element Methods}, Third Edition, Springer, New York, 2008.
\bibitem{BFA} A.~Bressan, {\it Lecture Notes on Functional Analysis, with Applications to Linear Partial Differential Equations.} American Mathematical Society Graduate Studies in Mathematics Vol.~143, Providence, RI, 2013.
\bibitem{BM} A.~Bressan and R.~Murray, On the construction of self-similar solutions
to the Euler equations, to appear.
\bibitem{Ciarlet} P.~Ciarlet and C.~Mardare, On the Newton-Kantorovich theorem.
{\it Analysis \& Applications} {\bf 10} (2012), 249--269.
\bibitem{Cle} P.~Cl\'ement, Approximation by finite element functions using local regularization,
{\it Rev. Francaise Automat. Informat. Rech. Op\'erat.}, S\'er.~{\bf 9} (1975), 77--84.
\bibitem{Dan} S.~Daneri, Cauchy problem for dissipative H\"older solutions to the incompressible Euler equations.
{\it Comm. Math. Phys.} \textbf{329} (2014), 745--786.
\bibitem{DRS} S.~Daneri, E.~Runa and L.~Sz\'ekelyhidi,
Non-uniqueness for the Euler equations up to Onsager's critical exponent. To appear.
\bibitem{DanSz}
S.~Daneri and L.~Sz\'ekelyhidi,
Non-uniqueness and $h$-principle for H\"older-continuous weak solutions of the Euler equations.
{\it Arch. Rat. Mech. Anal.} \textbf{224} (2017), 471--514.
\bibitem{DS09} C.~De Lellis and L.~Sz\'ekelyhidi,
The Euler equations as a differential inclusion.
{\it Ann. Math.} \textbf{170} (2009), 1417--1436.
\bibitem{DS10} C.~De Lellis and L.~Sz\'ekelyhidi,
On admissibility criteria for weak solutions of the Euler equations.
{\it Arch. Rat. Mech. Anal.} \textbf{195} (2010), 225--260.
\bibitem{DS13} C.~De Lellis and L.~Sz\'ekelyhidi,
Dissipative continuous Euler flows. {\it Invent. Math.} \textbf{193} (2013), 377--407.
\bibitem{Deimling}
K.~Deimling,
{\it Nonlinear Functional Analysis.} Springer-Verlag, Berlin, 1985.
\bibitem{E1} V.~Elling,
Algebraic spiral solutions of 2d incompressible Euler.
{\it
J.~Differential Equat.} {\bf 255} (2013), 3749--3787.
\bibitem{E2} V.~Elling,
Self-similar 2d Euler solutions with mixed-sign vorticity.
{\it Commun. Math. Phys.} {\bf 348} (2016), 27--68.
\bibitem{E3} V.~Elling,
Algebraic spiral solutions of the 2d incompressible Euler equations.
{\it Bull. Brazilian Math. Soc.} {\bf 47} (2016), 323--334.
\bibitem{Evans}
L.~C.~Evans, {\it Partial Differential Equations.} Second edition.
American Mathematical Society, Providence, RI, 2010.
\bibitem{GT} D.~Gilbarg and N.~S.~Trudinger,
{\it Elliptic Partial Differential Equations of Second Order},
Springer, New York, 2001.
\bibitem{KS} J.~R.~Kuttler and V.~G.~Sigillito, Eigenvalues of the Laplacian in
two dimensions.
{\it SIAM Review} {\bf 26} (1984), 163--193.
\bibitem{MP} C.~Marchioro and M.~Pulvirenti,
{\it Mathematical Theory of Incompressible Nonviscous Fluids.}
Springer-Verlag, New York, 1994.
\bibitem{Sch} V.~Scheffer, An inviscid flow with compact support in space-time.
{\it J. Geom. Anal.} \textbf{3} (1993), 343--401.
\bibitem{Shn} A.~Shnirelman,
On the nonuniqueness of weak solution of the Euler equation.
{\it Comm. Pure Appl. Math.} \textbf{50} (1997), 1261--1286.
\bibitem{V} M.~Vishik,
Instability and non-uniqueness in the Cauchy problem for the Euler equations of an ideal incompressible fluid. Part
I and II. arxiv.org/pdf/1805.09426.
\bibitem{Y} V.~Yudovich, Non-stationary flow of an ideal incompressible liquid.
{\it Comp. Math. Math. Phys.} {\bf 3} (1963), 1407--1457.
\end{thebibliography}
\end{document}
| 2,913 | 27,931 |
en
|
train
|
0.100.0
|
\begin{document}
\title{
On generalized Howell designs with block size three}
\footnotetext[1]{School of Mathematics and Statistics, University of New South Wales, Sydney, NSW 2052, Australia. Email: \texttt{[email protected]}}
\footnotetext[2]{Division of Science (Mathematics), Grenfell Campus, Memorial University of Newfoundland, Corner Brook, NL A2H 6P9, Canada. Email: \texttt{[email protected]}}
\footnotetext[3]{Department of Mathematical Sciences, University of New Brunswick, 100 Tucker Park Rd., Saint John, NB E2L 4L5, Canada. Email: \texttt{[email protected]}}
\footnotetext[4]{Department of Mathematics, Ryerson University, 350 Victoria St., Toronto, ON M5B 2K3, Canada. Email: \texttt{[email protected], [email protected]}}
\footnotetext[5]{Supported by Vice-President (Grenfell Campus) Research Fund, Memorial University of Newfoundland.}
\footnotetext[6]{Supported by an NSERC Discovery Grant.}
\renewcommand{\thefootnote}{\arabic{footnote}}
\begin{abstract}
In this paper, we examine a class of doubly resolvable combinatorial objects. Let $t, k, \lambda, s$ and $v$ be nonnegative integers, and let $X$ be a set of $v$ symbols. A {\em generalized Howell design}, denoted $t$-$\mathrm{GHD}_{k}(s,v;\lambda)$, is an $s\times s$ array, each cell of which is either empty or contains a $k$-set of symbols from $X$, called a {\em block}, such that:
(i) each symbol appears exactly once in each row and in each column (i.e.\ each row and column is a resolution of $X$);
(ii) no $t$-subset of elements from $X$ appears in more than $\lambda$ cells.
Particular instances of the parameters correspond to Howell designs, doubly resolvable balanced incomplete block designs (including Kirkman squares), doubly resolvable nearly Kirkman triple systems, and simple orthogonal multi-arrays (which themselves generalize mutually orthogonal Latin squares). Generalized Howell designs also have connections with permutation arrays and multiply constant-weight codes.
In this paper, we concentrate on the case that $t=2$, $k=3$ and $\lambda=1$, and write $\mathrm{GHD}(s,v)$. In this case, the number of empty cells in each row and column falls between 0 and $(s-1)/3$.
Previous work has considered the existence of GHDs on either end of the spectrum, with at most 1 or at least $(s-2)/3$ empty cells in each row or column.
In the case of one empty cell, we correct some results of Wang and Du, and show that there exists a $\mathrm{GHD}(n+1,3n)$ if and only if $n \geq 6$, except possibly for $n=6$.
In the case of two empty cells, we show that there exists a $\mathrm{GHD}(n+2,3n)$ if and only if $n \geq 6$.
Noting that the proportion of cells in a given row or column of a $\mathrm{GHD}(s,v)$ which are empty falls in the interval $[0,1/3)$, we prove that for any $\pi \in [0,5/18]$, there
is a $\mathrm{GHD}(s,v)$ whose proportion of empty cells in a row or column is arbitrarily close to $\pi$.
\end{abstract}
{\bf Keywords:} Generalized Howell designs, triple systems, doubly resolvable designs.
{\bf MSC2010:} Primary: 05B07, 05B15; Secondary: 05B40, 94B25
| 986 | 67,737 |
en
|
train
|
0.100.1
|
\section{Introduction}
Combinatorial designs on square arrays have been the subject of much attention, with mutually orthogonal Latin squares being the most natural example. Block designs with two orthogonal resolutions can also be thought of in this way, with the rows and columns of the array labelled by the resolution classes. In this paper, we consider generalized Howell designs, which are objects that in some sense fill in the gap between these two cases.
We refer the reader to \cite{Handbook} for background on these objects and design theory in general.
\subsection{Definition and examples} \label{DefnSection}
In this paper, we examine a class of doubly resolvable designs, defined below, which generalize a number of well-known objects.
\begin{definition}
Let $t, k, \lambda, s$ and $v$ be nonnegative integers, and let $X$ be a set of $v$ symbols. A {\em generalized Howell design}, denoted $t$-$\mathrm{GHD}_{k}(s,v;\lambda)$, is an $s\times s$ array, each cell of which is either empty or contains a $k$-set of symbols from $X$, called a {\em block}, such that:
\begin{enumerate}
\item
\label{latin}
each symbol appears exactly once in each row and in each column (i.e.\ each row and column is a resolution of $X$);
\item
no $t$-subset of elements from $X$ appears in more than $\lambda$ cells.
\end{enumerate}
\end{definition}
In the case that $t=2$, $k=2$ and $\lambda=1$, a $2$-$\mathrm{GHD}_2(s,v;1)$ is known as a {\em Howell design} $\mathrm{H}(s,v)$. In the literature, two different generalizations
of Howell designs have been proposed, both of which can be incorporated into the definition above: these are due to Deza and Vanstone~\cite{DezaVanstone} (which
corresponds to the case that $t=2$) and to Rosa~\cite{Rosa} (which corresponds to the case that $t=k$). The objects in question have appeared under several names
in the literature. The term {\em generalized Howell design} appears in both~\cite{DezaVanstone} and~\cite{Rosa} in reference to the particular cases studied in these
papers, and also in papers such as~\cite{WangDu}. The term {\em generalized Kirkman square} or GKS has more recently been introduced to the literature (see~\cite{DuAbelWang,Etzion});
in particular, in these papers, a $\mathrm{GKS}_k(v;1,\lambda;s)$ is defined to be what we have defined as a $2$-$\mathrm{GHD}_k(s,v;\lambda)$.
In this paper we will concentrate on the case when $t=2$, $k=3$ and $\lambda=1$, in which case we omit these parameters in the notation and simply write $\mathrm{GHD}(s,v)$, or $\mathrm{GHD}_k(s,v)$ for more general $k$.
Two obvious necessary conditions for the existence of a non-trivial $2$-$\mathrm{GHD}_k(s,v;\lambda)$ are that $v\equiv 0 \pmod k$ and that $\frac{v}{k}\leq s \leq \frac{\lambda(v-1)}{k-1}$. In particular, when $k=3$, $t=2$ and $\lambda=1$, we have that $\frac{v}{3}\leq s \leq \frac{v-1}{2}$. Since a $t$-$\mathrm{GHD}_k(s,v;\lambda)$ contains exactly $n=\frac{v}{k}$ non-empty cells in each row and column, it can be helpful to write $t$-$\mathrm{GHD}_k(n+e, kn;\lambda)$ (or $\mathrm{GHD}(n+e, 3n)$ in the case $k=3$, $t=2$ and $\lambda=1$), where $e$ is then the number of empty cells in each row and column.
A Howell design with $v=s+1$, i.e. an $\mathrm{H}(s,s+1)$ is called a {\em Room square}. The study of Room squares goes back to the original work of Kirkman \cite{Kirkman} in 1850, where
he presents a Room square with side length~$7$, i.e.\ an $\mathrm{H}(7,8)$. The name of this object, however, is in reference to T.~G.~Room~\cite{Room}, who also constructed an $\mathrm{H}(7,8)$, and in
addition showed that there is no $\mathrm{H}(3,4)$ or $\mathrm{H}(5,6)$. The existence of Room squares was settled in 1975 by Mullin and Wallis \cite{Mullin}; for a survey on Room squares see \cite{blue book}.
\begin{theorem}[Mullin and Wallis \cite{Mullin}]
\label{Room squares}
There exists a Room square of side $s$ if and only if $s$ is odd and either $s=1$ or $s \geq 7$.
\end{theorem}
More generally, Stinson \cite{Stinson} showed existence of Howell designs with odd side $s$ and Anderson, Schellenberg and Stinson \cite{ASS} showed existence of Howell designs with even side $s$.
We thus have the following.
\begin{theorem}[Stinson \cite{Stinson}; Anderson, Schellenberg and Stinson \cite{ASS}]
\label{HD}
There exists an $\mathrm{H}(s,2n)$ if and only if $n=0$ or $n \leq s \leq 2n-1$, $(s,2n)\not\in \{(2,4), (3,4), (5,6), (5,8)\}$.
\end{theorem}
A $2$-$\mathrm{GHD}_k(s,v;\lambda)$ with $s=\frac{\lambda(v-1)}{k-1}$ is equivalent to a {\em doubly resolvable balanced incomplete block design} $\mathrm{BIBD}(v,k,\lambda)$. Doubly resolvable designs and related objects have been studied, for example, in~\cite{OCD, Curran Vanstone, Fuji-Hara Vanstone, Fuji-Hara Vanstone TD, Vanstone AG, Lamken 09, Vanstone 80, Vanstone 82}. In particular, Fuji-Hara and Vanstone investigated orthogonal resolutions in affine geometries, showing the existence of a doubly resolvable $\mathrm{BIBD}(q^n,q,1)$ for prime powers $q$ and integers $n \geq 3$. Asymptotic existence of doubly resolvable $\mathrm{BIBD}(v,k,1)$ was shown by Rosa and Vanstone~\cite{Rosa Vanstone} for $k=3$ and by Lamken~\cite{Lamken 09} for general $k$.
For $t=2$, $k=3$ and $\lambda=1$, a $\mathrm{GHD}(s,v)$ with $s = \lfloor\frac{v-1}{2}\rfloor$ corresponds to a {\em Kirkman square}, $\mathrm{KS}(v)$, (i.e.\ a doubly resolvable Kirkman triple system of order $v$) when $v \equiv 3 \pmod 6$ and a {\em doubly resolvable nearly Kirkman triple system}, $\mathrm{DRNKTS}(v)$, when $v\equiv 0 \pmod 6$. A Kirkman square of order $3$ is trivial to construct. Mathon and Vanstone~\cite{Mathon Vanstone} showed the non-existence of a $\mathrm{KS}(9)$ or $\mathrm{KS}(15)$, while the non-existence of a $\mathrm{DRNKTS}(6)$ and $\mathrm{DRNKTS}(12)$ follows from Kotzig and Rosa~\cite{KotzigRosa74}.
For many years, the smallest known example of a $\mathrm{GHD}(s,v)$ with $s = \lfloor\frac{v-1}{2}\rfloor$ (other than the trivial case of $s=1$, $v=3$) was a $\mathrm{DRNKTS}(24)$,
found by Smith in 1977~\cite{Smith77}. However, the smallest possible example of such a GHD
with $v\equiv 0 \pmod 6$
is for $v=18$ and $s=8$; these were recently obtained and classified up to isomorphism in~\cite{Finland}, from which
the following example is taken.
\begin{example} \label{FinlandExample}
The following is a $\mathrm{GHD}(8,18)$, or equivalently a $\mathrm{DRNKTS}(18)$.
\renewcommand{\arraystretch}{1.25}
\[
\begin{array}{|c|c|c|c|c|c|c|c|} \hline
& & agh & bkm & ejp & fir & clq & dno \\ \hline
& & bln & aij & cho & dgq & ekr & fmp \\ \hline
acd & boq & & & gmr & hkp & fjn & eil \\ \hline
bpr & aef & & & inq & jlo & dhm & cgk \\ \hline
fgl & dik & emq & cnr & & & aop & bhj \\ \hline
ehn & cjm & fko & dlp & & & bgi & aqr \\ \hline
imo & gnp & djr & fhq & akl & bce & & \\ \hline
jkq & hlr & cip & ego & bdf & amn & & \\ \hline
\end{array}
\]
\renewcommand{\arraystretch}{1.0}
\end{example}
The existence of Kirkman squares was settled by Colbourn, Lamken, Ling and Mills in \cite{CLLM}, with 23 possible exceptions, 11 of which where solved in \cite{ALW} and one ($v=351$) was solved in~\cite{ACCLWW}.
Abel, Chan, Colbourn, Lamken, Wang and Wang \cite{ACCLWW} have determined the existence of doubly resolvable nearly Kirkman triple systems, with 64 possible exceptions. We thus have the following result.
\begin{theorem}[\cite{ACCLWW,ALW,CLLM}]
\label{Kirkman squares}
Let $v \equiv 0 \pmod{3}$ be a positive integer. Then a $\mathrm{GHD}(\lfloor\frac{v-1}{2}\rfloor,v)$ exists if and only if
either $v=3$ or $v\geq 18$, with 75 possible exceptions. The symbol $N$ will be used to denote this set of possible
exceptions throughout the paper.
\end{theorem}
Recently, Du, Abel and Wang~\cite{DuAbelWang} have proved the existence of $\mathrm{GHD}(\frac{v-4}{2},v)$ for $v \equiv 0 \pmod{12}$ with at most 15 possible exceptions and $\mathrm{GHD}(\frac{v-6}{2},v)$ for $v \equiv 0 \pmod{6}$ and $v>18$ with at most 31 possible exceptions. Two of these possible exceptions ($\mathrm{GHD}(9,24)$ and $\mathrm{GHD}(12,30)$) will be addressed later in this paper.
At the other end of the spectrum, the case when $s=n$, $v=kn$, $t=2$ and $\lambda=1$ is known as a $\mathrm{SOMA}(k,n)$. SOMAs, or {\em simple orthogonal multi-arrays}, were introduced by Phillips and Wallis~\cite{PhillipsWallis} and have been investigated by Soicher~\cite{Soicher} and Arhin~\cite{ArhinThesis,Arhin}. We note that the existence of a $2$-$\mathrm{GHD}_{k}(n,kn;1)$ is guaranteed by the existence of $k$ mutually orthogonal Latin squares (MOLS) of side $n$. (For more information on Latin squares and MOLS, see~\cite[Part III]{Handbook}.) It is well known that there exist 3 MOLS of side $n$ for every $n\neq 2,3,6,10$. Interestingly, even though the corresponding set of 3 MOLS is not known to exist (and is known not to for $n=2,3,6$), the existence of a $\mathrm{GHD}(6, 18)$ and $\mathrm{GHD}(10, 30)$ has been shown by Brickell~\cite{brickell} and Soicher~\cite{Soicher}, respectively. A $\mathrm{GHD}(1,3)$ exists trivially but it is easily seen that the existence of a $\mathrm{GHD}(2,6)$ or $\mathrm{GHD}(3,9)$ is impossible.
\begin{theorem} \label{618}
\label{Latin}
There exists a $\mathrm{GHD}(n,3n)$ if and only if $n =1$ or $n \geq 4$.
\end{theorem}
\subsection{GHDs, permutation arrays and codes}
\label{PA-code}
In~\cite{DezaVanstone}, Deza and Vanstone noted $2$-$\mathrm{GHD}_k(s,v;\lambda)$ are equivalent to a particular type of permutation array.
\begin{definition}
A {\em permutation array} $\mathrm{PA}(s,\lambda;v)$ on a set $S$ of $s$ symbols is an $v \times s$ array such that each row is a permutation of $S$ and any two rows agree in at most $\lambda$ columns. A permutation array in which each pair of rows agree in exactly $\lambda$ symbols is called {\em equidistant}. If, in any given column, each symbol appears either $0$ or $k$ times, then a permutation array is said to be {\em $k$-uniform}.
\end{definition}
The rows of a permutation array form an error-correcting code with minimum distance $s-\lambda$. Codes formed in this manner, called {\em permutation codes}, have more recently attracted attention due to their applications to powerline communications; see~\cite{ChuColbournDukes, ColbournKloveLing, Huczynska}. In~\cite{DezaVanstone}, it is noted that a $2$-$\mathrm{GHD}_k(s,v;\lambda)$ is equivalent to a $k$-uniform $\mathrm{PA}(s,\lambda;v)$. In the case that $s=\frac{\lambda(v-1)}{k-1}$, so that the $\mathrm{GHD}$ is a doubly resolvable BIBD, the permutation array is equidistant.
There are also known connections between GHDs and other classes of codes. A {\em doubly constant weight code} is a binary constant-weight code with length $n=n_1+n_2$ and weight $w=w_1+w_2$, where each codeword has $w_1$ 1s in the first $n_1$ positions and $w_2$ 1s in the final $n_2$ positions. Such a code with minimum distance $d$ is referred to as a $(w_1,n_1,w_2,n_2,d)$-code. In~\cite{Etzion}, Etzion describes connections between certain classes of designs and optimal doubly constant weight codes. In particular, a $2$-$\mathrm{GHD}_k(s,v;1)$ gives a $(2,2s,k,v,2k+2)$-code.
| 3,843 | 67,737 |
en
|
train
|
0.100.2
|
\subsection{GHDs, permutation arrays and codes}
\label{PA-code}
In~\cite{DezaVanstone}, Deza and Vanstone noted $2$-$\mathrm{GHD}_k(s,v;\lambda)$ are equivalent to a particular type of permutation array.
\begin{definition}
A {\em permutation array} $\mathrm{PA}(s,\lambda;v)$ on a set $S$ of $s$ symbols is an $v \times s$ array such that each row is a permutation of $S$ and any two rows agree in at most $\lambda$ columns. A permutation array in which each pair of rows agree in exactly $\lambda$ symbols is called {\em equidistant}. If, in any given column, each symbol appears either $0$ or $k$ times, then a permutation array is said to be {\em $k$-uniform}.
\end{definition}
The rows of a permutation array form an error-correcting code with minimum distance $s-\lambda$. Codes formed in this manner, called {\em permutation codes}, have more recently attracted attention due to their applications to powerline communications; see~\cite{ChuColbournDukes, ColbournKloveLing, Huczynska}. In~\cite{DezaVanstone}, it is noted that a $2$-$\mathrm{GHD}_k(s,v;\lambda)$ is equivalent to a $k$-uniform $\mathrm{PA}(s,\lambda;v)$. In the case that $s=\frac{\lambda(v-1)}{k-1}$, so that the $\mathrm{GHD}$ is a doubly resolvable BIBD, the permutation array is equidistant.
There are also known connections between GHDs and other classes of codes. A {\em doubly constant weight code} is a binary constant-weight code with length $n=n_1+n_2$ and weight $w=w_1+w_2$, where each codeword has $w_1$ 1s in the first $n_1$ positions and $w_2$ 1s in the final $n_2$ positions. Such a code with minimum distance $d$ is referred to as a $(w_1,n_1,w_2,n_2,d)$-code. In~\cite{Etzion}, Etzion describes connections between certain classes of designs and optimal doubly constant weight codes. In particular, a $2$-$\mathrm{GHD}_k(s,v;1)$ gives a $(2,2s,k,v,2k+2)$-code.
\subsection{GHDs as generalized packing designs}
The notion of {\em generalized $t$-designs} introduced by Cameron in 2009 \cite{Cameron09}, and the broader notion of {\em generalized packing designs} introduced by Bailey and Burgess \cite{packings}, provide a common framework for studying many classes of combinatorial designs. As described below, generalized Howell designs fit neatly into this framework. Recently, Chee, Kiah, Zhang and Zhang~\cite{CKZZ} noted that generalized packing designs are equivalent to {\em multiply constant-weight codes}, introduced in~\cite{Chee2}, which themselves generalize doubly constant-weight codes as discussed in Section~\ref{PA-code}.
Suppose that $v,k,t,\lambda,m$ are integers where $v \geq k \geq t \geq 1$, $\lambda \geq 1$ and $m\geq 1$. Let $\mathbf{v} = (v_1,v_2,\ldots,v_m)$ and $\mathbf{k}=(k_1,k_2,\ldots,k_m)$ be $m$-tuples
of positive integers with sum $v$ and $k$ respectively, where $k_i\leq v_i$ for all $i$, and let $\mathbf{X} = (X_1,X_2,\ldots,X_m)$ be an $m$-tuple of pairwise disjoint sets, where $|X_i|=v_i$.
We say that an $m$-tuple of {\em non-negative} integers $\mathbf{t}=(t_1,t_2,\ldots,t_m)$ is {\em admissible} if $t_i\leq k_i$ for all $i$ and the entries $t_i$ sum to $t$.
Now we define a {\em $t$-$(\mathbf{v},\mathbf{k},\lambda)$ generalized packing design} to be a collection $\mathcal{P}$ of $m$-tuples of sets $(B_1,B_2,\ldots,B_m)$, where $B_i\subseteq X_i$ and $|B_i|=k_i$ for all $i$,
with the property that for any admissible $\mathbf{t}$, any $m$-tuple of sets $(T_1,T_2,\ldots,T_m)$ (where $T_i\subseteq X_i$ and $|T_i|=t_i$ for all $i$) is contained in at
most $\lambda$ members of $\mathcal{P}$. We say a generalized packing is {\em optimal} if it contains the largest possible number of blocks. (See~\cite{packings} for further details, and for numerous examples.)
The connection with GHDs is the following: any $2$-$\mathrm{GHD}_k(s,n)$ forms an optimal $2$-$(\mathbf{v},\mathbf{k},1)$ generalized packing, where $\mathbf{v}=(n,s,s)$ and $\mathbf{k}=(k,1,1)$. The ``point set'' of the
generalized packing is formed from the points of the GHD, together with the row labels and the column labels. Since $t=2$, the only possible admissible
triples $\mathbf{t}$ are $(2,0,0)$, $(1,0,1)$, $(0,1,0)$ and $(0,1,1)$. The first of these tells us that no pair of points may occur in more than one block of the GHD; the second and third tell us
that no point may be repeated in any row or any column; the last tells us that any entry of the $s\times s$ array may contain only one block of the GHD.
In fact, GHDs may be used to obtain $2$-$(\mathbf{v},\mathbf{k},1)$ generalized packings for $\mathbf{k}=(k,1,1)$ more generally, for more arbitrary $\mathbf{v}$. If $\mathbf{v}=(n,r,s)$, we may obtain a rectangular array
by (for instance) deleting rows or adding empty columns. Depending on the value of $n$ relative to $r,s$, we may need to delete points and/or blocks. This idea is discussed
further in~\cite[Section 3.5]{packings}.
| 1,609 | 67,737 |
en
|
train
|
0.100.3
|
\section{Terminology}
\subsection{Designs and resolutions}
In this section, we discuss various useful classes of combinatorial designs. For more information on these and related objects, see~\cite{Handbook}.
We say that $(X, \cB)$ is a {\em pairwise balanced design} PBD$(v, K,\lambda)$ if $X$ is a set of $v$ elements and $\cB$ is a collection of subsets of $X$, called {\em blocks}, which between them contain every pair of elements of $X$ exactly $\lambda$ times. If $K=\{k\}$, then a $\mathrm{PBD}(v,\{k\},\lambda)$ is referred to as a {\em balanced incomplete block design}, $\mathrm{BIBD}(v,k,\lambda)$. A collection of blocks that between them contain each point of $X$ exactly once is called a {\em resolution class} of $X$. If $\cB$ can be partitioned into resolution classes we say that the design is {\em resolvable}, and refer to the partition as a {\em resolution}.
It is possible that a design may admit two resolutions, $\cR$ and $\cS$. If $|R_i\cap S_j| \leq 1$, for every resolution class $R_i\in \cR$ and $S_j\in \cS$, we say that these resolutions are {\em orthogonal}. A design admitting a pair of orthogonal resolutions is called {\em doubly resolvable}.
In the definition of a PBD or BIBD, if we relax the constraint that every pair appears exactly once to that every pair appears at most once, then we have a {\em packing} design. Thus, a $2$-$\mathrm{GHD}_k(s,v;\lambda)$ may be viewed as a doubly resolvable packing design.
A {\em group-divisible design}, $(k,\lambda)$-GDD of type $g^u$, is a triple $(X,\cG, \cB)$, where $X$ is a set of $gu$ points, $\cG$ is a partition of $X$ into $u$ subsets, called {\em groups}, of size $g$, and $\cB$ is a collection of subsets, called {\em blocks}, of size $k$, with the properties that no pair of points in the same group appears together in a block, and each pair of points in different groups occurs in exactly one block. A $(k,1)$-GDD of type $n^k$ is called a {\em transversal design}, and denoted $\mathrm{T}(k,n)$.
Note that a transversal design can be interpreted as a packing which misses the pairs contained in the groups. Alternatively, adding the groups as blocks, we may form from a transversal design a $\mathrm{PBD}(kn,\{k,n\})$. PBDs formed in this way will often be used as ingredients in the construction of GHDs in later sections.
A resolvable transversal design with block size $k$ and group size $n$ is denoted $\mathrm{RT}(k,n)$. It is well known that a set of $k$ MOLS of side $n$ is equivalent to an $\mathrm{RT}(k+1,n)$.
\subsection{Subdesigns and holes}
The word {\em hole} is used with different meanings in different areas of design theory. For example, a hole in a BIBD refers to a set $H$ of points such that no pair of elements of $H$ appears in the blocks, while a hole in a Latin square means an empty subsquare, so that certain rows and columns do not contain all symbols. In the case of GHDs, three types of ``hole'' may occur, and each will be of importance later in the paper. We thus develop appropriate terminology to differentiate the uses of the term ``hole''.
First, a {\em pairwise hole} in a GHD is a set of points $H \subseteq X$ with the property that no pair of points of $H$ appear in a block of the GHD. Thus, a pairwise hole corresponds with the notion of a hole in a BIBD.
\begin{definition}
A {\em $\mathrm{GHD}_k^*(s,v)$}, $\cal{G}$, is a $2$-$\mathrm{GHD}_k(s,v;1)$ with a pairwise hole of size $v-s(k-1)$. Thus the symbol set $X$ of a $\mathrm{GHD}_k^*(s,v)$ can be written as
\[
X=Y \cup (S \times \{1,2,\ldots,k-1\}),
\]
where $|S|=s$, $|Y|=v-s(k-1)$, and $Y$ is a pairwise hole of $\cal{G}$.
\end{definition}
Note that this definition extends in a natural way to higher $\lambda$; however, for our purposes it will be enough to only consider the case that $\lambda=1$.
Also note that in the case that $k=2$, a $\mathrm{GHD}_2^*(s,v)$ is precisely the $\mathrm{H}^*(s,v)$ described by Stinson~\cite{Stinson}.
We will refer to any $\mathrm{GHD}_k^*(s,v)$ as having the {\em $*$-property}. As our primary focus is on the case that $k=3$, we will omit the subscript $k$ in this case. Note that trivially
any $\mathrm{GHD}(s,2s+1)$ (i.e.\ Kirkman square) has the $*$-property. It is also clear that any $\mathrm{GHD}(s,2s+2)$ (i.e.\ DRNKTS) has the \mbox{$*$-property,} as the values of the parameters force
there to be an unused pair of points.
In the case of a $\mathrm{GHD}(s,3s)$ formed by superimposing three $\mathrm{MOLS}(s)$, the unused pairs of points are those which occur within the three groups of size $s$ in the corresponding transversal design;
one of these groups may be taken as $Y$, so any $\mathrm{GHD}(s,3s)$ formed in this manner is a $\mathrm{GHD}^*(s,3s)$.
In addition, the existence of a decomposable $\mathrm{SOMA}(3,6)$~\cite{PhillipsWallis} and $\mathrm{SOMA}(3,10)$~\cite{Soicher} yield the existence of $\mathrm{GHD}^*(6,18)$ and $\mathrm{GHD}^*(10,30)$. Thus, we have the following.
\begin{theorem} \label{GHD*}
\begin{enumerate}
\item[(i)] There exists a $\mathrm{GHD}^*\left(\left\lfloor \frac{v-1}{2} \right\rfloor,v \right)$ for any $v\equiv 0 \pmod 3$, whenever $v=3$ or $v \geq 18$ and $v \notin N$.
\item[(ii)] There exists a $\mathrm{GHD}^*(n,3n)$ if and only if $n \neq 2$ or $3$.
\end{enumerate}
\end{theorem}
The second type of hole in a GHD is an {\em array hole}. To define it, we need the concepts of trivial GHDs and subdesigns of GHDs. A $t$-$\mathrm{GHD}_k(s,0;\lambda)$ is called {\em trivial}; that is, a trivial GHD is an empty $s \times s$ array. If a $t$-$\mathrm{GHD}_k(s,v;\lambda)$, $\cG$, has a subarray $\cH$ which is itself a $t$-$\mathrm{GHD}_k(s',v';\lambda')$ for some $s'\leq s$, $v' \leq v$ and $\lambda' \leq \lambda$ and whose point set is a pairwise hole, then we say that $\cH$ is a {\em subdesign} of $\cG$. In particular, if the subdesign $\cH$ is trivial, then we call $\cH$ an {\em array hole} in $\cG$, and say that $\cG$ has an array hole of size $s'$.
There is a third type of hole that may occur, a {\em Latin hole}, which is a set of elements $H \subseteq X$ and sets $R$, $C$ of rows and columns, respectively, such that each row in $R$ (resp.\ column in $C$) contains no points in $H$, but all points in $X \setminus H$.
The concepts of array holes and Latin holes may coincide when there is an array hole in a Latin square, and each row and column intersecting the array hole misses the same subset $H$ of the point set. This is often referred to in the literature as a Latin square with a hole.
The notation $t$~$\mathrm{IMOLS}(s,a)$ is used for $t~\mathrm{MOLS}(s)$, each of which has a hole of side length $a$ on the same set of positions. Also, the notation $t~\mathrm{IMOLS}(s,a,b)$ is used for a set of $t~\mathrm{MOLS}(s)$ with two disjoint holes of
orders $a$ and $b$, with two extra properties: (1) the two Latin holes have no symbols in common; (2) no common row or column within the larger square intersects both array holes.
Latin holes and array holes also both feature in the concept of frames, which are described in Section~\ref{FrameConstr}.
| 2,257 | 67,737 |
\section{Construction methods} \label{ConstrSection}
\subsection{Frames} \label{FrameConstr}
In this section, we discuss a useful method, that of frames, which will allow us to construct infinite families of GHDs. The central idea has been used to construct GHDs on both ends of the
spectrum. However, the terminology in the literature varies: for MOLS and GHDs with few empty cells, authors often refer to HMOLS~\cite{AbelBennettGe,BennettColbournZhu,WangDu}, while for doubly
resolvable designs authors often speak of frames~\cite{CLLM, blue book}. We use the latter terminology, as it is more easily adaptable to more general kinds of GHDs. First we begin with a definition.
\begin{definition} \label{Frame Definition}
Let $s$ and $v$ be integers, $X$ be a set of $v$ points, $\{G_1, \ldots, G_n\}$ be a partition of $X$, and $g_i=|G_i|$. Also let $s_1, \ldots, s_n$ be non-negative integers
with $\sum_{i=1}^{n} s_i = s$. A {\em generalized Howell frame}, $\mathrm{GHF}_k$, of type $(s_1,g_1)\ldots (s_n, g_n)$ is a square array of side $s$, $A$, that has the following properties:
\begin{enumerate}
\item
Every cell of $A$ is either empty or contains a $k$-subset of elements of $X$.
The filled cells are called {\em blocks}.
\item
No pair of points from $X$ appear together in more than one block, and no pair of points in any $G_i$ appear together in any block.
\item
The main diagonal of $A$ consists of empty $s_i\times s_i$ subsquares, $A_i$.
\item
Each row and column of the array with empty diagonal entry $A_i$ is a resolution of $X\setminus G_i$.
\end{enumerate}
\end{definition}
We will use an exponential notation $(s_1,g_1)^{n_1}\ldots (s_t,g_t)^{n_t}$ to indicate that there are $n_i$ occurrences of $(s_i, g_i)$ in the partition. In the case that
$k=3$ and $g_i=3s_i$, we will refer to a GHF of type $s_1s_2\cdots s_n$, and will use exponential notation $s_1^{\alpha_1} \cdots s_r^{\alpha_r}$ to indicate that there are
$\alpha_i$ occurrences of $s_i$. Note that this concept of frame is analogous to the HMOLS used in~\cite{AbelBennettGe,BennettColbournZhu,WangDu}.
\begin{theorem} \cite{AbelBennett, AbelBennettGe} \label{Uniform Frames}
Let $h\geq 1$ and $u\geq 5$ be integers. Then there exists a GHF of type $h^u$ except for $(h,u)=(1,6)$, and possibly for $(h,u) \in \{(1,10), (3,28), (6,18)\}$.
\end{theorem}
The following two lemmas are similar to Lemmas 2.2 and 2.3 in \cite{BennettColbournZhu} where they were written in the language of transversal designs with holes and holey MOLS. More information
on the construction methods used to establish them can be found in \cite{BrouwerVanRees}. For the sake of clarity, we give a brief proof here of Lemma~\ref{n=38 lemma}; the proof of
Lemma~\ref{n=68 lemma} is similar.
\begin{lemma} \label{n=38 lemma}
Let $h,m$ be positive integers such that $4~\mathrm{MOLS}(m)$ exist. Let $v_1, v_2, \ldots, v_{m-1}$ be non-negative integers such that for
each $i=1,2,\ldots,m-1$, there exist $3~\mathrm{IMOLS}(h+v_i, v_i)$. Then for $v=v_1+v_2+\cdots+v_{m-1}$, there exists a GHF of type $h^mv^1$.
\end{lemma}
\begin{proof}
Take the first three $\mathrm{MOLS}(m)$ on disjoint symbol sets and superimpose them to form a $\mathrm{GHD}(m,3m)$, $A$. These MOLS possess $m$ disjoint
transversals, $T_0, T_1, \ldots, T_{m-1}$, since the cells occupied by any symbol in the fourth square form such a transversal. Permuting
rows and columns if necessary, we can assume that $T_0$ is on the main diagonal.
Form an $(hm+v)\times(hm+v)$ array $A'$ as follows. For $i,j=1,2,\ldots,m$, the subarray in rows $h(i-1)+1, \ldots, hi$ and
columns $h(j-1)+1,\ldots,hj$ corresponds to the $(i,j)$-cell in $A$.
We now fill the cells of $A'$. The $h \times h$ subsquares along the diagonal, corresponding to $T_0$, will remain empty. Next, consider the positions
arising from the transversals $T_1, \ldots, T_{m-1}$. Partition the last $v$ rows into $m-1$ sets $R_1, R_2,\ldots,R_{m-1}$, with $R_{\ell}$ containing
$v_{\ell}$ rows. Similarly partition the last $v$ columns into $C_1, \ldots, C_{m-1}$, where $C_{\ell}$ contains $v_{\ell}$ columns. Suppose that
in $A$, cell $(i,j)$, containing entry $\{x_{ij}, y_{ij}, z_{ij}\}$, is in $T_{\ell}$. In the entries of $A'$ arising from this cell, together with the
entries in columns $h(j-1)+1,\ldots,hj$ of $R_{\ell}$, the entries in rows $h(i-1)+1,\ldots,hi$ of $C_{\ell}$ and the $v_{\ell} \times v_{\ell}$
subsquare formed by the intersection of $R_{\ell}$ and $C_{\ell}$, place the superimposed three $\mathrm{IMOLS}(h+v_{\ell}, v_{\ell})$,
so that the missing hole is on the $v_{\ell} \times v_{\ell}$ subsquare formed by the intersection of $R_{\ell}$ and $C_{\ell}$.
For these superimposed MOLS, the symbol set is $(\{x_{ij}, y_{ij}, z_{ij}\} \times \mathbb{Z}_h) \cup \{ \infty_{\ell, 1},\infty_{\ell, 2}, \ldots,
\infty_{\ell, v_{\ell}}\}$, and the missing elements due to the hole are $\infty_{\ell, 1},\infty_{\ell, 2}, \ldots, \infty_{\ell, v_{\ell}}$.
It is straightforward to verify that the resulting array $A'$ is a GHF of type $h^mv^1$.
\end{proof}
\begin{lemma} \label{n=68 lemma}
Let $h$, $m$, $x$ and $t$ be positive integers. Suppose there exist $3+x~\mathrm{MOLS}(m)$ and $3~\mathrm{MOLS}(h)$. For each $1 \leq i \leq x-1$, let $w_i$ be a
non-negative integer such that there exist $3~\mathrm{IMOLS}(h+w_i, w_i)$. Then if $w=w_1+ w_2+ \cdots+w_{x-1}$, there exists a GHF of type $h^{m-1}(m+w)^1$.
\end{lemma}
To apply Lemmas~\ref{n=38 lemma} and \ref{n=68 lemma}, we need some information on existence of $3+x~\mathrm{MOLS}(m)$, $4~\mathrm{MOLS}(m)$ and $3~\mathrm{IMOLS}(h+a,a)$.
These existence results are given in the next two lemmas.
\begin{lemma} \label{n=14 lemma}
There exist $4~\mathrm{MOLS}(m)$ for all integers $m \geq 5$ except for $m=6$ and possibly for $m \in \{10,22\}$. Also, if $m$ is a prime power, there exist $m-1~\mathrm{MOLS}(m)$.
\end{lemma}
\begin{proof}
See \cite{Todorov} for $n=14$, \cite{Abel} for $n=18$ and \cite[Section III.3.6]{Handbook} for other values.
\end{proof}
\begin{lemma} \label{n=10 lemma} \cite[Section III.4.3]{Handbook}
Suppose $y,a$ are positive integers with $y \geq 4a$. Then $3~\mathrm{IMOLS}(y,a)$ exist, except for $(y,a) = (6,1)$, and possibly for $(y,a) = (10,1)$.
\end{lemma}
Most of the frames that we require in Section~\ref{TwoEmptySection} are obtained using Lemma~\ref{n=38 lemma}, but Theorem~\ref{Uniform Frames} and Lemma~\ref{n=68 lemma}
will also be used. The majority of frames required come from the following special cases of Lemma~\ref{n=38 lemma}.
\begin{lemma} \label{Frame 7^uv}
Let $h,m,v$ be integers with $m \geq 5,$ $m \notin \{6,10,22\}$. Then there exists a GHF of type $h^mv^1$ if either:
\begin{enumerate}
\item $h=6$ and $m-1 \leq v \leq 2(m-1)$;
\item $h\in \{7,8\}$ and $0 \leq v \leq 2(m-1)$;
\item $h\in \{9,11\}$, $0 \leq v \leq 3(m-1)$ and $(h,v) \neq (9,1)$.
\end{enumerate}
\end{lemma}
\begin{proof}
Apply Lemma~\ref{n=38 lemma}, and let $h$ and $m$ be as given in that lemma. The admissible values of $m$ guarantee the existence of $4~\mathrm{MOLS}(m)$ (see Lemma~\ref{n=14 lemma}).
The range for $v= \sum_{i=1}^{m-1} v_i$ is easily determined from the feasible values of $v_i$. We require that $3~\mathrm{IMOLS}(h+v_i, v_i)$ exist, so in light of Lemma~\ref{n=10 lemma}, we may take
$v_i \in \{1,2\}$ when $h=6$, $v_i \in \{0, 1,2\}$ when $h \in \{7,8\}$, $v_i \in \{0,2,3\}$ when $h=9$ and $v_i \in \{0,1,2,3\}$ when $h=11$.
\end{proof}
We next discuss how to construct GHDs from frames. The following
result is a generalization of the Basic Frame Construction for doubly resolvable designs; see~\cite{ACCLWW, CLLM, Lamken95}.
\begin{theorem} [Basic Frame Construction] \label{Frame Construction}
Suppose there exists a $\mathrm{GHF}_k$ of type $\prod_{i=1}^m (s_i,g_i)$. Moreover, suppose that for $1 \leq i \leq m$, there exists a $\mathrm{GHD}_k(s_i+e,g_i+u)$
and this design contains a $\mathrm{GHD}_k(e,u)$ as a subdesign for $1 \leq i \leq m-1$. Then there exists a \mbox{$\mathrm{GHD}_k(s+e,v+u)$,} where $s=\sum_{i=1}^m s_i$ and $v=\sum_{i=1}^m g_i$.
\end{theorem}
\begin{proof}
Suppose the $\mathrm{GHF}_k$ of type $\prod_{i=1}^m (s_i,g_i)$ has point set $X$ and groups $G_i$, $i=1,2,\ldots,m$. Let $U$ be a set of size $u$, disjoint from $X$, and take our new point set to be $X \cup U$. Now,
add $e$ new rows and columns. For each group $G_i$ with $1 \leq i \leq m-1$, fill the $s_i \times s_i$ subsquare corresponding to $G_i$, together with the $e$ new rows and columns, with a copy of the
$\mathrm{GHD}_k(s_i+e,g_i+u)$, with the sub-$\mathrm{GHD}_k(e,u)$ (containing the points in $U$) over the $e \times e$ subsquare which forms the intersection of the new rows and columns. See Figure~\ref{BasicFrameFigure}.
We then delete the blocks of the sub-$\mathrm{GHD}_k(e,u)$. Now, fill the $s_m \times s_m$ subsquare corresponding to $G_m$, together with the $e$ new rows and columns with the $\mathrm{GHD}_k(s_m+e,g_m+u)$.
\begin{figure}
\caption{The Basic Frame Construction.}
\label{BasicFrameFigure}
\end{figure}
We show that each point occurs in some cell of each row. In the $e$ new rows, the points in $U$ appear in the blocks from the $\mathrm{GHD}_k(s_m+e,g_m+u)$, and the points in each $G_i$ occur in the columns corresponding to the $\mathrm{GHD}_k(s_i+e,g_i+u)$ for $1 \leq i \leq m$. In a row which includes part of the $s_i \times s_i$ subsquare corresponding to the group $G_i$, the elements of $G_j$ ($j \neq i$) appear in the frame, while the elements of $G_i \cup U$ appear in the added $\mathrm{GHD}_k(s_i+e,g_i+u)$. In a similar way, each element occurs exactly once in every column.
\end{proof}
We will generally use the Basic Frame Construction with $u=0$, so that our ingredient GHDs have an $e \times e$ empty subsquare. For this special case, we have the following.
\begin{corollary} \label{FrameCorollary}
Suppose that: (1) there exists a $\mathrm{GHF}_k$ of type $\prod_{i=1}^m (s_i,g_i)$; (2) for each $i\in \{1,\ldots,m-1\}$ there exists a $\mathrm{GHD}_k(s_i+e,g_i)$ containing a trivial subdesign $\mathrm{GHD}_k(e,0)$;
and (3) there exists a $\mathrm{GHD}_k(s_m+e,g_m)$. Then there exists a $\mathrm{GHD}_k(s+e,v)$, where $s=\sum_{i=1}^m s_i$ and $v=\sum_{i=1}^m g_i$.
\end{corollary}
| 3,917 | 67,737 |
\subsection{Starter-adder method} \label{SA section}
In practice, to apply the Basic Frame Construction, Theorem~\ref{Frame Construction}, described in Section~\ref{FrameConstr}, we need to first obtain small GHDs with
sub-GHDs to use as ingredients in the construction. One important technique for constructing GHDs of small side length is the {\em starter-adder method}.
See~\cite{blue book} for further background and examples.
Let $G$ be a group, written additively, and consider the set $G\times\{0,1, \ldots,c\}$, which we think of as $c$ copies of $G$ labelled by subscripts. For
elements $g,h\in G$, a {\em pure $(i,i)$-difference} is an element of the form $\pm(g_i-h_i)$ (i.e.\ the subscripts are the same), while a {\em mixed $(i,j)$-difference}
is of the form $g_i-h_j$ with $i < j$. In both cases, subtraction is done in $G$, so that $g_i-h_j = g'_i - h'_j$ if and only if $g-h=g'-h'$ in $G$, for any choice of subscripts.
\pagebreak
\begin{definition} \label{Starter-Adder Definition}
Let $(G,+)$ be a group of order $n+x$, and let $X=(G \times\{0,1\}) \cup (\{\infty\} \times \{1,2, \ldots, n-2x\})$.
A {\em transitive starter-adder} for a $\mathrm{GHD}(n+x,3n)$ is a collection of triples $\mathcal{ST}=\{S_1, S_2, \ldots, S_{n}\}$, called a {\em transitive starter},
and a set $A=\{a_1, a_2, \ldots, a_{n}\}$, called an {\em adder}, of elements of $G$ with the following properties:
\begin{enumerate}
\item $\mathcal{ST}$ is a partition of $X$.
\item For any $i \in \{0,1\}$, each element of $G$ occurs at most once as a pure $(i,i)$ difference within a triple of $\mathcal{ST}$.
Likewise, each element of $G$ occurs at most once as a mixed $(0,1)$ difference within a triple of $\mathcal{ST}$.
If $|G|$ is even, no $g$ with order 2 in $G$ occurs as a pure $(i,i)$ difference for any $i$ in the triples of $\mathcal{ST}$.
\item Each $\infty_i$ occurs in one triple of the form $\{\infty_i, g_0, h_1\}$, where $g,h \in G$.
\item The sets $S_1+a_1, S_2+a_2, \ldots, S_{n}+a_{n}$ form a partition of $X$. Here $S_j+a_j$ is the triple formed by adding $a_j$ to the non-subscript part of each non-infinite element of $S_j$, and $\infty_i +a_j = \infty_i$ for any $i \in \{1,2, \ldots, n-2x\}$, $j \in \{1,2, \ldots, n\}$.
\end{enumerate}
\end{definition}
Note that if $G$ is not abelian, the element $a_j$ would be added on the right in each case. However, in this paper we will always take the group $G$ to be $\mathbb{Z}_{n+x}$.
Transitive starters and adders can be used to construct $\mathrm{GHD}(n+x,3n)$s in the following manner: label the rows and columns of the $(n+x) \times (n+x)$ array by the elements of $G$,
then in row $i$, place the triple $S_j+i$ in the column labelled as $i-a_j$. Thus, the first row of the array contains the blocks of the starter $\mathcal{ST}$, with
block $S_j$ in column $-a_j$; by the definition of a starter, these blocks form a resolution class. The remaining rows consist of translates of the first, with their
positions shifted, so that the first column contains the blocks $S_j+a_j$ for $1 \leq j \leq n$. By the definition of an adder, these blocks are pairwise disjoint and
thus also form a resolution class. The remaining columns are translates of the first. By construction, the two resolutions (into rows and into columns) are orthogonal.
Note also that the infinite points used in a transitive starter-adder construction for a $\mathrm{GHD}(n+x,3n)$ form a pairwise hole of size $n-2x = 3n - 2(n+x)$.
Therefore any $\mathrm{GHD}(n+x,3n)$ obtained from a transitive starter-adder construction possesses the $*$-property. We thus have the following theorem:
\begin{theorem}
If there exists a transitive starter $\mathcal{ST}$ and a corresponding adder $A$ for a $\mathrm{GHD}(n+x,3n)$ then there exists a $\mathrm{GHD}^*(n+x,3n)$.
\end{theorem}
\begin{example} \label{SAexample8}
For $s=10$, $v=24$, the following is a transitive starter and adder using $(\mathbb{Z}_{10}\times\{0,1\}) \cup \{\infty_0,\infty_1,\infty_2, \infty_3 \}$. (The terms in square brackets give the adder.)
\[
\begin{array}{@{}*{4}l@{}}
0_{1} 6_{1} 7_{1} [2] & 4_{1} 2_{1} 7_{0} [3] & 6_{0} 8_{0} 8_{1} [8] & 0_{0} 1_{0} 4_{0} [7] \\
\infty_0 2_{0} 3_{1} [1] & \infty_1 5_{0} 9_{1} [4] & \infty_2 3_{0} 1_{1} [9] & \infty_3 9_{0} 5_{1} [6]
\end{array}
\]
Using this starter and adder, we obtain the following $\mathrm{GHD}^*(10,24)$.
\renewcommand{\arraystretch}{1.25}
\[
\arraycolsep 3.2pt
\begin{array}{|c|c|c|c|c|c|c|c|c|c|} \hline
& \infty_2 3_0 1_1 & 6_0 8_0 8_1 & 0_0 1_0 4_0 & \infty_3 9_0 5_1 & & \infty_1 5_0 9_1 & 4_1 2_1 7_0 & 0_1 6_1 7_1 & \infty_0 2_0 3_1 \\ \hline
\infty_0 3_0 4_1 & & \infty_2 4_0 2_1 & 7_0 9_0 9_1 & 1_0 2_0 5_0 & \infty_3 0_0 6_1 & & \infty_1 6_0 0_1 & 5_1 3_1 8_0 & 1_1 7_1 8_1 \\ \hline
2_1 8_1 9_1 & \infty_0 4_0 5_1 & & \infty_2 5_0 3_1 & 8_0 0_0 0_1 & 2_0 3_0 6_0 & \infty_3 1_0 7_1 & & \infty_1 7_0 1_1 & 6_1 3_1 9_0 \\ \hline
7_1 4_1 0_0 & 3_1 9_1 0_1 & \infty_0 5_0 6_1 & & \infty_2 6_0 4_1 & 9_0 1_0 1_1 & 3_0 4_0 7_0 & \infty_3 2_0 8_1 & &\infty_1 8_0 2_1 \\ \hline
\infty_1 9_0 3_1 & 8_1 5_1 1_0 & 4_1 0_1 1_1 & \infty_0 6_0 7_1 & & \infty_2 7_0 5_1 & 0_0 2_0 2_1 & 4_0 5_0 8_0 & \infty_3 3_0 9_1 & \\ \hline
& \infty_1 0_0 4_1 & 9_1 6_1 2_0 & 5_1 1_1 2_1 & \infty_0 7_0 8_1 & & \infty_2 8_0 6_1 & 1_0 3_0 3_1 & 5_0 6_0 9_0 & \infty_3 4_0 0_1 \\ \hline
\infty_3 5_0 1_1 & & \infty_1 1_0 5_1 & 0_1 7_1 3_0 & 6_1 2_1 3_1 & \infty_0 8_0 9_1 & & \infty_2 9_0 7_1 & 2_0 4_0 4_1 & 6_0 7_0 0_0 \\ \hline
7_0 8_0 1_0 & \infty_3 6_0 2_1 & & \infty_1 2_1 6_0 & 1_1 8_1 4_0 & 7_1 3_1 4_1 & \infty_0 9_0 0_1 & & \infty_2 0_0 8_1 & 3_0 5_0 5_1 \\ \hline
4_0 6_0 6_1 & 8_0 9_0 2_0 & \infty_3 7_0 3_1 & & \infty_1 3_1 7_0 & 2_1 9_1 5_0 & 8_1 4_1 5_1 & \infty_0 0_0 1_1 & & \infty_2 1_0 9_1 \\ \hline
\infty_2 2_0 0_1 & 5_0 7_0 7_1 & 9_0 0_0 3_0 & \infty_3 8_0 4_1 & & \infty_1 4_1 8_0 & 3_1 0_1 6_0 & 9_1 5_1 6_1 & \infty_0 1_0 2_1 & \\ \hline
\end{array}
\]
\renewcommand{\arraystretch}{1.0}
\end{example}
Another type of starter-adder is an {\em intransitive starter-adder}. This term is defined below.
\begin{definition} \label{IntStarter-Adder Definition}
Let $(G,+)$ be a group of order $n$, and let $X=(G \times\{0,1,2\})$.
An {\em intransitive starter-adder} for a $\mathrm{GHD}(n+x,3n)$ consists of a collection $\mathcal{ST}$ of $n+x$ triples, called an {\em intransitive starter},
and a set $A=\{a_1, a_2, \ldots, a_{n-x}\}$ of elements of $G$, called an {\em adder}, with the following properties:
\begin{enumerate}
\item $\mathcal{ST}$ can be partitioned into three sets $S$, $R$ and $C$ of sizes $n-x$, $x$ and $x$ respectively. We write $S=\{S_1, S_2, \ldots, S_{n-x}\}$,
$R=\{R_1, R_2, \ldots, R_{x}\}$ and $C=\{C_1, C_2, \ldots, C_{x}\}$.
\item $S \cup R$ is a partition of $X$.
\item For each $i \in \{0,1,2\}$, each element of $G$ occurs at most once as a pure $(i,i)$ difference
within a triple of $\mathcal{ST}$. Also for each pair $(i,j) \in \{ (0,1), (0,2), (1,2)\}$, each element of $G$ occurs at most once as a mixed $(i,j)$ difference
within a triple of $\mathcal{ST}$. If $|G|$ is even, no pure $(i,i)$ difference $g$ with order 2 in $G$ occurs within these triples.
\item If $S + A$ is the set of blocks $\{S_1 + a_1, S_2+ a_2, \ldots, S_{n-x} + a_{n-x}\}$ then $(S + A) \cup C$ is a partition of $X$.
As before, $S_j + a_j$ denotes the block obtained by adding $a_j$ to the non-subscript part of all elements of $S_j$.
\end{enumerate}
\end{definition}
To obtain a $\mathrm{GHD}(n+x, 3n)$ from such an intransitive starter-adder, we proceed as follows. Let $\mathcal{G}$ be the required GHD, and let its top
left $n \times n$ subarray be $\mathcal{H}$. Label the first $n$ rows and columns of $\cal{G}$ by the elements of $G$.
For $i \in G$, we then place the block $S_j + i$ in the $(i, i-a_j)$ cell of $\mathcal{H}$.
In the top row of $\mathcal{G}$ and the final $x$ columns (which we label as $n+1, n+2, \ldots, n+x$), place the blocks $R_1, R_2, \ldots, R_{x}$ from $R$; then
for $i \in G$, and $j=1,2, \ldots, x$, place $R_j + i$ in the $(i, n+j)$ cell of $\mathcal{G}$. Similarly, label the last $x$ rows of $\mathcal{G}$ as $n+1, n+2, \ldots, n+x$.
In the initial column and last $x$ rows of $\mathcal{G}$ we place the blocks $C_1, C_2, \ldots, C_{x}$ from $C$, and for $i \in G$, $j=1,2, \ldots, x$, we
place $C_j + i$ in the $(n+j, i)$ cell of $\mathcal{G}$. The bottom right $x \times x$ subarray of $\mathcal{G}$ is always an empty subarray.
\begin{theorem}
If there exists an intransitive starter $\mathcal{ST}$ and a corresponding adder $A$ for a $\mathrm{GHD}(n+x,3n)$ then there exists a $\mathrm{GHD}(n+x,3n)$ with an empty $x \times x$ sub-array.
\end{theorem}
| 3,846 | 67,737 |
Another type of starter-adder is an {\em intransitive starter-adder}. This term is defined below.
\begin{definition} \label{IntStarter-Adder Definition}
Let $(G,+)$ be a group of order $n$, and let $X=(G \times\{0,1,2\})$.
An {\em intransitive starter-adder} for a $\mathrm{GHD}(n+x,3n)$ consists of a collection $\mathcal{ST}$ of $n+x$ triples, called an {\em intransitive starter},
and a set $A=\{a_1, a_2, \ldots, a_{n-x}\}$ of elements of $G$, called an {\em adder}, with the following properties:
\begin{enumerate}
\item $\mathcal{ST}$ can be partitioned into three sets $S$, $R$ and $C$ of sizes $n-x$, $x$ and $x$ respectively. We write $S=\{S_1, S_2, \ldots, S_{n-x}\}$,
$R=\{R_1, R_2, \ldots, R_{x}\}$ and $C=\{C_1, C_2, \ldots, C_{x}\}$.
\item $S \cup R$ is a partition of $X$.
\item For each $i \in \{0,1,2\}$, each element of $G$ occurs at most once as a pure $(i,i)$ difference
within a triple of $\mathcal{ST}$. Also for each pair $(i,j) \in \{ (0,1), (0,2), (1,2)\}$, each element of $G$ occurs at most once as a mixed $(i,j)$ difference
within a triple of $\mathcal{ST}$. If $|G|$ is even, no pure $(i,i)$ difference $g$ with order 2 in $G$ occurs within these triples.
\item If $S + A$ is the set of blocks $\{S_1 + a_1, S_2+ a_2, \ldots, S_{n-x} + a_{n-x}\}$ then $(S + A) \cup C$ is a partition of $X$.
As before, $S_j + a_j$ denotes the block obtained by adding $a_j$ to the non-subscript part of all elements of $S_j$.
\end{enumerate}
\end{definition}
To obtain a $\mathrm{GHD}(n+x, 3n)$ from such an intransitive starter-adder, we proceed as follows. Let $\mathcal{G}$ be the required GHD, and let its top
left $n \times n$ subarray be $\mathcal{H}$. Label the first $n$ rows and columns of $\cal{G}$ by the elements of $G$.
For $i \in G$, we then place the block $S_j + i$ in the $(i, i-a_j)$ cell of $\mathcal{H}$.
In the top row of $\mathcal{G}$ and the final $x$ columns (which we label as $n+1, n+2, \ldots, n+x$), place the blocks $R_1, R_2, \ldots, R_{x}$ from $R$; then
for $i \in G$, and $j=1,2, \ldots, x$, place $R_j + i$ in the $(i, n+j)$ cell of $\mathcal{G}$. Similarly, label the last $x$ rows of $\mathcal{G}$ as $n+1, n+2, \ldots, n+x$.
In the initial column and last $x$ rows of $\mathcal{G}$ we place the blocks $C_1, C_2, \ldots, C_{x}$ from $C$, and for $i \in G$, $j=1,2, \ldots, x$, we
place $C_j + i$ in the $(n+j, i)$ cell of $\mathcal{G}$. The bottom right $x \times x$ subarray of $\mathcal{G}$ is always an empty subarray.
\begin{theorem}
If there exists an intransitive starter $\mathcal{ST}$ and a corresponding adder $A$ for a $\mathrm{GHD}(n+x,3n)$ then there exists a $\mathrm{GHD}(n+x,3n)$ with an empty $x \times x$ sub-array.
\end{theorem}
\begin{example} \label{SAexample7}
For $n=7$, $x=2$, the following is an intransitive starter-adder over $(\mathbb{Z}_{7} \times\{0,1,2\})$ for a $\mathrm{GHD}^*( 9,21)$. (In square brackets
we either give the adder for the corresponding starter block, or indicate whether it belongs to $R$ or $C$.)
\[
\begin{array}{@{}*{5}l@{}}
2_{2} 3_{2} 5_{2} [0] & 0_{0} 1_{0} 3_{0} [6] & 6_{1} 0_{1} 2_{1} [1] & 6_{0} 3_{1} 4_{2} [2] & 5_0 1_{1} 6_{2} [5] \\
4_0 5_{1} 0_{2} [R] & 5_{0} 4_{1} 0_2 [C] & 2_0 4_1 1_2 [R] & 4_0 2_{1} 1_{2} [C]
\end{array}
\]
Using this starter and adder, we obtain the following $\mathrm{GHD}^*( 9,21)$.
\renewcommand{\arraystretch}{1.25}
\[
\arraycolsep 4.5pt
\begin{array}{|c|c|c|c|c|c|c||c|c|} \hline
2_2 3_2 5_2 & 0_0 1_0 3_0 & 5_0 1_1 6_2 & & & 6_0 3_1 4_2 & 6_1 0_1 2_1 & 4_0 5_1 0_2 & 2_0 4_1 1_2 \\ \hline
0_1 1_1 3_1 & 3_2 4_2 6_2 & 1_0 2_0 4_0 & 6_0 2_1 0_2 & & & 0_0 4_1 5_2 & 5_0 6_1 1_2 & 3_0 5_1 2_2 \\ \hline
1_0 5_1 6_2 & 1_1 2_1 4_1 & 4_2 5_2 0_2 & 2_0 3_0 5_0 & 0_0 3_1 1_2 & & & 6_0 0_1 2_2 & 4_0 6_1 3_2 \\ \hline
& 2_0 6_1 0_2 & 2_1 3_1 5_1 & 5_2 6_2 1_2 & 3_0 4_0 6_0 & 1_0 4_1 2_2 & & 0_0 1_1 3_2 & 5_0 0_1 4_2 \\ \hline
& & 3_0 0_1 1_2 & 3_1 4_1 6_1 & 6_2 0_2 2_2 & 4_0 5_0 0_0 & 2_0 5_1 3_2 & 1_0 2_1 4_2 & 6_0 1_1 5_2 \\ \hline
3_0 6_1 4_2 & & & 4_0 1_1 2_2 & 4_1 5_1 0_1 & 0_2 1_2 3_2 & 5_0 6_0 1_0 & 2_0 3_1 5_2 & 0_0 2_1 6_2 \\ \hline
6_0 0_0 2_0 & 4_0 0_1 5_2 & & & 5_0 2_1 3_2 & 5_1 6_1 1_1 & 1_2 2_2 4_2 & 3_0 4_1 6_2 & 1_0 3_1 0_2 \\ \hline \hline
5_0 4_1 0_2 & 6_0 5_1 1_2 & 0_0 6_1 2_2 & 1_0 0_1 3_2 & 2_0 1_1 4_2 & 3_0 2_1 5_2 & 4_0 3_1 6_2 & & \\ \hline
4_0 2_1 1_2 & 5_0 3_1 2_2 & 6_0 4_1 3_2 & 0_0 5_1 4_2 & 1_0 6_1 5_2 & 2_0 0_1 6_2 & 3_0 1_1 0_2 & & \\ \hline
\end{array}
\]
\renewcommand{\arraystretch}{1.0}
\end{example}
We point out that the underlying block design for this GHD is a $(3,1)$-GDD of type $3^7$ with groups $\{t_0, t_1, t_2\}$ for $t \in \mathbb{Z}_7$. Since it has a pairwise hole
of size $3$, this GHD also has the $*$-property. However, GHDs obtained by the intransitive starter-adder method usually do not possess the $*$-property.
In all examples of $\mathrm{GHD}(n+x,3n)$s obtained by an intransitive starter-adder in this paper, the group $G$ will be taken as $\mathbb{Z}_n$. Also,
the underlying block design for any $\mathrm{GHD}(n+x,3n)$ that we construct in this way will have (in addition to the obvious automorphism of order $n$) an automorphism of order $2$.
This automorphism maps the point $t_0$ to $t_1$, $t_1$ to $t_0$ and $t_2$ to $t_2$. It also maps any starter block with adder $a$ to the
starter + adder block with adder $-a$, and each block in $R$ to a block in $C$. For instance, in the previous example, the starter block $\{6_0, 3_1, 4_2\}$
is mapped to the starter + adder block $\{3_0, 6_1, 4_2\} = \{5_0, 1_1, 6_2\} + 5$.
| 2,763 | 67,737 |
\subsection{Designs with the $*$-property}
The following lemma generalizes constructions of Stinson~\cite{Stinson} and Vanstone~\cite{Vanstone 80} for $\mathrm{GHD}^*$s. In~\cite{Stinson}, the result is given only for block
size $k=2$, while in~\cite{Vanstone 80}, it is done for general block size in the case that $g=1$.
\begin{lemma}
\label{Stinson 1}
Let $g$ be a positive integer, and for each $i \in \{1,2,\ldots,g\}$, let $u_i$ be a non-negative integer. Suppose that there exists a $\mathrm{PBD}(v,K,1)$, $(X, \cB)$, containing $g$ (not
necessarily disjoint) resolution classes, $P_1, P_2, \ldots, P_g$. Moreover, suppose that for every block $B\in \cB$, there exists a $\mathrm{GHD}_k^*(|B|, (k-1)|B|+1+u_B)$,
where $u_B = \sum_{\{i\mid B\in P_i\}} u_i$. Then there exists a $\mathrm{GHD}_k^*(v, (k-1)v+u+1)$, where $u = \sum_{i=1}^g u_i$.
\end{lemma}
\begin{proof}
We construct the resulting $\mathrm{GHD}_k^*$, $\cA$, on point set $(X\times \Z_{k-1})\cup I\cup \{\infty\}$, where $I = \{ \infty_{ij} \mid 1\leq i \leq g, 1\leq j \leq u_i\}$, and index
the rows and columns of $\cA$ by $X$. For each block $B\in \cB$ we define $I_B = \{\infty_{ij}\mid B\in P_i, 1\leq j \leq u_i\}$
and construct a \mbox{$\mathrm{GHD}_k^*(|B|, (k-1)|B|+1+u_B)$,} $\cA_B$, indexed by $B$, with point set $(B\times\Z_{k-1})\cup I_B\cup \{\infty\}$ and pairwise hole $I_B \cup \{\infty\}$.
In this $\mathrm{GHD}$, the block $\{\infty, (x,0), \ldots, (x,{k-2})\}$ should appear in the $(x,x)$-entry of $\cA_B$ for all $x \in B$.
For each cell indexed by $(x,y)\in X^2$, if $x\neq y$ there is a block with $\{x,y\}\in B$ and we place the entry from $\cA_B$ indexed by $(x,y)$ in the cell of $\cA$ indexed by $(x,y)$.
For each $x\in X$, in the diagonal $(x,x)$-entry of $\cA$ we place the block $\{\infty, (x,0), \ldots, (x,{k-2})\}$.
We now show that the resulting $\cA$ is a $\mathrm{GHD}_k^*$, with pairwise hole $I\cup\{\infty\}$.
We first show that no pair appears more than once by considering a pair of points in $(X\times\Z_{k-1})\cup I\cup \{\infty\}$.
If $x,y\in I\cup\{\infty\}$, it is evident that none of the elements of $I\cup \{\infty\}$ appear together in a block of $\cA$ as the elements of this set are always in the
pairwise holes of the $\cA_B$ from which the blocks of $\cA$ are drawn, nor do they appear together in the diagonal elements.
We now consider $(x,a) \in X\times\Z_{k-1}$.
If $y = \infty_{ij}$ then there is a unique block $B$ which contains the point $x$ in $P_i$ and $(x,a)$ and $y$ cannot appear together more than once in $\cA_B$.
If $y=\infty$, it appears with $(x,a)$ only on the diagonal.
Finally, if $(y,b)\in X\times\Z_{k-1}$, there is a unique block $B$ which contains $x$ and $y$ and $(x,a)$ and $(y,b)$ cannot appear together more than once in $\cA_B$.
We now show that the rows of $\cA$ are resolutions of $(X\times\Z_{k-1})\cup I\cup\{\infty\}$. Consider a row indexed by $x\in X$ and an element $\alpha\in (X\times \Z_{k-1})\cup I\cup\{\infty\}$.
If $\alpha = (x,a)$ or $\alpha = \infty$, $\alpha$ appears as a diagonal entry. If $\alpha=(y,b)$, where $y\in X\setminus \{x\}$, find the block $B$ containing $\{x,y\}$. Now, in
the row indexed by $x$ in $\cA_B$ the element $\alpha$ must appear, say it appears in the column indexed by $z$, then the cell indexed by $(x,z)$ of $\cA$ will contain $\alpha$.
If $\alpha = \infty_{ij}\in I$, find the block $B$ of the resolution class $P_i$ which contains $x$. As above, in the row indexed by $x$ in $\cA_B$ the element $\alpha$ must appear, say it
appears in the column indexed by $z$, then the cell indexed by $(x,z)$ of $\cA$ will contain $\alpha$.
A similar argument shows that the columns are also resolutions of $X$.
\end{proof}
Note that the statement of Lemma~\ref{Stinson 1} does not require the $g$ resolution classes to be disjoint, or even distinct. In practice, however, when applying Lemma~\ref{Stinson 1}, we
will use resolvable pairwise balanced designs in the construction and take $P_1,P_2,\ldots,P_g$ to be the distinct classes of the resolution. Thus, for any block $B$, $u_B$ will be $u_i$,
where $P_i$ is the parallel class containing $B$. In particular, we will use PBDs formed from resolvable transversal designs, so we record this case for $k=3$ in the following lemma.
\begin{lemma}
\label{Stinson 1 TD}
Suppose there exists a $\mathrm{RTD}(n,g)$. For $i \in \{1, 2, \ldots, g\}$, let $u_i$ be a non-negative integer such that there exists a $\mathrm{GHD}^*(n,2n+1+u_i)$, and let $u_{g+1}$ be a non-negative integer
such that there exists a $\mathrm{GHD}^*(g,2g+1+u_{g+1})$. Then there exists a $\mathrm{GHD}^*(ng,2ng+u+1)$, where $u=\sum_{i=1}^{g+1}u_i$.
\end{lemma}
\begin{proof}
Beginning with the $\mathrm{RTD}(n,g)$, construct a resolvable $\mathrm{PBD}(ng,\{n,g\},1)$ whose blocks are those of the resolvable transversal design together with a single parallel class whose
blocks are its groups. Note that the resolution of this design consists of $g$ parallel classes, say $P_1, P_2, \ldots, P_g$, consisting of blocks of size $n$, and one parallel
class, $P_{g+1}$, consisting of blocks of size $g$. The result is now a clear application of Lemma~\ref{Stinson 1}.
\end{proof}
| 1,884 | 67,737 |
en
|
train
|
0.100.8
|
\section{GHDs with one or two empty cells in each row and column} \label{TwoEmptySection}
| 25 | 67,737 |
en
|
train
|
0.100.9
|
\subsection{Existence of $\mathrm{GHD}(n+1,3n)$} \label{t=1 section}
In~\cite{WangDu}, Wang and Du asserted the existence of $\mathrm{GHD}(n+1,3n)$ for all $n \geq 7$, with at most five possible exceptions.
However, there are issues with some of the constructions in their paper, in particular the modified starter-adder constructions for GHD$(n+1,3n)$ with $n$ even and $10 \leq n \leq 36$.
These constructions contained at least one starter block such that one infinite point was added to half its translates (i.e.\ $(n+1)/2$ of them) and another infinite point was
added to the other $(n+1)/2$ translates. However, this procedure is not possible for $n$ even, since $(n+1)/2$ is then not an integer. In addition, many of their recursive
constructions later rely on existence of some of these faulty designs. We thus prove an existence result for $\mathrm{GHD}(n+1,3n)$ here, namely Theorem~\ref{Existence_t=1}, which has no exceptions for $n \geq 7$. Note that not all of the constructions in~\cite{WangDu} are problematic, and we will quote some results from that paper as part of our proof.
\begin{theorem} \label{Existence_t=1}
Let $n$ be a positive integer. There exists a $\mathrm{GHD}(n+1,3n)$ if and only if $n \geq 6$, except possibly for $n =6$.
\end{theorem}
If $n \leq 5$, there does not exist a $\mathrm{GHD}(n+1,3n)$. In particular, if $n<3$, the obvious necessary conditions are not satisfied. For $n=3$ or $4$, a $\mathrm{GHD}(n+1,3n)$ would be equivalent to
a Kirkman square of order 9 or a doubly resolvable nearly Kirkman triple system of order 12, both of which are known not to exist; see~\cite{ACCLWW,CLLM}. The nonexistence of
a $\mathrm{GHD}(6,15)$ was stated in~\cite{WangDu} as a result of a computer search.
We now turn our attention to existence. The following GHDs are constructed in~\cite{WangDu}.
\begin{lemma}[Wang and Du~\cite{WangDu}] \label{revised WangDu starter adder}
There exists a $\mathrm{GHD}^*(n+1,3n)$ if either (1) $n=8$, or (2) $n$ is odd, and either $7 \leq n \leq 33$ or $n=39$.
\end{lemma}
We find a number of other small GHDs by starter-adder methods.
\begin{lemma} \label{new starter adder t=1}
\begin{enumerate}
\item
There exists a $\mathrm{GHD}^*(n+1,3n)$ for $n \in \{14, 20, 26, 32, 38, 41, 44\}$.
\item
There exists a $\mathrm{GHD}(n+1,3n)$ for $n \in \{10, 12, 16, 18, 22, 28, 30, 36, 46\}$.
\end{enumerate}
\end{lemma}
\begin{proof}
Starters and adders for these GHDs can be found in Appendix~\ref{1EmptyAppendix}.
We use a transitive starter and adder for the values in (1), and an intransitive one for the values in (2).
\end{proof}
\begin{lemma} \label{StarterAdderMod2}
There exists a $\mathrm{GHD}^*(n+1,3n)$ for $n=37$.
\end{lemma}
\begin{proof}
We give a transitive starter and adder, modifying how we develop some of the blocks; this method is also used in~\cite{WangDu}. We develop the subscripts of $\infty_0$ and $\infty_1$ modulo 2.
The points $\infty_2$ and $\infty_3$ are treated similarly: $\{\infty_2,a_i,b_i\}$ gives $\{\infty_3, (a+1)_i, (b+1)_i\}$ in the next row, while $\{\infty_3,c_j,d_j\}$ yields
$\{\infty_2,(c+1)_j,(d+1)_j\}$ in the next row. Note that this ``swapping'' of infinite points in subsequent rows allows us to include a pure difference in blocks containing an infinite point.
That the block of the starter containing $\infty_0$ (resp.\ $\infty_2$) also has points of the form $a_0$, $b_0$ where $a$ and $b$ have different parities and the block
containing $\infty_1$ (resp.\ $\infty_3$) also has points of the form $c_1$, $d_1$, where $c$ and $d$ have different parities, combined with the fact that $n+1$ is even, ensures that no pair
of points is repeated as we develop. The points $\infty_4, \infty_5, \ldots, \infty_{34}$ remain fixed as their blocks are developed.
The starter blocks are given below, with the corresponding adders in square brackets.
\[
\arraycolsep 1.9pt
\begin{array}{@{}*{6}l@{}}
0_0 7_0 13_0 [0] & 10_1 14_1 20_1 [12] & \infty_0 37_0 2_0 [18] & \infty_1 32_1 1_1 [10] & \infty_2 33_0 34_0 [32] & \infty_3 2_1 7_1 [22] \\
\infty_4 25_0 25_1 [6] & \infty_5 8_0 9_1 [14] & \infty_6 16_0 18_1 [26] & \infty_7 26_0 29_1 [4] & \infty_8 18_0 22_1 [17] & \infty_9 21_0 26_1 [28] \\
\infty_{10} 24_0 30_1 [15] & \infty_{11} 3_0 11_1 [23] & \infty_{12} 4_0 13_1 [2] & \infty_{13} 5_0 15_1 [31] & \infty_{14} 10_0 21_1 [27] & \infty_{15} 12_0 24_1 [35] \\
\infty_{16} 28_0 3_1 [34] & \infty_{17} 19_0 34_1 [21] & \infty_{18} 30_0 8_1 [33] & \infty_{19} 27_0 6_1 [19] & \infty_{20} 15_0 33_1 [3] & \infty_{21} 23_0 4_1 [9] \\
\infty_{22} 11_0 31_1 [37] & \infty_{23} 22_0 5_1 [30] & \infty_{24} 35_0 19_1 [8] & \infty_{25} 31_0 16_1 [36] & \infty_{26} 14_0 0_1 [20] & \infty_{27} 1_0 27_1 [11] \\
\infty_{28} 9_0 36_1 [7] & \infty_{29} 6_0 37_1 [13] & \infty_{30} 29_0 23_1 [24] & \infty_{31} 17_0 12_1 [16] & \infty_{32} 32_0 28_1 [29] & \infty_{33} 20_0 17_1 [1] \\
\infty_{34} 36_0 35_1 [5]
\end{array}
\]
\end{proof}
\begin{lemma}\label{StarterAdderMod5}
There exists a $\mathrm{GHD}(n+1,3n)$ for $n\in \{24,34\}$.
\end{lemma}
\begin{proof}
We use a similar modification of the transitive starter-adder method to that in Lemma~\ref{StarterAdderMod2}. In this case, the subscripts
of $\infty_0, \infty_1, \infty_2, \infty_3, \infty_4$ are developed modulo $5$ as we develop the remaining points modulo ${n+1}$.
For $n=24$, the starter and adder are as follows:
\[
\arraycolsep 2.2pt
\begin{array}{@{}*{6}l@{}}
1_0 4_0 10_0 [19] & 0_1 7_1 15_1 [24] & \infty_0 20_0 21_1 [5] & \infty_1 12_0 19_0 [20] & \infty_2 5_1 19_1 [10] & \infty_3 2_1 3_1 [15] \\
\infty_4 13_0 21_0 [0] & \infty_5 7_0 12_1 [8] & \infty_6 6_0 13_1 [6] & \infty_7 23_0 6_1 [7] & \infty_8 2_0 11_1 [17] & \infty_9 24_0 9_1 [2] \\
\infty_{10} 18_0 4_1 [4] & \infty_{11} 14_0 1_1 [21] & \infty_{12} 5_0 18_1 [23] & \infty_{13} 8_0 22_1 [3] & \infty_{14} 9_0 24_1 [22] & \infty_{15} 0_0 17_1 [18] \\
\infty_{16} 15_0 8_1 [1] & \infty_{17} 3_0 23_1 [14] & \infty_{18} 17_0 14_1 [16] & \infty_{19} 22_0 20_1 [12] & \infty_{20} 11_0 10_1 [13] & \infty_{21} 16_0 16_1 [11]
\end{array}
\]
For $n=34$, the starter and adder are as follows:
\[
\arraycolsep 1.9pt
\begin{array}{@{}*{6}l@{}}
0_0 11_0 17_0 [4] & 10_1 21_1 22_1 [24] & \infty_0 6_0 13_1 [0] & \infty_1 20_1 27_1 [10] & \infty_2 15_0 34_0 [15] & \infty_3 18_0 27_0 [30] \\
\infty_4 11_1 14_1 [5] & \infty_5 29_0 29_1 [26] & \infty_6 5_0 6_1 [11] & \infty_7 9_0 12_1 [9] & \infty_8 1_0 5_1 [2] & \infty_9 2_0 7_1 [34] \\
\infty_{10} 20_0 28_1 [8] & \infty_{11} 7_0 16_1 [16] & \infty_{12} 8_0 18_1 [17] & \infty_{13} 4_0 15_1 [29] & \infty_{14} 12_0 24_1 [23] & \infty_{15} 21_0 34_1 [6] \\
\infty_{16} 19_0 33_1 [33] & \infty_{17} 23_0 3_1 [20] & \infty_{18} 22_0 4_1 [25] & \infty_{19} 26_0 9_1 [19] & \infty_{20} 24_0 8_1 [18] & \infty_{21} 16_0 1_1 [3] \\
\infty_{22} 10_0 31_1 [22] & \infty_{23} 13_0 0_1 [27] & \infty_{24} 14_0 2_1 [12] & \infty_{25} 33_0 23_1 [1] & \infty_{26} 28_0 19_1 [31] & \infty_{27} 25_0 17_1 [21] \\
\infty_{28} 32_0 25_1 [32] & \infty_{29} 3_0 32_1 [28] & \infty_{30} 30_0 26_1 [7] & \infty_{31} 31_0 30_1 [13]
\end{array}
\]
\end{proof}
Most of our GHDs are constructed using the Basic Frame Construction with $u=0$ (Corollary~\ref{FrameCorollary}). Note that in
every $\mathrm{GHD}(n+1,3n)$ there is exactly one empty cell in each row and column; thus every $\mathrm{GHD}(n+1,3n)$ contains a $\mathrm{GHD}(1,0)$ as
a subdesign. We therefore do not need to separately verify existence of this subdesign in the construction.
\begin{lemma} \label{uniform t=1}
There is a $\mathrm{GHD}(n+1,3n)$ for $n \in \{35,40,45,49,50,51,54,55\}$.
\end{lemma}
\begin{proof}
For $n=35,40,45,49,54$ and $55$, there exist (by Theorem~\ref{Uniform Frames}) GHFs of types $7^5$, $8^5$, $9^5$, $7^7$, $9^6$ and $11^5$.
For $n=50,51$, there exist (by Lemma~\ref{n=68 lemma}, with $h=m=7$, $x=1$ and either $w_1 = w = 1$ or $w_1 = w = 2$)
GHFs of types $7^6 8^1$ and $7^6 9^1$. Since there exist $\mathrm{GHD}(8,21)$, $\mathrm{GHD}(9,24)$, $\mathrm{GHD}(10,27)$ and $\mathrm{GHD}(12,33)$
by Lemma~\ref{revised WangDu starter adder}, the result follows by Corollary~\ref{FrameCorollary}.
\end{proof}
| 3,992 | 67,737 |
en
|
train
|
0.100.10
|
Most of our GHDs are constructed using the Basic Frame Construction with $u=0$ (Corollary~\ref{FrameCorollary}). Note that in
every $\mathrm{GHD}(n+1,3n)$ there is exactly one empty cell in each row and column; thus every $\mathrm{GHD}(n+1,3n)$ contains a $\mathrm{GHD}(1,0)$ as
a subdesign. We therefore do not need to separately verify existence of this subdesign in the construction.
\begin{lemma} \label{uniform t=1}
There is a $\mathrm{GHD}(n+1,3n)$ for $n \in \{35,40,45,49,50,51,54,55\}$.
\end{lemma}
\begin{proof}
For $n=35,40,45,49,54$ and $55$, there exist (by Theorem~\ref{Uniform Frames}) GHFs of types $7^5$, $8^5$, $9^5$, $7^7$, $9^6$ and $11^5$.
For $n=50,51$, there exist (by Lemma~\ref{n=68 lemma}, with $h=m=7$, $x=1$ and either $w_1 = w = 1$ or $w_1 = w = 2$)
GHFs of types $7^6 8^1$ and $7^6 9^1$. Since there exist $\mathrm{GHD}(8,21)$, $\mathrm{GHD}(9,24)$, $\mathrm{GHD}(10,27)$ and $\mathrm{GHD}(12,33)$
by Lemma~\ref{revised WangDu starter adder}, the result follows by Corollary~\ref{FrameCorollary}.
\end{proof}
\begin{lemma} \label{t=1 recursion}
If $ n \in \{42,43,47,48,52,53\}$ or $n \geq 56$, then there is a $\mathrm{GHD}(n+1,3n)$.
\end{lemma}
\begin{proof}
First, suppose that $n \geq 84$. Here, we can write $n=7m+v$, where $m \geq 11$ is odd,
$v$ is odd and $7 \leq v \leq 20 \leq 2m-2$. By Lemma~\ref{Frame 7^uv}, there is a GHF of type $7^m v^1$.
Since there exists a $\mathrm{GHD}(8,21)$ and a $\mathrm{GHD}(v+1,3v)$ (by Lemma~\ref{revised WangDu starter adder}
or Lemma~\ref{new starter adder t=1}), a $\mathrm{GHD}(n+1,3n)$ exists by Corollary~\ref{FrameCorollary}.
For the remaining values of $n$, we give in Table~\ref{ghd(n+1,3n) Frame Table} a frame of appropriate type $h^mv^1$,
where $n= hm+v$; these frames all exist by Lemma~\ref{Frame 7^uv}. Together with the existence of a $\mathrm{GHD}(h+1,3h)$ and
a $\mathrm{GHD}(v+1,3v)$ (by Lemma~\ref{revised WangDu starter adder} or Lemma~\ref{new starter adder t=1}),
these frames give the required GHD$(n+1,3n)$ by Corollary~\ref{FrameCorollary}.
\begin{table}[htbp]
\caption{Frames used for $\mathrm{GHD}(n+1,3n)$ with $n \in \{42,43,47,48,52,53\}$ and $56 \leq n \leq 83$.}
\centering
\begin{tabular}{ccccccccccc} \hline
$n=hm+v$ & $h$ & $m$ & $v$ & \hspace{0.5cm} & $n=hm+v$ & $h$ & $m$ & $v$ \\ \hline
42--43 & 7 & 5 & 7--8 & & 47--48 & 8 & 5 & 7--8 \\
52--53 & 9 & 5 & 7--8 & & 56--61 & 7 & 7 & 7--12 \\
62 & 11 & 5 & 7 & & 63--69 & 7 & 8 & 7--13 \\
70--79 & 7 & 9 & 7--16 & & 80--83 & 8 & 9 & 8--11 \\ \hline
\end{tabular}
\label{ghd(n+1,3n) Frame Table}
\end{table}
\end{proof}
Taken together, Lemmas~\ref{revised WangDu starter adder}--\ref{StarterAdderMod5} and \ref{uniform t=1}--\ref{t=1 recursion} prove Theorem~\ref{Existence_t=1}.
| 1,293 | 67,737 |
en
|
train
|
0.100.11
|
\subsection{Existence of $\mathrm{GHD}(n+2,3n)$} \label{t=2 section}
In this section, we consider the existence of $\mathrm{GHD}(n+2,3n)$, which have two empty cells in each row and column, and prove the following theorem.
\begin{theorem} \label{Existence_t=2}
Let $n$ be a positive integer. Then there exists a $\mathrm{GHD}(n+2,3n)$ if and only if $n \geq 6$.
\end{theorem}
Note that if $1 \leq n \leq 4$, the necessary conditions for the existence of a $\mathrm{GHD}(n+2,3n)$ are not satisfied. Moreover, for $n=5$, there is
no $\mathrm{GHD}(7,15)$~\cite{Mathon Vanstone}. Thus it suffices to consider the case that $n \geq 6$.
For relatively small values of $n$, we construct $\mathrm{GHD}(n+2,3n)$ mainly by starter-adder methods. These will then be used in the Basic Frame Construction
with $u=0$ (Corollary~\ref{FrameCorollary}) to give the remaining GHDs.
\begin{lemma} \label{Small Cases}
\rule{0ex}{0ex}
For all $n \in \{6, \ldots, 29\} \cup \{31, \ldots, 34\} \cup \{39,44\}$, a $\mathrm{GHD}^*(n+2,3n)$ exists.
Moreover, if $n$ is even, then there exists such a design containing a $\mathrm{GHD}(2,0)$ as a subdesign.
\end{lemma}
\begin{proof}
As mentioned in Section~\ref{DefnSection}, there exists a $\mathrm{GHD}(8,18)$~\cite{Finland}. As such a design is equivalent to a $\mathrm{DRNKTS}(18)$, it has the $*$-property. Note that
the $\mathrm{GHD}^*(8,18)$ exhibited in Example~\ref{FinlandExample} has a $2\times2$ empty subsquare, i.e.\ a sub-$\mathrm{GHD}(2,0)$. For the remaining values of $n$, a transitive starter and adder for
a $\mathrm{GHD}^*(n+2,3n)$ can be found in Appendix~\ref{starters and adders}. Note that the group used is $\mathbb{Z}_{n+2}$. For $n$ even, the adder does not contain 0 or $(n+2)/2$,
thus ensuring that the $(0,0)$-cell and $(0,(n+2)/2)$-cell are empty, and as we develop, the $((n+2)/2,0)$-cell and $((n+2)/2,(n+2)/2)$-cell will also be empty, yielding a sub-$\mathrm{GHD}(2,0)$.
\end{proof}
We remark that a $\mathrm{GHD}^*(10,24)$ with a sub-$\mathrm{GHD}(2,0)$ can also be obtained from~\cite[Table 8]{DuAbelWang}.
\begin{lemma} \label{Odd2hole}
\rule{0ex}{0ex}
For $n \in \{7,9,11\}$, there exists a $\mathrm{GHD}(n+2,3n)$ containing a $\mathrm{GHD}(2,0)$ as a subdesign. Further, when $n=7$, this GHD has the $*$-property.
\end{lemma}
\begin{proof}
For these values of $n$, an intransitive starter and adder for a $\mathrm{GHD}^*(n+2,3n)$ can be found in Appendix~\ref{starters and adders}. When $n=7$,
this GHD is also given in Example~\ref{SAexample7}; there we indicated that this one has the $*$-property.
\end{proof}
Our main recursive construction for $n \geq 84$ uses frames of type $7^mv^1$ from Lemma~\ref{Frame 7^uv}. For smaller $n$, we also use frames of types $h^mv^1$ with $h \in \{6,8,9,11\}$,
which also come from Lemma~\ref{Frame 7^uv}. Note that in light of Lemma~\ref{Small Cases}, in order to prove Theorem~\ref{Existence_t=2}, we need to obtain a $\mathrm{GHD}(n+2,3n)$ for
each $n \in \{30, 35, 36, 37, 38, 40, 41, 42, 43\}$ and for all $n \geq 45$.
\begin{lemma} \label{GHDs from uniform frames}
There exists a $\mathrm{GHD}(n+2,3n)$ for $n \in \{30,$ $35,$ $36,$ $37,$ $38,$ $40,$ $41,$ $46,$ $49,$ $50,$ $51,$ $54,$ $55\}$.
\end{lemma}
\begin{proof}
We apply Corollary~\ref{FrameCorollary} to construct the required designs. For $n=30,$ $35$, $36$, $40$, $49$, $54$ and $55$, we use GHFs of types
$6^5$, $7^5$, $6^6$, $8^5$, $7^7$, $9^6$ and $11^5$ respectively. These frames all exist by Theorem~\ref{Uniform Frames}. For $n=37$, $38$,
$41$ and $46$, we use GHFs of types $6^5 7^1$, $6^5 8^1$, $7^5 6^1$ and $8^5 6^1$ respectively (these all exist by Lemma~\ref{Frame 7^uv}). For
$n=50$ and $51$, we use GHFs of types $7^6 8^1$ and $7^6 9^1$ respectively (these exist by Lemma~\ref{n=68 lemma} with $h=m=7$, $x=2$ and
$w_1 = w = 1$ or $2$). Since there exist $\mathrm{GHD}(8,18)$, $\mathrm{GHD}(9,21)$, $\mathrm{GHD}(10,24)$, $\mathrm{GHD}(11,27)$ and $\mathrm{GHD}(13,33)$ each containing
a sub-$\mathrm{GHD}(2,0)$ (by Lemmas~\ref{Small Cases} and \ref{Odd2hole}) the result follows.
\end{proof}
\begin{lemma} \label{Intermediate range}
There exists a $\mathrm{GHD}(n+2,3n)$ if $n \in \{42, 43, 47, 48, 52, 53\}$ or $n \geq 56$.
\end{lemma}
\begin{proof}
We write $n$ in one of the forms $7m+v$, $8m+v$, $9m+v$ or $11m+v$ (where (1) $m \geq 5$ and either $m$ is odd or $m=8$, (2) $7 \leq v \leq \min(2(m-1), 20)$)
in the same manner as we did in Lemma~\ref{t=1 recursion}. We then construct a frame of type $7^mv^1$, $8^mv^1$, $9^m v^1$ or $11^mv^1$ from
Lemma~\ref{Frame 7^uv} together with a $\mathrm{GHD}(9,21)$, $\mathrm{GHD}(10,24)$, $\mathrm{GHD}(11,27)$ or $\mathrm{GHD}(13,33)$ (each containing
a $\mathrm{GHD}(2,0)$ as a subdesign) and a $\mathrm{GHD}(v+2,3v)$ (all of these exist by Lemma~\ref{Small Cases} or Lemma~\ref{Odd2hole}).
Applying Corollary~\ref{FrameCorollary}, using these frames and GHDs then produces the required $\mathrm{GHD}(n+2,3n)$.
\end{proof}
Lemmas~\ref{Small Cases}, \ref{Odd2hole}, \ref{GHDs from uniform frames} and \ref{Intermediate range} together
prove Theorem~\ref{Existence_t=2}.
| 2,142 | 67,737 |
en
|
train
|
0.100.12
|
\section{GHDs across the spectrum} \label{Spectrum}
In a non-trivial $\mathrm{GHD}(s,v)$ (i.e.\ where $v\neq 0)$, we require that $2s+1 \leq v \leq 3s$. A $\mathrm{GHD}(s,3s)$ has no empty cells, while a $\mathrm{GHD}(s,2s+1)$ has $(s-1)/3$ empty cells in each row and column.
Noting that $\lim_{s\rightarrow\infty} \frac{s-1}{3s} = \frac{1}{3}$, we see that the proportion of cells in a given row or column which are empty falls in the interval $[0,1/3)$. In this section,
we prove that for any $\pi \in [0,5/18]$, there is a GHD whose proportion of empty cells in a row or column is arbitrarily close to $\pi$.
Our main tool in this section is Lemma~\ref{Stinson 1} and its variant, Lemma~\ref{Stinson 1 TD}. As an ingredient for this construction, we require GHDs which have the $*$-property.
We note that GHDs constructed by the Basic Frame Construction do not always have the $*$-property, even if the input designs do.\footnote{
GHDs constructed by the Basic Frame Construction do have the $*$-property if (1) the frame used is a $\mathrm{GHF}_k$ of type $(s_1,g_1), \ldots, (s_n, g_n)$ with $g_i = (k-1)s_i$ for $i=1, \ldots, n$ and (2) for $i=1, \ldots, n$, the input designs are $\mathrm{GHD}(s_i, g_i + t)$s with a pairwise hole of size $t$ for some $t$. However, condition (1) is not satisfied by the frames in this paper.}
Thus, in general, we cannot use the results of Section~\ref{TwoEmptySection} for this purpose. However, as previously noted, those GHDs constructed by transitive starter-adder methods do have the $*$-property.
\begin{lemma} \label{1-empty-cell}
Let $m \geq 6$. There exists a $\mathrm{GHD}^*(2^m,3\cdot 2^m-3)$.
\end{lemma}
\begin{proof}
Since $2^{m-3}$ is a prime power and $m \geq 6$, there is an $\mathrm{RTD}(8,2^{m-3})$, with disjoint parallel classes $P_1, P_2, \ldots, P_{2^{m-3}}$. Form a $\mathrm{PBD}(2^m,\{2^{m-3},8\},1)$ by adding a single
parallel class $P_{2^{m-3}+1}$ consisting of the groups of the RTD. For the parallel class $P_{2^{m-3}+1}$, set $u_{2^{m-3}+1} =2^{m-3}-1$. For parallel classes $P_i$ with $1 \leq i \leq 2^{m-3}-1$,
let $u_i=7$, and for parallel class $P_{2^{m-3}}$, let $u_{2^{m-3}}=4$. Since there exist a $\mathrm{GHD}^*(2^{m-3},3\cdot 2^{m-3})$ and a $\mathrm{GHD}^*(8,24)$ (both by Theorem~\ref{GHD*}(ii)), as
well as a $\mathrm{GHD}^*(8,21)$ (by Lemma~\ref{revised WangDu starter adder}), and
\[
2 \cdot 2^m+1 + \left((2^{m-3}-1) + (2^{m-3}-1)(7)+4\right) = 3 \cdot 2^m-3
\]
it follows by Lemma~\ref{Stinson 1 TD} that there exists a $\mathrm{GHD}^*(2^m,3 \cdot 2^m-3)$.
\end{proof}
\begin{lemma} \label{2-empty-cell}
Let $m \geq 6$. There exists a $\mathrm{GHD}^*(2^m,3\cdot 2^m-6)$.
\end{lemma}
\begin{proof}
The proof is similar to that of Lemma~\ref{1-empty-cell}, except that we take $u_{2^{m-3}}=1$ rather than 4, which requires a $\mathrm{GHD}^*(8,18)$ (given in Example~\ref{FinlandExample}) rather than a $\mathrm{GHD}^*(8,21)$.
\end{proof}
With these ingredients in hand, we now construct GHDs with side length a power of 2.
\begin{lemma} \label{Power-2-even}
Let $m \geq 7$ be odd, and let $A=\frac{5}{36}\cdot 2^{2m}+\frac{5}{18}\cdot 2^m-\frac{19}{9}$. For all $0 \leq \alpha \leq A$,
there exists a $\mathrm{GHD}^*(2^{2m},3 \cdot 2^{2m}-6\alpha)$.
\end{lemma}
\begin{proof}
Since $2^m$ is a prime power, there is a $\mathrm{PBD}(2^{2m},\{2^m\},1)$ (an affine plane of order $2^m$); note that the number of parallel classes is $2^m+1$. Let $x$ and $y$ be integers with $0 \leq x, y \leq 2^m+1$ and $x+y \leq 2^{m}+1$. In Lemma~\ref{Stinson 1}, for $x$ parallel classes take $u_i=1$, for $y$ parallel classes take $u_i=2^m-1$, and for the remaining $2^m+1-x-y$ parallel classes take $u_i=2^m-7$. Note that there exist a $\mathrm{GHD}^*(2^m, 2 \cdot 2^m + 2)$ (by Theorem~\ref{GHD*}(i)), a $\mathrm{GHD}^*(2^m, 3 \cdot 2^m)$ (by Theorem~\ref{GHD*}(ii)) and a $\mathrm{GHD}^*(2^m, 3 \cdot 2^m-6)$ (by Lemma~\ref{2-empty-cell}). Thus, by Lemma~\ref{Stinson 1}, there is a $\mathrm{GHD}^*(2^{2m}, 2 \cdot 2^{2m}+1+x+y(2^m-1)+(2^m+1-x-y)(2^m-7))$. Note that the number of points is
\[
2 \cdot 2^{2m}+1+x+y(2^m-1)+(2^m+1-x-y)(2^m-7) = 3 \cdot 2^{2m} -6(1+2^{m})-x(2^m-8)+6y.
\]
Let $f(m,x,y)$ denote this number of points. For a fixed $x$, varying $y$ between 0 and $2^m+1-x$ gives all values of the number of points congruent to $0\pmod{6}$ between $f(m,x,0)$ and $f(m,x,2^m+1-x)$. Noting that for fixed $m$, $f(m,x,y)$ is linear in $x$ and $y$, and solving $f(m,x,0)=f(m,x+1,2^m+1-(x+1))$ for $x$, we obtain the solution $x_0=\frac{5}{6}2^m+\frac{4}{3}$ (which is an integer since $m$ is odd). For $x \leq x_0$, we have that $f(m,x,0) \leq f(m,x+1,2^m+1-(x+1))$, which means that we cover all possible values for the number of points congruent to $0\pmod{6}$ from $f(m,x_0+1,0)$ to $3 \cdot 2^{2m}$. Moreover,
\begin{eqnarray*}
f(m,x_0+1,0) &=& 3\cdot 2^{2m}-6(1+2^m)-\left(\frac{5}{6} \cdot 2^m+\frac{7}{3}\right)(2^m-8) \\
&=& 3 \cdot 2^{2m} - \left(6 + 6 \cdot 2^m + \frac{5}{6} \cdot 2^{2m}-\frac{20}{3} \cdot 2^m +\frac{7}{3} \cdot 2^m - \frac{56}{3}\right) \\
&=& 3 \cdot 2^{2m} - \left( \frac{5}{6} \cdot 2^{2m} + \frac{5}{3} \cdot 2^{m} - \frac{38}{3}\right) \\
&=& 3 \cdot 2^{2m} - 6 \left( \frac{5}{36} \cdot 2^{2m} + \frac{5}{18} \cdot 2^m - \frac{19}{9} \right) \\
&=& 3 \cdot 2^{2m} - 6A,
\end{eqnarray*}
and so the result is verified.
\end{proof}
\begin{lemma} \label{Power-2-odd}
Let $m \geq 7$ be odd, and let $A'=\frac{5}{36} \cdot 2^{2m} + \frac{1}{9} \cdot 2^m - \frac{23}{18}$. For all $1 \leq \alpha \leq A'$, there exists a $\mathrm{GHD}(2^{2m},3\cdot 2^{2m}-6\alpha+3)$.
\end{lemma}
\begin{proof}
The proof is similar to that of Lemma~\ref{Power-2-even}, except that on one parallel class we take $u_i=2^m-4$ instead of $u_i=2^m-7$. This requires a $\mathrm{GHD}^*(2^m,3 \cdot 2^m-3)$, which exists by Lemma~\ref{1-empty-cell}.
\end{proof}
In Lemma~\ref{Power-2-even}, the number of empty cells in each row of the $\mathrm{GHD}^*(2^{2m},3 \cdot 2^{2m}-6\alpha)$ is $2\alpha$, while in Lemma~\ref{Power-2-odd}, the number of
empty cells in each row of the \mbox{$\mathrm{GHD}^*(2^{2m},3 \cdot 2^{2m}-6\alpha+3)$} is $2\alpha-1$. Note that in Lemmas~\ref{Power-2-even} and~\ref{Power-2-odd}, for $\alpha=A$ and $\alpha=A'$,
respectively, the number of points is less than $3 \cdot 2^{2m} - \frac{5}{6} \cdot 2^{2m}$, so that we have GHDs of side length $2^{2m}$ for any number of empty cells per row
between 0 and $\frac{5}{18} \cdot 2^{2m}$, giving proportions of empty cells per row or column between 0 and $\frac{5}{18}$.
Approximating any real number $\pi \in [0,5/18]$ by a dyadic rational, this means that we can now construct a $\mathrm{GHD}$ such that the proportion of empty cells in a row is arbitrarily close to $\pi$.
\begin{theorem} \label{proportion}
Let $\pi \in [0,5/18]$. For any $\varepsilon>0$, there exists an odd integer $m$ and an integer $v$ for which there exists a $\mathrm{GHD}^*(2^{2m},v)$, $\mathcal{D}$, such that the proportion $\pi_0$
of empty cells in each row or column of $\mathcal{D}$ satisfies $|\pi-\pi_0|<\varepsilon$.
\end{theorem}
\begin{proof}
Given $\pi$ and $\varepsilon$, there exists $m_0$ such that for all $m_1>m_0$, $|\pi-\lfloor 2^{m_1} \pi \rfloor / 2^{m_1}|<\varepsilon$.
Let $m$ be an odd integer with $2m>m_0$, and let $\pi_0=\lfloor 2^{2m} \pi \rfloor / 2^{2m}$. Note that if $\pi=0$, then $\pi_0=0$. Otherwise, since $\pi-\frac{1}{2^{2m}} < \pi_0 \leq \pi$, we
may also choose $m$ sufficiently large to ensure that $\pi_0 \in [0,\frac{5}{18}]$. Thus Lemmas~\ref{Power-2-odd} and~\ref{Power-2-even} enable us to construct a $\mathrm{GHD}^*$ with side
length $2^{2m}$ whose proportion of empty cells in a row is $\pi_0$.
\end{proof}
Theorem~\ref{proportion} shows that we can find a $\mathrm{GHD}$ with an arbitrary proportion of empty cells across five-sixths of the interval $[0,1/3)$ of possible proportions. This improves on previous
work which has shown existence only very close to the ends of the spectrum. The impact of this result is discussed further in Section~\ref{concl}.
We remark also that GHDs exist throughout the spectrum with more general side lengths than powers of~$2$.
First, the methods of Lemmas~\ref{1-empty-cell}--\ref{Power-2-odd} work for primes other than~$2$, provided we can find appropriate ingredient $\mathrm{GHD}$s. Also, by a straightforward
generalization of the Moore--MacNeish product construction for MOLS~\cite{MacNeish22, Moore}, the existence of a $\mathrm{GHD}(s,v)$ and $3~\mathrm{MOLS}(n)$ implies that there exists
a $\mathrm{GHD}(ns,nv)$. See, for instance, Theorem 2.6 in \cite{YanYin} for a similar result. By applying this construction to the results of Lemmas~\ref{Power-2-even} and~\ref{Power-2-odd}, we
can find GHDs with an arbitrary proportion of empty cells (for proportions in $[0,5/18]$) for side lengths of the form $2^{2m}n$.
| 3,569 | 67,737 |
en
|
train
|
0.100.13
|
\section{Conclusion} \label{concl}
The concept of generalized Howell designs brings together various classes of designs, from doubly resolvable BIBDs on one side of the spectrum to MOLS and SOMAs on the other. In this paper,
we have defined generalized Howell designs in a way that encompasses several previously studied generalizations of Howell designs, and have attempted to unify disparate terminology for techniques
used on both ends of the spectrum.
In Section~\ref{ConstrSection}, we described several construction techniques for $\mathrm{GHD}$s, several of which generalize known constructions for Howell designs, doubly resolvable designs and MOLS.
These construction techniques were used in Section~\ref{TwoEmptySection} to settle existence of $\mathrm{GHD}(s,v)$ in the case that the number of empty cells in each row and
column is one or two, with one possible exception (a $\mathrm{GHD}(7,18)$) (Theorems~\ref{Existence_t=1} and \ref{Existence_t=2}).
The existence of $\mathrm{GHD}(s,v)$ with $e$ empty cells, where $e \in \{3, 4, \ldots, (s-3)/3 \} \setminus \{(s-6)/3\}$
remains open in general (although in \cite{DuAbelWang}, the case $e=(s-4)/3$ was solved for $e$ even with 15 possible exceptions).
Designs with $e=0$ (a $\mathrm{GHD}(6,18)$), $e=1$ (a $\mathrm{GHD}(9,24)$) and $e=2$ (a $\mathrm{GHD}(12,30)$) are now known to exist (see Theorems~\ref{618}, \ref{Existence_t=1} and \ref{Existence_t=2}).
A simpler interim result would be to show existence for an interval of $e$-values
of a given fixed length. We conjecture that there exists a $\mathrm{GHD}(s,v)$ whenever the obvious necessary conditions are satisfied, with at most a small number of exceptions for each $e$.
The main result of Section~\ref{Spectrum} is that for any $\pi \in [0,5/18]$, there exists a $\mathrm{GHD}$ whose proportion of cells in a given row or column which are empty is arbitrarily close
to $\pi$. This is a powerful result. While it does not close the existence spectrum, it does provide strong evidence that this should be possible. Previous work has focused on the two
ends of the spectrum: Kirkman squares and DRNKTSs at one end, and MOLS, SOMAs, and GHDs with one empty cell per row/column at the other; Theorem~\ref{proportion} shows existence of
GHDs across five-sixths of the spectrum.
The techniques of Section~\ref{Spectrum} can be used to give some examples of $\mathrm{GHD}$s with proportion greater than $5/18$, but necessarily bounded away from $1/3$. It remains a challenging
open problem to show that there exist $\mathrm{GHD}$s whose proportion of empty cells per row or column can be arbitrarily close to any element of $[0,1/3)$.
\section{Acknowledgments}
The authors would like to thank Esther Lamken for a number of useful comments
and in particular for suggesting the intransitive starter-adder method,
which was used for several of the smaller GHDs in this paper.
\begin{appendices}
| 862 | 67,737 |
en
|
train
|
0.100.14
|
\section{Starters and adders for small \mbox{\boldmath $\mathrm{GHD}(n+1,3n)$}}
\label{1EmptyAppendix}
First we give those obtained by transitive starters and adders:
\begin{example} For $n=14$:
\[
\begin{array}{@{}*{6}l@{}}
0_0 3_0 5_0 [10] & 1_1 3_1 7_1 [ 5] & \infty_0 4_0 4_1 [12] & \infty_1 8_0 9_1 [ 1] & \infty_2 9_0 11_1 [ 2] & \infty_3 11_0 14_1 [ 8] \\
\infty_4 13_0 2_1 [ 9] & \infty_5 10_0 0_1 [ 4] & \infty_6 14_0 5_1 [13] & \infty_7 6_0 13_1 [11] & \infty_8 1_0 10_1 [ 7] & \infty_9 2_0 12_1 [ 3] \\
\infty_{10} 12_0 8_1 [ 6] & \infty_{11} 7_0 6_1 [14]
\end{array}
\]
\end{example}
\begin{example} For $n=20$:
\[
\begin{array}{@{}*{6}l@{}}
0_0 1_0 3_0 [14] & 2_1 6_1 8_1 [ 7] & \infty_0 8_0 9_1 [ 2] & \infty_1 9_0 11_1 [12] & \infty_2 7_0 10_1 [11] & \infty_3 11_0 15_1 [ 5] \\
\infty_4 13_0 18_1 [17] & \infty_5 18_0 3_1 [ 4] & \infty_6 10_0 17_1 [10] & \infty_7 14_0 1_1 [18] & \infty_8 12_0 0_1 [16] & \infty_9 15_0 4_1 [ 8] \\
\infty_{10} 5_0 16_1 [ 1] & \infty_{11} 16_0 7_1 [ 3] & \infty_{12} 20_0 12_1 [13] & \infty_{13} 4_0 19_1 [20] & \infty_{14} 19_0 14_1 [15] & \infty_{15} 17_0 13_1 [ 9] \\
\infty_{16} 2_0 20_1 [ 6] & \infty_{17} 6_0 5_1 [19]
\end{array}
\]
\end{example}
\begin{example} For $n=26$:
\[
\begin{array}{@{}*{6}l@{}}
0_0 6_0 10_0 [18] & 3_1 11_1 16_1 [ 9] & \infty_0 8_0 8_1 [19] & \infty_1 14_0 15_1 [17] & \infty_2 22_0 24_1 [25] & \infty_3 11_0 14_1 [ 2] \\
\infty_4 13_0 17_1 [16] & \infty_5 17_0 22_1 [24] & \infty_6 7_0 13_1 [15] & \infty_7 2_0 9_1 [ 4] & \infty_8 24_0 5_1 [12] & \infty_9 1_0 10_1 [11] \\
\infty_{10} 15_0 25_1 [20] & \infty_{11} 12_0 23_1 [ 7] & \infty_{12} 19_0 4_1 [ 6] & \infty_{13} 16_0 2_1 [21] & \infty_{14} 20_0 7_1 [ 1] & \infty_{15} 18_0 6_1 [ 5] \\
\infty_{16} 23_0 12_1 [ 3] & \infty_{17} 3_0 20_1 [14] & \infty_{18} 9_0 1_1 [23] & \infty_{19} 25_0 18_1 [13] & \infty_{20} 5_0 26_1 [10] & \infty_{21} 26_0 21_1 [ 8] \\
\infty_{22} 4_0 0_1 [26] & \infty_{23} 21_0 19_1 [22]
\end{array}
\]
\end{example}
\begin{example} For $n=32$:
\[
\begin{array}{@{}*{6}l@{}}
0_0 4_0 7_0 [22] & 5_1 6_1 11_1 [11] & \infty_0 25_0 25_1 [ 8] & \infty_1 8_0 9_1 [17] & \infty_2 16_0 18_1 [25] & \infty_3 11_0 14_1 [ 1] \\
\infty_4 13_0 17_1 [14] & \infty_5 17_0 22_1 [29] & \infty_6 6_0 12_1 [15] & \infty_7 1_0 8_1 [ 3] & \infty_8 2_0 10_1 [32] & \infty_9 10_0 19_1 [13] \\
\infty_{10} 3_0 13_1 [28] & \infty_{11} 18_0 29_1 [18] & \infty_{12} 19_0 31_1 [21] & \infty_{13} 21_0 1_1 [23] & \infty_{14} 22_0 4_1 [26] & \infty_{15} 20_0 3_1 [ 4] \\
\infty_{16} 23_0 7_1 [16] & \infty_{17} 12_0 30_1 [31] & \infty_{18} 14_0 0_1 [ 5] & \infty_{19} 15_0 2_1 [ 2] & \infty_{20} 5_0 26_1 [27] & \infty_{21} 27_0 16_1 [20] \\
\infty_{22} 9_0 32_1 [ 7] & \infty_{23} 24_0 15_1 [ 6] & \infty_{24} 29_0 21_1 [24] & \infty_{25} 30_0 23_1 [12] & \infty_{26} 26_0 20_1 [ 9] & \infty_{27} 32_0 27_1 [19] \\
\infty_{28} 28_0 24_1 [10] & \infty_{29} 31_0 28_1 [30]
\end{array}
\]
\end{example}
\begin{example} For $n=38$:
\[
\begin{array}{@{}*{6}l@{}}
0_0 6_0 7_0 [26] & 8_1 14_1 24_1 [13] & \infty_0 20_0 20_1 [20] & \infty_1 24_0 25_1 [ 1] & \infty_2 17_0 19_1 [ 4] & \infty_3 26_0 29_1 [ 2] \\
\infty_4 19_0 23_1 [23] & \infty_5 23_0 28_1 [30] & \infty_6 25_0 31_1 [19] & \infty_7 2_0 9_1 [29] & \infty_8 3_0 12_1 [27] & \infty_9 5_0 15_1 [ 7] \\
\infty_{10} 10_0 21_1 [31] & \infty_{11} 18_0 30_1 [25] & \infty_{12} 29_0 3_1 [ 9] & \infty_{13} 35_0 10_1 [24] & \infty_{14} 28_0 4_1 [28] & \infty_{15} 30_0 7_1 [17] \\
\infty_{16} 33_0 11_1 [22] & \infty_{17} 14_0 32_1 [21] & \infty_{18} 37_0 17_1 [12] & \infty_{19} 32_0 13_1 [36] & \infty_{20} 34_0 16_1 [32] & \infty_{21} 12_0 34_1 [33] \\
\infty_{22} 22_0 6_1 [14] & \infty_{23} 16_0 1_1 [34] & \infty_{24} 11_0 36_1 [11] & \infty_{25} 13_0 0_1 [ 5] & \infty_{26} 38_0 26_1 [38] & \infty_{27} 9_0 37_1 [ 6] \\
\infty_{28} 15_0 5_1 [37] & \infty_{29} 8_0 38_1 [16] & \infty_{30} 1_0 33_1 [ 8] & \infty_{31} 27_0 22_1 [35] & \infty_{32} 31_0 27_1 [ 3] & \infty_{33} 21_0 18_1 [18] \\
\infty_{34} 4_0 2_1 [15] & \infty_{35} 36_0 35_1 [10]
\end{array}
\]
\end{example}
\begin{example} For $n=41$:
\[
\begin{array}{@{}*{6}l@{}}
0_0 24_0 32_0 [28] & 10_1 11_1 13_1 [14] & \infty_0 26_0 26_1 [ 8] & \infty_1 8_0 9_1 [ 1] & \infty_2 16_0 18_1 [ 5] & \infty_3 29_0 32_1 [10] \\
\infty_4 18_0 22_1 [15] & \infty_5 22_0 27_1 [36] & \infty_6 17_0 23_1 [33] & \infty_7 35_0 0_1 [11] & \infty_8 4_0 12_1 [31] & \infty_9 5_0 14_1 [19] \\
\infty_{10} 6_0 16_1 [32] & \infty_{11} 9_0 20_1 [39] & \infty_{12} 3_0 15_1 [17] & \infty_{13} 11_0 24_1 [20] & \infty_{14} 33_0 5_1 [35] & \infty_{15} 2_0 17_1 [41] \\
\infty_{16} 21_0 37_1 [34] & \infty_{17} 31_0 6_1 [40] & \infty_{18} 15_0 33_1 [29] & \infty_{19} 30_0 7_1 [24] & \infty_{20} 25_0 3_1 [16] & \infty_{21} 19_0 40_1 [30] \\
\infty_{22} 14_0 36_1 [13] & \infty_{23} 23_0 4_1 [22] & \infty_{24} 20_0 2_1 [37] & \infty_{25} 36_0 19_1 [38] & \infty_{26} 1_0 29_1 [18] & \infty_{27} 10_0 39_1 [12] \\
\infty_{28} 12_0 1_1 [ 2] & \infty_{29} 38_0 28_1 [27] & \infty_{30} 40_0 31_1 [ 7] & \infty_{31} 7_0 41_1 [23] & \infty_{32} 41_0 34_1 [26] & \infty_{33} 27_0 21_1 [ 9] \\
\infty_{34} 13_0 8_1 [ 4] & \infty_{35} 34_0 30_1 [ 6] & \infty_{36} 28_0 25_1 [25] & \infty_{37} 37_0 35_1 [ 0] & \infty_{38} 39_0 38_1 [ 3]
\end{array}
\]
\end{example}
% [extraction artifact removed: dataset chunk-boundary metadata, a stray unmatched \end{example},
%  and a verbatim duplicate of the $n=41$ example already given above]
\begin{example} For $n=44$:
\[
\begin{array}{@{}*{6}l@{}}
0_0 2_0 10_0 [30] & 16_1 30_1 36_1 [15] & \infty_0 11_0 11_1 [28] & \infty_1 25_0 26_1 [31] & \infty_2 26_0 28_1 [ 2] & \infty_3 21_0 24_1 [5] \\
\infty_4 16_0 20_1 [ 7] & \infty_5 27_0 32_1 [17] & \infty_6 29_0 35_1 [32] & \infty_7 1_0 8_1 [35] & \infty_8 4_0 12_1 [23] & \infty_9 8_0 17_1 [43] \\
\infty_{10} 19_0 29_1 [33] & \infty_{11} 33_0 44_1 [22] & \infty_{12} 30_0 42_1 [16] & \infty_{13} 36_0 4_1 [12] & \infty_{14} 38_0 7_1 [29] & \infty_{15} 39_0 9_1 [10] \\
\infty_{16} 34_0 5_1 [ 4] & \infty_{17} 37_0 10_1 [27] & \infty_{18} 32_0 6_1 [42] & \infty_{19} 20_0 40_1 [13] & \infty_{20} 42_0 18_1 [24] & \infty_{21} 15_0 37_1 [26] \\
\infty_{22} 9_0 33_1 [38] & \infty_{23} 14_0 39_1 [39] & \infty_{24} 40_0 21_1 [19] & \infty_{25} 7_0 34_1 [36] & \infty_{26} 3_0 31_1 [21] & \infty_{27} 18_0 2_1 [44] \\
\infty_{28} 41_0 27_1 [41] & \infty_{29} 6_0 38_1 [ 3] & \infty_{30} 13_0 1_1 [37] & \infty_{31} 24_0 13_1 [34] & \infty_{32} 35_0 25_1 [25] & \infty_{33} 12_0 3_1 [ 8] \\
\infty_{34} 31_0 23_1 [11] & \infty_{35} 22_0 15_1 [ 9] & \infty_{36} 28_0 22_1 [ 6] & \infty_{37} 5_0 0_1 [20] & \infty_{38} 23_0 19_1 [40] & \infty_{39} 17_0 14_1 [18] \\
\infty_{40} 43_0 41_1 [14] & \infty_{41} 44_0 43_1 [ 1]
\end{array}
\]
\end{example}
Now the intransitive starters and adders:
\begin{example} For $n=10$:
\[
\begin{array}{@{}*{6}l@{}}
0_2 6_2 7_2 [ 0] & 7_0 8_0 3_2 [ 8] & 5_1 6_1 1_2 [ 2] & 9_0 6_0 8_2 [ 4] & 3_1 0_1 2_2 [ 6] & 3_0 1_0 4_2 [ 1] \\
4_1 2_1 5_2 [ 9] & 4_0 0_0 8_1 [ 7] & 5_0 1_1 7_1 [ 3] & 2_0 9_1 9_2 [ R] & 9_0 2_1 9_2 [ C]
\end{array}
\]
\end{example}
\begin{example} For $n=12$:
\[
\begin{array}{@{}*{6}l@{}}
3_2 6_2 10_2 [ 0] & 6_0 10_0 9_0 [ 4] & 10_1 2_1 1_1 [ 8] & 5_0 0_1 9_2 [ 2] & 2_0 7_1 11_2 [10] & 7_0 3_1 1_2 [ 1] \\
4_0 8_1 2_2 [11] & 11_0 1_0 0_2 [ 5] & 4_1 6_1 5_2 [ 7] & 8_0 9_1 4_2 [ 3] & 0_0 11_1 7_2 [ 9] & 3_0 5_1 8_2 [ R] \\
5_0 3_1 8_2 [ C]
\end{array}
\]
\end{example}
\begin{example} For $n=16$:
\[
\begin{array}{@{}*{6}l@{}}
6_2 12_2 15_2 [ 0] & 0_0 2_0 7_0 [10] & 10_1 12_1 1_1 [ 6] & 4_0 10_0 5_2 [ 4] & 8_1 14_1 9_2 [12] & 8_0 9_1 1_2 [13] \\
6_0 5_1 14_2 [ 3] & 3_0 6_1 8_2 [15] & 5_0 2_1 7_2 [ 1] & 12_0 3_1 0_2 [11] & 14_0 7_1 11_2 [ 5] & 13_0 1_0 4_2 [14] \\
11_1 15_1 2_2 [ 2] & 11_0 0_1 10_2 [ 9] & 9_0 4_1 3_2 [ 7] & 15_0 13_1 13_2 [ R] & 13_0 15_1 13_2 [ C]
\end{array}
\]
\end{example}
\begin{example} For $n=18$:
\[
\begin{array}{@{}*{6}l@{}}
1_2 2_2 12_2 [ 0] & 0_0 8_0 7_0 [12] & 12_1 2_1 1_1 [ 6] & 4_0 10_0 3_2 [ 4] & 8_1 14_1 7_2 [14] & 12_0 15_1 13_2 [ 1] \\
16_0 13_1 14_2 [17] & 1_0 0_1 5_2 [ 5] & 5_0 6_1 10_2 [13] & 14_0 3_1 6_2 [ 3] & 6_0 17_1 9_2 [15] & 3_0 5_1 0_2 [ 8] \\
13_0 11_1 8_2 [10] & 9_0 11_0 17_2 [16] & 7_1 9_1 15_2 [ 2] & 17_0 4_1 11_2 [11] & 15_0 10_1 4_2 [ 7] & 2_0 16_1 16_2 [ R] \\
16_0 2_1 16_2 [ C]
\end{array}
\]
\end{example}
\begin{example} For $n=22$:
\[
\begin{array}{@{}*{6}l@{}}
0_2 7_2 13_2 [ 0] & 0_0 4_0 7_0 [16] & 16_1 20_1 1_1 [ 6] & 8_0 14_0 11_2 [10] & 18_1 2_1 21_2 [12] & 10_0 11_1 4_2 [ 5] \\
16_0 15_1 9_2 [17] & 19_0 0_1 6_2 [ 9] & 9_0 6_1 15_2 [13] & 2_0 9_1 14_2 [ 3] & 12_0 5_1 17_2 [19] & 1_0 19_1 18_2 [ 2] \\
21_0 3_1 20_2 [20] & 11_0 21_1 12_2 [18] & 17_0 7_1 8_2 [ 4] & 20_0 3_0 5_2 [14] & 12_1 17_1 19_2 [ 8] & 15_0 13_1 1_2 [15] \\
6_0 8_1 16_2 [ 7] & 5_0 14_1 3_2 [21] & 13_0 4_1 2_2 [ 1] & 18_0 10_1 10_2 [ R] & 10_0 18_1 10_2 [ C]
\end{array}
\]
\end{example}
% [extraction artifact removed: dataset chunk-boundary metadata, a stray unmatched \end{example},
%  and verbatim duplicates of the $n=16$, $n=18$ and $n=22$ examples already given above]
\begin{example} For $n=28$:
\[
\begin{array}{@{}*{6}l@{}}
4_2 12_2 17_2 [ 0] & 0_0 8_0 5_0 [16] & 16_1 24_1 21_1 [12] & 20_0 26_1 23_2 [ 8] & 6_0 0_1 3_2 [20] & 22_0 5_1 16_2 [19] \\
24_0 13_1 7_2 [ 9] & 9_0 12_1 27_2 [23] & 7_0 4_1 22_2 [ 5] & 10_0 17_1 18_2 [ 1] & 18_0 11_1 19_2 [27] & 13_0 17_0 2_2 [18] \\
3_1 7_1 20_2 [10] & 15_0 25_1 1_2 [ 4] & 1_0 19_1 5_2 [24] & 25_0 2_1 21_2 [21] & 23_0 18_1 14_2 [ 7] & 3_0 15_1 24_2 [17] \\
4_0 20_1 13_2 [11] & 21_0 2_0 9_2 [ 6] & 27_1 8_1 15_2 [22] & 11_0 12_0 10_2 [26] & 9_1 10_1 8_2 [ 2] & 27_0 1_1 11_2 [15] \\
16_0 14_1 26_2 [13] & 26_0 22_1 0_2 [25] & 19_0 23_1 25_2 [ 3] & 14_0 6_1 6_2 [ R] & 6_0 14_1 6_2 [ C]
\end{array}
\]
\end{example}
\begin{example} For $n=30$:
\[
\begin{array}{@{}*{6}l@{}}
3_2 7_2 10_2 [ 0] & 0_0 8_0 5_0 [18] & 18_1 26_1 23_1 [12] & 18_0 24_1 22_2 [28] & 22_0 16_1 20_2 [ 2] & 28_0 9_1 15_2 [27] \\
6_0 25_1 12_2 [ 3] & 27_0 0_1 2_2 [23] & 23_0 20_1 25_2 [ 7] & 10_0 17_1 11_2 [17] & 4_0 27_1 28_2 [13] & 9_0 13_0 21_2 [ 6] \\
15_1 19_1 27_2 [24] & 19_0 29_1 26_2 [22] & 21_0 11_1 18_2 [ 8] & 7_0 12_1 23_2 [21] & 3_0 28_1 14_2 [ 9] & 1_0 13_1 16_2 [ 1] \\
14_0 2_1 17_2 [29] & 15_0 24_0 4_2 [20] & 5_1 14_1 24_2 [10] & 25_0 26_0 9_2 [26] & 21_1 22_1 5_2 [ 4] & 29_0 1_1 19_2 [11] \\
12_0 10_1 0_2 [19] & 2_0 6_1 1_2 [ 5] & 11_0 7_1 6_2 [25] & 20_0 3_1 29_2 [14] & 17_0 4_1 13_2 [16] & 16_0 8_1 8_2 [ R] \\
8_0 16_1 8_2 [ C]
\end{array}
\]
\end{example}
\begin{example} For $n=36$:
\[
\begin{array}{@{}*{6}l@{}}
6_2 18_2 35_2 [ 0] & 0_0 4_0 7_0 [26] & 26_1 30_1 33_1 [10] & 34_0 4_1 3_2 [14] & 18_0 12_1 17_2 [22] & 32_0 7_1 33_2 [35] \\
6_0 31_1 32_2 [ 1] & 35_0 2_1 15_2 [25] & 27_0 24_1 4_2 [11] & 28_0 35_1 19_2 [31] & 30_0 23_1 14_2 [ 5] & 21_0 25_1 28_2 [ 8] \\
33_0 29_1 0_2 [28] & 25_0 1_1 31_2 [30] & 31_0 19_1 25_2 [ 6] & 15_0 20_1 2_2 [27] & 11_0 6_1 29_2 [ 9] & 3_0 13_1 1_2 [ 7] \\
20_0 10_1 8_2 [29] & 19_0 28_1 21_2 [20] & 12_0 3_1 5_2 [16] & 17_0 18_1 13_2 [34] & 16_0 15_1 11_2 [ 2] & 13_0 27_1 24_2 [19] \\
10_0 32_1 7_2 [17] & 9_0 22_0 34_2 [12] & 21_1 34_1 10_2 [24] & 29_0 2_0 12_2 [15] & 8_1 17_1 27_2 [21] & 26_0 9_1 30_2 [32] \\
5_0 22_1 26_2 [ 4] & 1_0 23_0 9_2 [13] & 14_1 0_1 22_2 [23] & 8_0 14_0 23_2 [33] & 5_1 11_1 20_2 [ 3] & 24_0 16_1 16_2 [ R] \\
16_0 24_1 16_2 [ C]
\end{array}
\]
\end{example}
% [extraction artifact removed: dataset chunk-boundary metadata, a stray unmatched \end{example},
%  and verbatim duplicates of the $n=30$ and $n=36$ examples already given above]
\begin{example} For $n=46$:
\[
\begin{array}{@{}*{6}l@{}}
4_2 17_2 35_2 [ 0] & 0_0 4_0 7_0 [36] & 36_1 40_1 43_1 [10] & 44_0 4_1 45_2 [22] & 26_0 20_1 21_2 [24] & 40_0 5_1 44_2 [25] \\
30_0 19_1 23_2 [21] & 45_0 2_1 1_2 [45] & 1_0 44_1 0_2 [ 1] & 42_0 3_1 11_2 [ 3] & 6_0 45_1 14_2 [43] & 37_0 41_1 27_2 [ 2] \\
43_0 39_1 29_2 [44] & 21_0 33_1 12_2 [16] & 3_0 37_1 28_2 [30] & 17_0 22_1 5_2 [11] & 33_0 28_1 16_2 [35] & 13_0 23_1 20_2 [39] \\
16_0 6_1 13_2 [ 7] & 25_0 34_1 7_2 [32] & 20_0 11_1 39_2 [14] & 31_0 32_1 37_2 [28] & 14_0 13_1 19_2 [18] & 41_0 9_1 30_2 [13] \\
22_0 8_1 43_2 [33] & 15_0 32_0 42_2 [ 6] & 21_1 38_1 2_2 [40] & 19_0 24_0 36_2 [ 5] & 24_1 29_1 41_2 [41] & 38_0 7_1 18_2 [ 4] \\
11_0 42_1 22_2 [42] & 27_0 9_0 40_2 [37] & 18_1 0_1 31_2 [ 9] & 8_0 18_0 38_2 [17] & 25_1 35_1 9_2 [29] & 36_0 12_1 6_2 [27] \\
39_0 17_1 33_2 [19] & 10_0 31_1 34_2 [20] & 5_0 30_1 8_2 [26] & 28_0 1_1 15_2 [34] & 35_0 16_1 3_2 [12] & 2_0 15_1 24_2 [ 8] \\
23_0 10_1 32_2 [38] & 12_0 14_1 10_2 [15] & 29_0 27_1 25_2 [31] & 34_0 26_1 26_2 [ R] & 26_0 34_1 26_2 [ C]
\end{array}
\]
\end{example}
% [extraction artifact removed: dataset chunk-boundary metadata (token counts, language/split tags, chunk id)]
\section{Starters and adders for small \mbox{\boldmath $\mathrm{GHD}(n+2,3n)$}}
\label{starters and adders}
First we give those obtained by transitive starters and adders:
\begin{example} For $n=8$:
\[
\begin{array}{@{}*{8}l@{}}
0_{1} 6_{1} 7_{1} [2] & 4_{1} 2_{1} 7_{0} [3] & 6_{0} 8_{0} 8_{1} [8] & 0_{0} 1_{0} 4_{0} [7] & \infty_0 2_{0} 3_{1} [1] & \infty_1 5_{0} 9_{1} [4] & \infty_2 3_{0} 1_{1} [9] & \infty_3 9_{0} 5_{1} [6]
\end{array}
\] \end{example}
\begin{example} For $n=9$:
\[
\begin{array}{@{}*{7}l@{}}
4_{0} 5_{1} 3_{1} [8] & 10_{0} 2_{0} 3_{0} [4] & 6_{0} 8_{0} 6_{1} [3] & 9_{1} 10_{1} 2_{1} [6] & \infty_0 0_{0} 4_{1} [2] & \infty_1 7_{0} 1_{1} [9] & \infty_2 9_{0} 0_{1} [1] \\
\infty_3 5_{0} 8_{1} [10] & \infty_4 1_{0} 7_{1} [7] &
\end{array}
\] \end{example}
\begin{example} For $n=10$:
\[
\begin{array}{@{}*{7}l@{}}
3_{1} 10_{1} 11_{1} [3] & 3_{0} 4_{0} 6_{0} [10] & 1_{0} 8_{0} 4_{1} [7] & 7_{0} 5_{1} 8_{1} [4] & \infty_{0} 0_{0} 0_{1} [5] & \infty_{1} 11_{0} 6_{1} [1] & \infty_{2} 10_{0} 9_{1} [11] \\
\infty_{3} 5_{0} 2_{1} [2] & \infty_{4} 2_{0} 7_{1} [8] & \infty_{5} 9_{0} 1_{1} [9]
\end{array}
\] \end{example}
\begin{example} For $n=11$:
\[
\begin{array}{lllllll}
3_{1} 11_{0} 0_{0} [7] & 9_{0} 2_{0} 5_{0} [4] & 10_{1} 11_{1} 2_{1} [6] & 4_{1} 10_{0} 6_{1}[1] & \infty_0 3_{0} 0_{1}[0] & \infty_1 12_{0} 5_{1} [9] & \infty_2 8_{0} 9_{1} [2] \\
\infty_3 6_{0} 1_{1} [8] & \infty_4 4_{0} 8_{1} [11] & \infty_5 1_{0} 12_{1} [3] & \infty_6 7_{0} 7_{1} [5]
\end{array}
\] \end{example}
\begin{example} For $n=12$:
\[
\begin{array}{@{}*{7}l@{}}
3_{1} 7_{0} 2_{0} [2] & 8_{1} 0_{1} 10_{1} [6] & 1_{1} 6_{1} 6_{0} [11] & 1_{0} 3_{0} 4_{0} [9] & \infty_{0} 11_{0} 13_{1} [5] & \infty_{1} 0_{0} 5_{1} [8] \\
\infty_{2} 5_{0} 9_{1} [1] & \infty_{3} 8_{0} 2_{1} [13] & \infty_{4} 10_{0} 7_{1} [4] & \infty_{5} 12_{0} 4_{1} [3] & \infty_{6} 13_{0} 11_{1} [12] & \infty_{7} 9_{0} 12_{1} [10] &
\end{array}
\] \end{example}
\begin{example} For $n=13$:
\[
\begin{array}{lllllll}
0_{0} 4_{1} 8_{1} [4] & 6_{0} 7_{0} 9_{0} [8] & 11_{0} 1_{0} 2_{1} [7] & 5_{1} 6_{1} 11_{1} [9] & \infty_0 13_{0} 7_{1} [0] & \infty_1 8_{0} 3_{1} [14] \\
\infty_2 4_{0} 1_{1} [5] & \infty_3 14_{0} 13_{1} [12] & \infty_4 12_{0} 0_{1} [13] & \infty_5 10_{0} 12_{1} [6] & \infty_6 5_{0} 10_{1} [1] & \infty_7 3_{0} 14_{1} [2] \\
\infty_8 2_{0} 9_{1} [10]
\end{array}
\] \end{example}
\begin{example} For $n=14$:
\[
\begin{array}{@{}*{7}l@{}}
6_{0} 9_{1} 8_{1} [5] & 9_{0} 13_{0} 0_{0} [7] & 10_{0} 5_{1} 11_{0} [3] & 0_{1} 4_{1} 6_{1} [1] & \infty_0 3_{0} 12_{1} [6] & \infty_1 12_{0} 10_{1} [9] \\
\infty_2 8_{0} 14_{1} [14] & \infty_3 15_{0} 3_{1} [13] & \infty_4 4_{0} 11_{1} [4] & \infty_5 7_{0} 15_{1} [11] & \infty_6 1_{0} 2_{1} [2] & \infty_7 2_{0} 7_{1} [15] \\
\infty_8 5_{0} 1_{1} [10] & \infty_9 14_{0} 13_{1} [12]
\end{array}
\] \end{example}
\begin{example} For $n=15$:
\[
\begin{array}{@{}*{7}l@{}}
10_{1} 8_{1} 16_{1} [7] & 16_{0} 4_{0} 13_{1} [11] & 7_{0} 6_{1} 5_{1} [4] & 2_{0} 13_{0} 0_{0} [16] & \infty_0 14_{0} 4_{1} [0] & \infty_1 15_{0} 1_{1} [15] \\
\infty_2 10_{0} 11_{1} [9] & \infty_3 6_{0} 0_{1} [14] & \infty_4 12_{0} 7_{1} [12] & \infty_5 9_{0} 14_{1} [8] & \infty_6 1_{0} 3_{1} [5] & \infty_7 3_{0} 9_{1} [2] \\
\infty_8 8_{0} 12_{1} [1] & \infty_{9} 5_{0} 15_{1} [3] & \infty_{10} 11_{0} 2_{1} [10]
\end{array}
\] \end{example}
\begin{example} For $n=16$:
\[
\begin{array}{lllllll}
0_{0} 16_{0} 4_{0} [10] & 17_{1} 2_{1} 6_{0} [14] & 12_{1} 2_{0} 17_{0} [5] & 13_{1} 0_{1} 1_{1} [7] & \infty_{0} 14_{0} 14_{1} [1] & \infty_{1} 15_{0} 5_{1} [4] \\
\infty_{2} 13_{0} 4_{1} [17] & \infty_{3} 11_{0} 15_{1} [13] & \infty_{4} 9_{0} 3_{1} [8] & \infty_{5} 10_{0} 16_{1} [6] & \infty_{6} 3_{0} 10_{1} [2] & \infty_{7} 12_{0} 9_{1} [15] \\
\infty_{8} 7_{0} 8_{1} [11] & \infty_{9} 8_{0} 11_{1} [3] & \infty_{10} 1_{0} 6_{1} [12] & \infty_{11} 5_{0} 7_{1} [16]
\end{array}
\] \end{example}
\begin{example} For $n=17$:
\[
\begin{array}{lllllll}
11_{1} 3_{1} 7_{0} [2] & 10_{0} 5_{1} 14_{0} [5] & 0_{1} 10_{1} 15_{1} [16] & 11_{0} 1_{0} 3_{0} [15] & \infty_0 2_{0} 8_{1} [0] & \infty_1 17_{0} 9_{1} [8] \\
\infty_2 9_{0} 12_{1} [11] & \infty_3 4_{0} 4_{1} [10] & \infty_4 0_{0} 18_{1} [12] & \infty_5 18_{0} 1_{1} [18] & \infty_6 12_{0} 2_{1} [1] & \infty_7 15_{0} 16_{1} [9] \\
\infty_8 16_{0} 13_{1} [7] & \infty_{9} 6_{0} 14_{1} [4] & \infty_{10} 5_{0} 17_{1} [17] & \infty_{11} 13_{0} 7_{1} [14] & \infty_{12} 8_{0} 6_{1} [3]
\end{array}
\] \end{example}
\begin{example} For $n=18$:
\[
\begin{array}{lllllll}
17_{0} 18_{1} 17_{1} [7] & 1_{0} 15_{0} 10_{0} [17] & 19_{0} 16_{0} 1_{1} [1] & 0_{1} 2_{1} 16_{1} [15] & \infty_0 9_{0} 5_{1} [4] & \infty_1 8_{0} 6_{1} [13] \\
\infty_2 18_{0} 11_{1} [12] & \infty_3 6_{0} 3_{1} [5] & \infty_4 5_{0} 4_{1} [3] & \infty_5 14_{0} 9_{1} [11] & \infty_6 4_{0} 13_{1} [19] & \infty_7 2_{0} 12_{1} [14] \\
\infty_8 13_{0} 7_{1} [9] & \infty_9 3_{0} 14_{1} [16] & \infty_{10} 7_{0} 10_{1} [8] & \infty_{11} 11_{0} 15_{1} [18] & \infty_{12} 12_{0} 19_{1} [2] & \infty_{13} 0_{0} 8_{1} [6]
\end{array}
\] \end{example}
\begin{example} For $n=19$:
\[
\begin{array}{llllll}
0_{0} 1_{0} 5_{0} [0] & 16_{1} 3_{1} 7_{0} [1] & 18_{0} 16_{0} 8_{1} [16] & 6_{1} 7_{1} 9_{1} [12] & \infty_0 20_{0} 20_{1} [3] & \infty_1 12_{0} 14_{1} [2] \\
\infty_2 17_{0} 4_{1} [10] & \infty_3 9_{0} 2_{1} [6] & \infty_4 10_{0} 11_{1} [9] & \infty_5 19_{0} 17_{1} [5] & \infty_6 14_{0} 0_{1} [11] & \infty_7 4_{0} 1_{1} [14] \\
\infty_8 3_{0} 18_{1} [13] & \infty_{9} 2_{0} 12_{1} [15] & \infty_{10} 13_{0} 19_{1} [7] & \infty_{11} 15_{0} 10_{1} [18] & \infty_{12} 6_{0} 5_{1} [4] & \infty_{13} 11_{0} 15_{1} [19] \\
\infty_{14} 8_{0} 13_{1} [20]
\end{array}
\] \end{example}
% [extraction artifact removed: dataset chunk-boundary metadata and verbatim duplicates of the
%  $n=17$, $n=18$ and $n=19$ examples already given above]
\begin{example} For $n=20$:
\[
\begin{array}{@{}*{6}l@{}}
1_{1} 2_{1} 9_{0} [15] & 16_{1} 5_{0} 10_{0} [21] & 0_{0} 20_{0} 4_{0} [3] & 7_{1} 17_{1} 21_{1} [5] & \infty_0 15_{0} 18_{1} [1] & \infty_1 18_{0} 15_{1} [18] \\
\infty_2 11_{0} 5_{1} [4] & \infty_3 2_{0} 9_{1} [9] & \infty_4 13_{0} 4_{1} [19] & \infty_5 1_{0} 10_{1} [20] & \infty_6 16_{0} 20_{1} [12] & \infty_7 14_{0} 13_{1} [16] \\
\infty_8 7_{0} 3_{1} [10] & \infty_9 12_{0} 0_{1} [6] & \infty_{10} 6_{0} 14_{1} [7] & \infty_{11} 3_{0} 8_{1} [17] & \infty_{12} 17_{0} 12_{1} [2] & \infty_{13} 8_{0} 6_{1} [14] \\
\infty_{14} 19_{0} 19_{1} [8] & \infty_{15} 21_{0} 11_{1} [13] &
\end{array}
\] \end{example}
\begin{example} For $n=21$:
\[
\begin{array}{llllll}
22_{0} 5_{0} 7_{0} [13] & 21_{0} 14_{0} 3_{1} [5] & 20_{1} 8_{1} 13_{0} [15] & 12_{1} 4_{1} 5_{1} [2] & \infty_0 11_{0} 9_{1} [0] & \infty_1 15_{0} 17_{1} [1] \\
\infty_2 0_{0} 13_{1} [4] & \infty_3 12_{0} 0_{1} [11] & \infty_4 3_{0} 22_{1} [3] & \infty_5 17_{0} 2_{1} [8] & \infty_6 6_{0} 15_{1} [9] & \infty_7 8_{0} 11_{1} [16] \\
\infty_8 19_{0} 18_{1} [21] & \infty_{9} 2_{0} 19_{1} [7] & \infty_{10} 4_{0} 14_{1} [6] & \infty_{11} 10_{0} 10_{1} [12] & \infty_{12} 1_{0} 16_{1} [20] & \infty_{13} 16_{0} 7_{1} [14] \\
\infty_{14} 20_{0} 21_{1} [17] & \infty_{15} 9_{0} 6_{1} [22] & \infty_{16} 18_{0} 1_{1} [18]
\end{array}
\] \end{example}
\begin{example} For $n=22$:
\[
\begin{array}{@{}*{6}l@{}}
13_{1} 9_{1} 20_{0} [3] & 10_{0} 23_{0} 7_{1} [15] & 17_{1} 22_{1} 19_{1} [9] & 19_{0} 0_{0} 1_{0} [21] & \infty_0 3_{0} 18_{1} [1] & \infty_1 4_{0} 6_{1} [2] \\
\infty_2 11_{0} 10_{1} [7] & \infty_3 14_{0} 4_{1} [5] & \infty_4 13_{0} 5_{1} [20] & \infty_5 17_{0} 20_{1} [14] & \infty_6 12_{0} 12_{1} [17] & \infty_7 15_{0} 16_{1} [11] \\
\infty_8 22_{0} 2_{1} [22] & \infty_9 9_{0} 21_{1} [23] & \infty_{10} 18_{0} 3_{1} [18] & \infty_{11} 8_{0} 14_{1} [16] & \infty_{12} 21_{0} 8_{1} [6] & \infty_{13} 16_{0} 23_{1} [19] \\
\infty_{14} 6_{0} 11_{1} [4] & \infty_{15} 7_{0} 1_{1} [10] & \infty_{16} 2_{0} 0_{1} [13] & \infty_{17} 5_{0} 15_{1} [8] &
\end{array}
\] \end{example}
\begin{example} For $n=23$:
\[
\begin{array}{@{}*{6}l@{}}
12_{0} 8_{0} 21_{1} [1] & 19_{1} 17_{0} 20_{0} [6] & 14_{1} 13_{0} 7_{0} [14] & 2_{1} 11_{1} 16_{1} [7] & \infty_0 18_{0} 5_{1} [0] & \infty_1 6_{0} 12_{1} [2] \\
\infty_2 3_{0} 22_{1} [9] & \infty_3 19_{0} 17_{1} [10] & \infty_4 5_{0} 20_{1} [17] & \infty_5 22_{0} 13_{1} [19] & \infty_6 24_{0} 7_{1} [8] & \infty_7 1_{0} 18_{1} [24] \\
\infty_8 15_{0} 1_{1} [15] & \infty_9 0_{0} 10_{1} [3] & \infty_{10} 21_{0} 24_{1} [21] & \infty_{11} 10_{0} 15_{1} [4] & \infty_{12} 9_{0} 4_{1} [22] & \infty_{13} 11_{0} 8_{1} [13] \\
\infty_{14} 14_{0} 3_{1} [5] & \infty_{15} 23_{0} 23_{1} [12] & \infty_{16} 2_{0} 6_{1} [18] & \infty_{17} 16_{0} 9_{1} [20] & \infty_{18} 4_{0} 0_{1} [11] &
\end{array}
\] \end{example}
\begin{example} For $n=24$:
\[
\begin{array}{@{}*{6}l@{}}
4_{0} 24_{0} 23_{0} [12] & 14_{0} 0_{0} 5_{0} [14] & 4_{1} 15_{1} 16_{1} [19] & 8_{1} 12_{1} 17_{1} [7] & \infty_0 21_{0} 13_{1} [1] & \infty_1 1_{0} 10_{1} [2] \\
\infty_2 15_{0} 25_{1} [5] & \infty_3 13_{0} 21_{1} [18] & \infty_4 16_{0} 20_{1} [11] & \infty_5 25_{0} 0_{1} [22] & \infty_6 11_{0} 14_{1} [23] & \infty_7 12_{0} 19_{1} [6] \\
\infty_8 20_{0} 6_{1} [10] & \infty_9 6_{0} 11_{1} [17] & \infty_{10} 17_{0} 23_{1} [21] & \infty_{11} 22_{0} 24_{1} [3] & \infty_{12} 3_{0} 3_{1} [4] & \infty_{13} 2_{0} 1_{1} [9] \\
\infty_{14} 8_{0} 22_{1} [24] & \infty_{15} 7_{0} 18_{1} [8] & \infty_{16} 18_{0} 7_{1} [25] & \infty_{17} 10_{0} 5_{1} [16] & \infty_{18} 9_{0} 2_{1} [15] & \infty_{19} 19_{0} 9_{1} [20]
\end{array}
\] \end{example}
% [extraction artifact removed: dataset chunk-boundary metadata and verbatim duplicates of the
%  $n=23$ and $n=24$ examples already given above]
\begin{example} For $n=25$:
\[
\begin{array}{@{}*{6}l@{}}
15_{1} 3_{1} 5_{1} [19] & 7_{0} 1_{0} 11_{0} [9] & 16_{1} 21_{1} 25_{1} [25] & 14_{0} 9_{0} 16_{0} [8] & \infty_0 13_{0} 11_{1} [0] & \infty_1 2_{0} 17_{1} [1] \\
\infty_2 12_{0} 7_{1} [2] & \infty_3 5_{0} 23_{1} [3] & \infty_4 21_{0} 22_{1} [5] & \infty_5 0_{0} 4_{1} [6] & \infty_6 25_{0} 9_{1} [23] & \infty_7 18_{0} 26_{1} [18] \\
\infty_8 6_{0} 13_{1} [26] & \infty_9 20_{0} 19_{1} [14] & \infty_{10} 23_{0} 8_{1} [22] & \infty_{11} 24_{0} 18_{1} [7] & \infty_{12} 26_{0} 1_{1} [12] & \infty_{13} 3_{0} 0_{1} [16] \\
\infty_{14} 22_{0} 12_{1} [17] & \infty_{15} 17_{0} 10_{1} [10] & \infty_{16} 8_{0} 14_{1} [21] & \infty_{17} 10_{0} 6_{1} [15] & \infty_{18} 15_{0} 2_{1} [13] & \infty_{19} 4_{0} 20_{1} [11] \\
\infty_{20} 19_{0} 24_{1} [4] &
\end{array}
\] \end{example}
\begin{example} For $n=26$:
\[
\begin{array}{@{}*{6}l@{}}
23_{0} 23_{1} 1_{1} [11] & 8_{0} 9_{0} 20_{0} [5] & 11_{0} 3_{0} 7_{1} [15] & 16_{1} 5_{1} 6_{1} [25] & \infty_0 1_{0} 8_{1} [1] & \infty_1 2_{0} 21_{1} [2] \\
\infty_2 13_{0} 2_{1} [3] & \infty_3 0_{0} 10_{1} [9] & \infty_4 27_{0} 11_{1} [4] & \infty_5 25_{0} 15_{1} [20] & \infty_6 24_{0} 17_{1} [12] & \infty_7 12_{0} 26_{1} [27] \\
\infty_8 16_{0} 18_{1} [24] & \infty_9 7_{0} 22_{1} [22] & \infty_{10} 19_{0} 24_{1} [8] & \infty_{11} 5_{0} 14_{1} [10] & \infty_{12} 15_{0} 3_{1} [7] & \infty_{13} 17_{0} 0_{1} [21] \\
\infty_{14} 26_{0} 25_{1} [23] & \infty_{15} 10_{0} 4_{1} [13] & \infty_{16} 14_{0} 27_{1} [19] & \infty_{17} 6_{0} 9_{1} [18] & \infty_{18} 4_{0} 12_{1} [16] & \infty_{19} 18_{0} 19_{1} [17] \\
\infty_{20} 21_{0} 13_{1} [26] & \infty_{21} 22_{0} 20_{1} [6] &
\end{array}
\] \end{example}
\begin{example} For $n=27$:
\[
\begin{array}{@{}*{6}l@{}}
4_{0} 8_{0} 22_{0} [17] & 11_{0} 19_{0} 22_{1} [11] & 5_{1} 18_{1} 6_{0} [14] & 2_{1} 10_{1} 4_{1} [16] & \infty_0 24_{0} 16_{1} [0] & \infty_1 14_{0} 27_{1} [1] \\
\infty_2 25_{0} 14_{1} [3] & \infty_3 9_{0} 26_{1} [4] & \infty_4 27_{0} 8_{1} [2] & \infty_5 10_{0} 19_{1} [6] & \infty_6 26_{0} 20_{1} [7] & \infty_7 0_{0} 24_{1} [27] \\
\infty_8 16_{0} 21_{1} [21] & \infty_9 18_{0} 9_{1} [5] & \infty_{10} 20_{0} 17_{1} [12] & \infty_{11} 23_{0} 25_{1} [13] & \infty_{12} 12_{0} 13_{1} [22] & \infty_{13} 2_{0} 0_{1} [15] \\
\infty_{14} 3_{0} 28_{1} [8] & \infty_{15} 13_{0} 6_{1} [28] & \infty_{16} 1_{0} 15_{1} [25] & \infty_{17} 15_{0} 1_{1} [23] & \infty_{18} 17_{0} 23_{1} [18] & \infty_{19} 7_{0} 7_{1} [24] \\
\infty_{20} 28_{0} 3_{1} [20] & \infty_{21} 21_{0} 11_{1} [26] & \infty_{22} 5_{0} 12_{1} [9] &
\end{array}
\] \end{example}
\begin{example} For $n=28$:
\[
\begin{array}{@{}*{6}l@{}}
22_{0} 3_{0} 6_{0} [26] & 27_{0} 2_{0} 28_{0} [24] & 10_{1} 18_{1} 13_{1} [23] & 8_{1} 14_{1} 27_{1} [17] & \infty_0 12_{0} 25_{1} [1] & \infty_1 10_{0} 22_{1} [2] \\
\infty_2 16_{0} 2_{1} [3] & \infty_3 1_{0} 19_{1} [4] & \infty_4 5_{0} 5_{1} [5] & \infty_5 15_{0} 24_{1} [8] & \infty_6 24_{0} 29_{1} [13] & \infty_7 4_{0} 23_{1} [29] \\
\infty_8 25_{0} 12_{1} [9] & \infty_9 7_{0} 17_{1} [20] & \infty_{10} 14_{0} 6_{1} [14] & \infty_{11} 26_{0} 28_{1} [18] & \infty_{12} 13_{0} 16_{1} [12] & \infty_{13} 0_{0} 7_{1} [11] \\
\infty_{14} 18_{0} 11_{1} [28] & \infty_{15} 29_{0} 26_{1} [21] & \infty_{16} 20_{0} 4_{1} [25] & \infty_{17} 21_{0} 20_{1} [10] & \infty_{18} 17_{0} 21_{1} [22] & \infty_{19} 9_{0} 0_{1} [27] \\
\infty_{20} 23_{0} 1_{1} [7] & \infty_{21} 8_{0} 3_{1} [16] & \infty_{22} 19_{0} 15_{1} [19] & \infty_{23} 11_{0} 9_{1} [6] &
\end{array}
\] \end{example}
% [extraction artifact removed: dataset chunk-boundary metadata and verbatim duplicates of the
%  $n=27$ and $n=28$ examples already given above]
\begin{example} For $n=29$:
\[
\begin{array}{@{}*{6}l@{}}
12_{0} 17_{0} 13_{1} [29] & 9_{0} 10_{0} 0_{0} [13] & 7_{1} 23_{1} 25_{1} [7] & 27_{1} 30_{1} 2_{0} [16] & \infty_{0} 27_{0} 19_{1} [0] & \infty_{1} 20_{0} 2_{1} [1] \\
\infty_{2} 15_{0} 22_{1} [2] & \infty_{3} 26_{0} 5_{1} [3] & \infty_{4} 3_{0} 18_{1} [4] & \infty_{5} 28_{0} 15_{1} [5] & \infty_{6} 22_{0} 26_{1} [9] & \infty_{7} 21_{0} 29_{1} [11] \\
\infty_{8} 14_{0} 28_{1} [10] & \infty_{9} 18_{0} 8_{1} [17] & \infty_{10} 6_{0} 17_{1} [30] & \infty_{11} 16_{0} 14_{1} [14] & \infty_{12} 24_{0} 12_{1} [19] & \infty_{13} 29_{0} 20_{1} [21] \\
\infty_{14} 19_{0} 0_{1} [18] & \infty_{15} 8_{0} 24_{1} [12] & \infty_{16} 25_{0} 3_{1} [20] & \infty_{17} 4_{0} 4_{1} [22] & \infty_{18} 7_{0} 10_{1} [27] & \infty_{19} 30_{0} 1_{1} [26] \\
\infty_{20} 23_{0} 9_{1} [24] & \infty_{21} 11_{0} 16_{1} [28] & \infty_{22} 5_{0} 11_{1} [6] & \infty_{23} 1_{0} 21_{1} [8] & \infty_{24} 13_{0} 6_{1} [15] &
\end{array}
\] \end{example}
\begin{example} For $n=31$:
\[
\begin{array}{@{}*{6}l@{}}
11_{0} 19_{0} 13_{0} [28] & 22_{1} 17_{1} 24_{1} [26] & 4_{0} 24_{0} 27_{0} [9] & 28_{1} 19_{1} 31_{1} [11] & \infty_{0} 1_{0} 5_{1} [0] & \infty_{1} 23_{0} 30_{1} [1] \\
\infty_{2} 25_{0} 26_{1} [2] & \infty_{3} 17_{0} 10_{1} [3] & \infty_{4} 31_{0} 8_{1} [4] & \infty_{5} 5_{0} 18_{1} [5] & \infty_{6} 6_{0} 20_{1} [6] & \infty_{7} 12_{0} 3_{1} [13] \\
\infty_{8} 28_{0} 13_{1} [31] & \infty_{9} 8_{0} 25_{1} [7] & \infty_{10} 14_{0} 11_{1} [16] & \infty_{11} 3_{0} 12_{1} [8] & \infty_{12} 20_{0} 23_{1} [17] & \infty_{13} 9_{0} 14_{1} [23] \\
\infty_{14} 26_{0} 21_{1} [12] & \infty_{15} 21_{0} 0_{1} [29] & \infty_{16} 16_{0} 32_{1} [15] & \infty_{17} 22_{0} 4_{1} [30] & \infty_{18} 15_{0} 7_{1} [14] & \infty_{19} 0_{0} 6_{1} [18] \\
\infty_{20} 10_{0} 9_{1} [32] & \infty_{21} 29_{0} 27_{1} [25] & \infty_{22} 32_{0} 1_{1} [24] & \infty_{23} 30_{0} 16_{1} [19] & \infty_{24} 18_{0} 29_{1} [22] & \infty_{25} 7_{0} 15_{1} [21] \\
\infty_{26} 2_{0} 2_{1} [20] &
\end{array}
\] \end{example}
\begin{example} For $n=32$:
\[
\begin{array}{@{}*{6}l@{}}
0_0 3_0 11_0 [31] & 12_0 13_0 27_0 [3] & 23_1 24_1 29_1 [7] & 15_1 26_1 5_1 [27] & \infty_0 17_0 17_1 [9] & \infty_1 15_0 16_1 [12] \\
\infty_2 26_0 28_1 [26] & \infty_3 1_0 4_1 [1] & \infty_4 2_0 6_1 [4] & \infty_5 4_0 9_1 [5] & \infty_6 5_0 11_1 [6] & \infty_7 6_0 13_1 [11] \\
\infty_8 10_0 18_1 [22] & \infty_9 16_0 25_1 [13] & \infty_{10} 9_0 19_1 [30] & \infty_{11} 19_0 30_1 [25] & \infty_{12} 20_0 32_1 [2] & \infty_{13} 18_0 31_1 [15] \\
\infty_{14} 21_0 1_1 [32] & \infty_{15} 22_0 3_1 [24] & \infty_{16} 28_0 10_1 [19] & \infty_{17} 25_0 8_1 [10] & \infty_{18} 23_0 7_1 [18] & \infty_{19} 29_0 14_1 [29] \\
\infty_{20} 14_0 0_1 [23] & \infty_{21} 33_0 20_1 [21] & \infty_{22} 24_0 12_1 [33] & \infty_{23} 32_0 21_1 [16] & \infty_{24} 31_0 22_1 [28] & \infty_{25} 7_0 33_1 [14] \\
\infty_{26} 8_0 2_1 [20] & \infty_{27} 30_0 27_1 [8]
\end{array}
\]
\end{example}
| 3,464 | 67,737 |
en
|
train
|
0.100.22
|
\begin{example} For $n=31$:
\[
\begin{array}{@{}*{6}l@{}}
11_{0} 19_{0} 13_{0} [28] & 22_{1} 17_{1} 24_{1} [26] & 4_{0} 24_{0} 27_{0} [9] & 28_{1} 19_{1} 31_{1} [11] & \infty_{0} 1_{0} 5_{1} [0] & \infty_{1} 23_{0} 30_{1} [1] \\
\infty_{2} 25_{0} 26_{1} [2] & \infty_{3} 17_{0} 10_{1} [3] & \infty_{4} 31_{0} 8_{1} [4] & \infty_{5} 5_{0} 18_{1} [5] & \infty_{6} 6_{0} 20_{1} [6] & \infty_{7} 12_{0} 3_{1} [13] \\
\infty_{8} 28_{0} 13_{1} [31] & \infty_{9} 8_{0} 25_{1} [7] & \infty_{10} 14_{0} 11_{1} [16] & \infty_{11} 3_{0} 12_{1} [8] & \infty_{12} 20_{0} 23_{1} [17] & \infty_{13} 9_{0} 14_{1} [23] \\
\infty_{14} 26_{0} 21_{1} [12] & \infty_{15} 21_{0} 0_{1} [29] & \infty_{16} 16_{0} 32_{1} [15] & \infty_{17} 22_{0} 4_{1} [30] & \infty_{18} 15_{0} 7_{1} [14] & \infty_{19} 0_{0} 6_{1} [18] \\
\infty_{20} 10_{0} 9_{1} [32] & \infty_{21} 29_{0} 27_{1} [25] & \infty_{22} 32_{0} 1_{1} [24] & \infty_{23} 30_{0} 16_{1} [19] & \infty_{24} 18_{0} 29_{1} [22] & \infty_{25} 7_{0} 15_{1} [21] \\
\infty_{26} 2_{0} 2_{1} [20] &
\end{array}
\] \end{example}
\begin{example} For $n=32$:
\[
\begin{array}{@{}*{6}l@{}}
0_0 3_0 11_0 [31] & 12_0 13_0 27_0 [3] & 23_1 24_1 29_1 [7] & 15_1 26_1 5_1 [27] & \infty_0 17_0 17_1 [9] & \infty_1 15_0 16_1 [12] \\
\infty_2 26_0 28_1 [26] & \infty_3 1_0 4_1 [1] & \infty_4 2_0 6_1 [4] & \infty_5 4_0 9_1 [5] & \infty_6 5_0 11_1 [6] & \infty_7 6_0 13_1 [11] \\
\infty_8 10_0 18_1 [22] & \infty_9 16_0 25_1 [13] & \infty_{10} 9_0 19_1 [30] & \infty_{11} 19_0 30_1 [25] & \infty_{12} 20_0 32_1 [2] & \infty_{13} 18_0 31_1 [15] \\
\infty_{14} 21_0 1_1 [32] & \infty_{15} 22_0 3_1 [24] & \infty_{16} 28_0 10_1 [19] & \infty_{17} 25_0 8_1 [10] & \infty_{18} 23_0 7_1 [18] & \infty_{19} 29_0 14_1 [29] \\
\infty_{20} 14_0 0_1 [23] & \infty_{21} 33_0 20_1 [21] & \infty_{22} 24_0 12_1 [33] & \infty_{23} 32_0 21_1 [16] & \infty_{24} 31_0 22_1 [28] & \infty_{25} 7_0 33_1 [14] \\
\infty_{26} 8_0 2_1 [20] & \infty_{27} 30_0 27_1 [8]
\end{array}
\]
\end{example}
\begin{example} For $n=33$:
\[
\begin{array}{@{}*{6}l@{}}
0_0 11_0 32_0 [0] & 6_0 23_0 25_0 [ 8] & 16_1 17_1 21_1 [11] & 7_1 27_1 33_1 [32] & \infty_0 17_0 18_1 [22] & \infty_1 26_0 28_1 [14] \\
\infty_2 16_0 19_1 [26] & \infty_3 1_0 5_1 [ 1] & \infty_4 3_0 8_1 [ 3] & \infty_5 4_0 10_1 [ 4] & \infty_6 2_0 9_1 [13] & \infty_7 5_0 13_1 [21] \\
\infty_8 14_0 23_1 [10] & \infty_9 10_0 20_1 [15] & \infty_{10} 13_0 24_1 [34] & \infty_{11} 18_0 31_1 [ 5] & \infty_{12} 20_0 34_1 [ 9] & \infty_{13} 21_0 1_1 [24] \\
\infty_{14} 31_0 12_1 [17] & \infty_{15} 22_0 4_1 [33] & \infty_{16} 28_0 11_1 [ 2] & \infty_{17} 30_0 14_1 [ 6] & \infty_{18} 15_0 0_1 [19] & \infty_{19} 29_0 15_1 [23] \\
\infty_{20} 19_0 6_1 [25] & \infty_{21} 7_0 30_1 [31] & \infty_{22} 8_0 32_1 [20] & \infty_{23} 12_0 2_1 [ 7] & \infty_{24} 34_0 25_1 [28] & \infty_{25} 9_0 3_1 [12] \\
\infty_{26} 33_0 29_1 [18] & \infty_{27} 24_0 22_1 [29] & \infty_{28} 27_0 26_1 [30]
\end{array}
\]
\end{example}
\begin{example} For $n=34$:
\[
\begin{array}{@{}*{6}l@{}}
0_0 1_0 11_0 [35] & 15_0 17_0 32_0 [1] & 1_1 23_1 26_1 [11] & 10_1 14_1 27_1 [25] & \infty_0 18_0 18_1 [20] & \infty_1 12_0 13_1 [12] \\
\infty_2 26_0 28_1 [27] & \infty_3 2_0 5_1 [2] & \infty_4 3_0 7_1 [3] & \infty_5 4_0 9_1 [5] & \infty_6 5_0 11_1 [9] & \infty_7 8_0 15_1 [17] \\
\infty_8 9_0 17_1 [14] & \infty_9 7_0 16_1 [24] & \infty_{10} 10_0 20_1 [16] & \infty_{11} 19_0 30_1 [32] & \infty_{12} 20_0 32_1 [28] & \infty_{13} 21_0 34_1 [7] \\
\infty_{14} 22_0 0_1 [15] & \infty_{15} 25_0 4_1 [19] & \infty_{16} 23_0 3_1 [6] & \infty_{17} 27_0 8_1 [22] & \infty_{18} 24_0 6_1 [23] &
\infty_{19} 29_0 12_1 [10] \\
\infty_{20} 13_0 33_1 [21] & \infty_{21} 14_0 35_1 [29] & \infty_{22} 16_0 2_1 [4] & \infty_{23} 35_0 22_1 [31] & \infty_{24} 6_0 31_1 [13] &
\infty_{25} 31_0 21_1 [26] \\
\infty_{26} 28_0 19_1 [30] & \infty_{27} 33_0 25_1 [8] & \infty_{28} 30_0 24_1 [33] & \infty_{29} 34_0 29_1 [34]
\end{array}
\]
\end{example}
\begin{example} For $n=39$:
\[
\begin{array}{@{}*{6}l@{}}
0_0 11_0 18_0 [0] & 10_0 22_0 36_0 [17] & 2_1 27_1 33_1 [ 4] & 13_1 39_1 40_1 [13] & \infty_0 19_0 19_1 [ 2] & \infty_1 12_0 14_1 [30] \\
\infty_2 20_0 23_1 [18] & \infty_3 16_0 21_1 [28] & \infty_4 2_0 8_1 [35] & \infty_5 3_0 10_1 [20] & \infty_6 1_0 9_1 [31] & \infty_7 6_0 15_1 [40] \\
\infty_8 14_0 24_1 [15] & \infty_9 4_0 16_1 [ 6] & \infty_{10} 25_0 38_1 [10] & \infty_{11} 32_0 5_1 [11] & \infty_{12} 29_0 3_1 [25] & \infty_{13} 31_0 6_1 [29] \\
\infty_{14} 35_0 11_1 [22] & \infty_{15} 24_0 1_1 [26] & \infty_{16} 26_0 4_1 [21] & \infty_{17} 7_0 28_1 [23] & \infty_{18} 13_0 35_1 [ 7] & \infty_{19} 40_0 22_1 [37] \\
\infty_{20} 8_0 32_1 [14] & \infty_{21} 9_0 34_1 [36] & \infty_{22} 15_0 0_1 [19] & \infty_{23} 21_0 7_1 [27] & \infty_{24} 33_0 20_1 [16] & \infty_{25} 37_0 26_1 [32] \\
\infty_{26} 5_0 36_1 [ 9] & \infty_{27} 27_0 18_1 [38] & \infty_{28} 38_0 31_1 [34] & \infty_{29} 23_0 17_1 [33] & \infty_{30} 17_0 12_1 [ 8] & \infty_{31} 34_0 30_1 [24] \\
\infty_{32} 28_0 25_1 [39] & \infty_{33} 39_0 37_1 [ 1] & \infty_{34} 30_0 29_1 [ 3]
\end{array}
\]
\end{example}
| 3,792 | 67,737 |
en
|
train
|
0.100.23
|
\begin{example} For $n=39$:
\[
\begin{array}{@{}*{6}l@{}}
0_0 11_0 18_0 [0] & 10_0 22_0 36_0 [17] & 2_1 27_1 33_1 [ 4] & 13_1 39_1 40_1 [13] & \infty_0 19_0 19_1 [ 2] & \infty_1 12_0 14_1 [30] \\
\infty_2 20_0 23_1 [18] & \infty_3 16_0 21_1 [28] & \infty_4 2_0 8_1 [35] & \infty_5 3_0 10_1 [20] & \infty_6 1_0 9_1 [31] & \infty_7 6_0 15_1 [40] \\
\infty_8 14_0 24_1 [15] & \infty_9 4_0 16_1 [ 6] & \infty_{10} 25_0 38_1 [10] & \infty_{11} 32_0 5_1 [11] & \infty_{12} 29_0 3_1 [25] & \infty_{13} 31_0 6_1 [29] \\
\infty_{14} 35_0 11_1 [22] & \infty_{15} 24_0 1_1 [26] & \infty_{16} 26_0 4_1 [21] & \infty_{17} 7_0 28_1 [23] & \infty_{18} 13_0 35_1 [ 7] & \infty_{19} 40_0 22_1 [37] \\
\infty_{20} 8_0 32_1 [14] & \infty_{21} 9_0 34_1 [36] & \infty_{22} 15_0 0_1 [19] & \infty_{23} 21_0 7_1 [27] & \infty_{24} 33_0 20_1 [16] & \infty_{25} 37_0 26_1 [32] \\
\infty_{26} 5_0 36_1 [ 9] & \infty_{27} 27_0 18_1 [38] & \infty_{28} 38_0 31_1 [34] & \infty_{29} 23_0 17_1 [33] & \infty_{30} 17_0 12_1 [ 8] & \infty_{31} 34_0 30_1 [24] \\
\infty_{32} 28_0 25_1 [39] & \infty_{33} 39_0 37_1 [ 1] & \infty_{34} 30_0 29_1 [ 3]
\end{array}
\]
\end{example}
\begin{example} For $n=44$:
\[
\begin{array}{@{}*{6}l@{}}
0_0 2_0 7_0 [44] & 21_0 24_0 33_0 [2] & 0_1 11_1 17_1 [6] & 10_1 14_1 19_1 [40] & \infty_0 32_0 32_1 [28] & \infty_1 26_0 27_1 [29] \\
\infty_2 18_0 20_1 [45] & \infty_3 1_0 4_1 [18] & \infty_4 3_0 7_1 [36] & \infty_5 4_0 9_1 [16] & \infty_6 6_0 12_1 [9] & \infty_7 8_0 15_1 [5] \\
\infty_8 10_0 18_1 [30] & \infty_9 34_0 43_1 [37] & \infty_{10} 35_0 45_1 [32] & \infty_{11} 13_0 24_1 [15] & \infty_{12} 29_0 41_1 [41] & \infty_{13} 36_0 3_1 [43] \\
\infty_{14} 38_0 6_1 [38] & \infty_{15} 39_0 8_1 [3] & \infty_{16} 31_0 1_1 [31] & \infty_{17} 17_0 34_1 [1] & \infty_{18} 19_0 37_1 [33] & \infty_{19} 20_0 39_1 [25] \\
\infty_{20} 22_0 42_1 [19] & \infty_{21} 23_0 44_1 [14] & \infty_{22} 14_0 36_1 [13] & \infty_{23} 5_0 28_1 [17] & \infty_{24} 9_0 33_1 [20] & \infty_{25} 15_0 40_1 [34] \\
\infty_{26} 45_0 25_1 [12] & \infty_{27} 40_0 21_1 [8] & \infty_{28} 41_0 23_1 [39] & \infty_{29} 44_0 31_1 [10] & \infty_{30} 12_0 2_1 [24] & \infty_{31} 25_0 16_1 [22] \\
\infty_{32} 43_0 35_1 [7] & \infty_{33} 37_0 30_1 [21] & \infty_{34} 11_0 5_1 [42] & \infty_{35} 27_0 22_1 [11] & \infty_{36} 42_0 38_1 [35] & \infty_{37} 16_0 13_1 [27] \\
\infty_{38} 28_0 26_1 [4] & \infty_{39} 30_0 29_1 [26]
\end{array}
\]
\end{example}
Now the intransitive starters and adders:
\begin{example} For $n= 7$:
\[
\begin{array}{@{}*{7}l@{}}
2_{2} 3_{2} 5_{2} [0] & 0_{0} 1_{0} 3_{0} [6] & 6_{1} 0_{1} 2_{1} [1] & 6_{0} 3_{1} 4_{2} [ 2] & 5_0 1_{1} 6_{2} [5] & 4_{0} 5_1 0_{2} [R] & 5_{0} 4_{1} 0_{2} [C] \\
2_{0} 4_{1} 1_{2} [ R] & 4_0 2_{1} 1_{2} [C] \\
\end{array}
\] \end{example}
\begin{example} For $n= 9$:
\[
\begin{array}{@{}*{7}l@{}}
0_{2} 3_{2} 5_{2} [0] & 6_{0} 4_{0} 8_{2} [5] & 2_{1} 0_{1} 4_{2} [4] & 1_{0} 0_{0} 7_{2} [3] & 4_1 3_{1} 1_{2} [6] & 8_{0} 3_0 7_{1} [7] & 5_{0} 6_{1} 1_{1} [2] \\
2_{0} 8_{1} 2_{2} [R] & 8_{0} 2_{1} 2_{2} [C] & 7_{0} 5_{1} 6_{2} [R] & 5_0 7_{1} 6_{2} [C] \\
\end{array}
\] \end{example}
\begin{example} For $n=11$:
\[
\begin{array}{@{}*{7}l@{}}
0_{2} 1_{2} 9_{2} [0] & 4_{0} 8_{0} 7_{0} [5] & 9_{1} 2_{1} 1_{1} [6] & 5_{0} 4_{1} 7_{2} [ 9] & 2_0 3_{1} 5_{2} [2] & 1_{0} 6_0 6_{2} [4] & 5_{1} 10_{1} 10_{2} [7] \\
9_{0} 0_{1} 4_{2} [10] & 10_{0} 8_{1} 3_{2} [1] & 0_{0} 7_{1} 8_{2} [R] & 7_0 0_{1} 8_{2} [C] & 3_{0} 6_1 2_{2} [R] & 6_0 3_{1} 2_{2} [C] \\
\end{array}
\] \end{example}
\end{appendices}
\end{document}
| 2,622 | 67,737 |
en
|
train
|
0.101.0
|
\begin{document}
\title{Rollercoaster Permutations and Partition Numbers}
\author{William Adamczak}
\address{Siena College, Loudonville, NY 12211}
\email{[email protected]}
\author{Jacob Boni}
\address{Siena College, Loudonville, NY 12211}
\email{[email protected]}
\subjclass[2000]{Primary 54C40, 14E20; Secondary 46E25, 20C20}
\date{\today}
\keywords{Combinatorics, Permutations}
\begin{abstract}
This paper explores the properties of partitions of roller coaster permutations. A roller coaster permutation is a permutation that alternates between increasing and decreasing a maximum number of times, while its subsequences also alternate between increasing and decreasing a maximum number of times simultaneously. The focus of this paper is on achieving an upper bound for the partition number of a roller coaster permutation of length $n$.
\end{abstract}
\maketitle
\section{Introduction}
Roller coaster permutations first show up in a work of Ahmed \& Snevily \cite{ahsn} where roller coaster permutations are described as permutations that maximize the total switches from ascending to descending (or vice versa) for a permutation and all of its subpermutations simultaneously. More basically, this counts the greatest number of ups and downs or increases and decreases for the permutation and all possible subpermutations. Several of the properties of roller coaster permutations that were conjectured by Ahmed \& Snevily are proven in a paper of the first author \cite{adam} and are relied on heavily in developing an upper bound for the partition number of a roller coaster permutation.
These permutations are connected to pattern avoiding permutations as is seen in Mansour \cite{mans} in the context of avoiding the subpermutation 132. These are also strongly connected to forbidden subsequences and partitions of permutations, as is seen in Stankova \cite{stank}, where certain forbidden subsequences end up being roller coaster permutations; particularly, $F(1,1)$ is a subset of $RC(n)$. Consequently, these permutations are related to stack sortable permutations as seen in Egge \& Mansour \cite{egma}, where the connection between forbidden subsequences and stack sortability is made.
Kezdy, Snevily \& Wang~\cite{kesnwa} explored partitions of permutations into increasing and decreasing subsequences, where they took the approach of associating a graph to a permutation; they then translated the notion of partitions into the lack of existence of certain subgraphs. Our approach here relies rather on the underlying structure of these permutations, particularly the alternating structure, together with the relative positions of entries that are forced on roller coaster permutations.
| 747 | 8,754 |
en
|
train
|
0.101.1
|
\section{Background}
\begin{defn}
A permutation of length $n$ is an ordered rearrangement on the set $\{1,2,3 ... n\}$ for some $n$. The collection of all such permutations is denoted $S_n$.
\end{defn}
\begin{defn}
A roller coaster permutation is a permutation that maximizes the number of changes from increasing to decreasing over itself, and all of its subsequences, simultaneously. Here a subsequence of a permutation is an ordered subset of the original permutation \cite{ahsn}.
\end{defn}
The collection of all roller coaster permutations in $S_n$ is denoted $RC_n$; these have been explicitly found for small $n$ and are as follows:
\begin{flushleft}
RC(3) = \{132, 213, 231, 312\}\newline
RC(4) = \{2143, 2413, 3142, 3412\} \newline
RC(5) = \{24153, 25143, 31524, 32514, 34152, 35142, 41523, 42513\} \newline
RC(6) = \{326154, 351624, 426153, 451623\} \newline
RC(7) = \{3517264, 3527164, 3617254, 3627154, 4261735, 4271635, \newline
4361725, 4371625, 4517263, 4527163, 4617253, 4627153,\newline
5261734, 5271634, 5361724, 5371624\} \newline
RC(8) = \{43718265, 46281735, 53718264, 56281734\} \newline
RC(9) = \{471639285, 471936285, 472639185, 472936185, 481639275, 481936275, \newline 482639175, 482936175, 528174936, 528471936, 529174836, 529471836,\newline
538174926, 538471926, 539174826, 539471826, 571639284, 571936284, \newline
572639184, 572936184, 581639274, 581936274, 582639174, 582936174,\newline
628174935, 628471935, 629174835, 629471835, 638174925, 638471925, \newline
639174825, 639471825\}.\cite{ahsn}
\end{flushleft}
\begin{defn}
An alternating permutation is a permutation $\pi$ such that $\pi_1 < \pi_2 > \pi_3 \ldots $ and a reverse alternating permutation is a permutation $\pi$ such that $\pi_1 > \pi_2 < \pi_3 \ldots $.
\end{defn}
\begin{example}
The following is a graphical representation of the permutation \{4,3,7,1,8,2,6,5\}. This permutation is reverse alternating, as you can see that the first entry is greater than the second entry and the pattern defined above continues throughout the entire permutation.
\begin{center}
\begin{tikzpicture}
\node [draw,outer sep=0,inner sep=1,minimum size=10] (v1) at (1,4) {4};
\node [draw,outer sep=0,inner sep=1,minimum size=10] (v2) at (2,3) {3};
\node [draw,outer sep=0,inner sep=1,minimum size=10] (v3) at (3,7) {7};
\node [draw,outer sep=0,inner sep=1,minimum size=10] (v4) at (4,1) {1};
\node [draw,outer sep=0,inner sep=1,minimum size=10] (v5) at (5,8) {8};
\node [draw,outer sep=0,inner sep=1,minimum size=10] (v6) at (6,2) {2};
\node [draw,outer sep=0,inner sep=1,minimum size=10] (v7) at (7,6) {6};
\node [draw,outer sep=0,inner sep=1,minimum size=10] (v8) at (8,5) {5};
\draw (v1) edge (v2);
\draw (v2) edge (v3);
\draw (v3) edge (v4);
\draw (v4) edge (v5);
\draw (v5) edge (v6);
\draw (v6) edge (v7);
\draw (v7) edge (v8);
\end{tikzpicture}
\end{center}
\end{example}
\begin{example}
The permutation \{5,6,2,8,1,7,3,4\}, pictured below, is an example of a forward alternating permutation. Sometimes forward alternating permutations are simply referred to as being alternating.
\begin{center}
\begin{tikzpicture}
\node [draw,outer sep=0,inner sep=1,minimum size=10] (v1) at (1,5) {5};
\node [draw,outer sep=0,inner sep=1,minimum size=10] (v2) at (2,6) {6};
\node [draw,outer sep=0,inner sep=1,minimum size=10] (v3) at (3,2) {2};
\node [draw,outer sep=0,inner sep=1,minimum size=10] (v4) at (4,8) {8};
\node [draw,outer sep=0,inner sep=1,minimum size=10] (v5) at (5,1) {1};
\node [draw,outer sep=0,inner sep=1,minimum size=10] (v6) at (6,7) {7};
\node [draw,outer sep=0,inner sep=1,minimum size=10] (v7) at (7,3) {3};
\node [draw,outer sep=0,inner sep=1,minimum size=10] (v8) at (8,4) {4};
\draw (v1) edge (v2);
\draw (v2) edge (v3);
\draw (v3) edge (v4);
\draw (v4) edge (v5);
\draw (v5) edge (v6);
\draw (v6) edge (v7);
\draw (v7) edge (v8);
\end{tikzpicture}
\end{center}
\end{example}
\begin{defn}
The reverse of a permutation $\pi$ is the permutation with entries given by $(\pi_n, \pi_{n-1}, \ldots, \pi_1)$.
\end{defn}
\begin{defn}
The complement of a permutation $\pi$ is $(n+1-\pi_1, n+1-\pi_2, \ldots, n+1-\pi_n)$.
\end{defn}
\begin{example}
An example of a permutation and its complement are \{3,6,2,7,1,5,4\} and \{5,2,6,1,7,3,4\}. These permutations follow the definition above: notice that the first element of each, 3 and 5, fits the equation $5=7+1-3$. Both of these permutations have been graphically displayed below. The reverse of \{3,6,2,7,1,5,4\} is \{4,5,1,7,2,6,3\}. Notice that the reverse and complement of a permutation aren't necessarily equal.
\newline
\newline
\begin{tikzpicture}
\node [draw,outer sep=0,inner sep=1,minimum size=10] (v1) at (1,3) {3};
\node [draw,outer sep=0,inner sep=1,minimum size=10] (v2) at (2,6) {6};
\node [draw,outer sep=0,inner sep=1,minimum size=10] (v3) at (3,2) {2};
\node [draw,outer sep=0,inner sep=1,minimum size=10] (v4) at (4,7) {7};
\node [draw,outer sep=0,inner sep=1,minimum size=10] (v5) at (5,1) {1};
\node [draw,outer sep=0,inner sep=1,minimum size=10] (v6) at (6,5) {5};
\node [draw,outer sep=0,inner sep=1,minimum size=10] (v7) at (7,4) {4};
\draw (v1) edge (v2);
\draw (v2) edge (v3);
\draw (v3) edge (v4);
\draw (v4) edge (v5);
\draw (v5) edge (v6);
\draw (v6) edge (v7);
\begin{scope}[xshift=7.5cm]
\node [draw,outer sep=0,inner sep=1,minimum size=10] (v1) at (1,5) {5};
\node [draw,outer sep=0,inner sep=1,minimum size=10] (v2) at (2,2) {2};
\node [draw,outer sep=0,inner sep=1,minimum size=10] (v3) at (3,6) {6};
\node [draw,outer sep=0,inner sep=1,minimum size=10] (v4) at (4,1) {1};
\node [draw,outer sep=0,inner sep=1,minimum size=10] (v5) at (5,7) {7};
\node [draw,outer sep=0,inner sep=1,minimum size=10] (v6) at (6,3) {3};
\node [draw,outer sep=0,inner sep=1,minimum size=10] (v7) at (7,4) {4};
\draw (v1) edge (v2);
\draw (v2) edge (v3);
\draw (v3) edge (v4);
\draw (v4) edge (v5);
\draw (v5) edge (v6);
\draw (v6) edge (v7);
\end{scope}
\end{tikzpicture}
\end{example}
Below we give a collection of theorems regarding the structure of roller coaster permutations. We will use these heavily in arriving at an upper bound for the partition number.
\begin{thm}
Given $\pi \in RC_n$, the reverse and complement of $\pi$ are also members of $RC_n$ \cite{ahsn}.
\end{thm}
\begin{thm}
Given $\pi \in RC_n$, we have that $\pi$ is either alternating or reverse alternating \cite{adam}.
\end{thm}
\begin{thm}
Given $\pi \in RC_n \ , \abs{\pi_1 - \pi_n} = 1$, \cite{adam}.
\end{thm}
\begin{thm}
For $\pi \in RC_n$ if $\pi$ is alternating then $\pi_i > \pi_1,\pi_n$ for even $i$. If $\pi$ is reverse alternating then $\pi_i > \pi_1,\pi_n$ for odd $i$ \cite{adam}.
\end{thm}
\begin{example}
Below is a graphical representation of the permutation \{5,3,7,1,8,2,6,4\}. As you can see, the end points are 5 and 4, which have a difference of 1 as stated in Theorem 2.8. Also in the drawing below, notice that some elements have been circled into different sets, these being 7, 8 and 6 in the ``top'' set and 3, 1 and 2 in the ``bottom'' set. Notice that the top set is comprised entirely of numbers greater than the end points and the bottom is comprised entirely of numbers less than the end points. The top set has elements that are in the odd indices while the bottom set has elements that are in the even indices, just as Theorem 2.9 states.
| 3,552 | 8,754 |
en
|
train
|
0.101.2
|
\end{example}
\begin{defn}
The reverse of a permutation $\pi$ is the permutation with entries given by $(\pi_n, \pi_{n-1}, \ldots, \pi_1)$.
\end{defn}
\begin{defn}
The complement of a permutation $\pi$ is $(n+1-\pi_1, n+1-\pi_2, \ldots, n+1-\pi_n)$.
\end{defn}
\begin{example}
An example of a permutation and its complement are \{3,6,2,7,1,5,4\} and \{5,2,6,1,7,3,4\}. These permutations follow the definition above: notice that the first element of each, 3 and 5, fits the equation $5=7+1-3$. Both of these permutations have been graphically displayed below. The reverse of \{3,6,2,7,1,5,4\} is \{4,5,1,7,2,6,3\}. Notice that the reverse and complement of a permutation aren't necessarily equal.
\newline
\newline
\begin{tikzpicture}
\node [draw,outer sep=0,inner sep=1,minimum size=10] (v1) at (1,3) {3};
\node [draw,outer sep=0,inner sep=1,minimum size=10] (v2) at (2,6) {6};
\node [draw,outer sep=0,inner sep=1,minimum size=10] (v3) at (3,2) {2};
\node [draw,outer sep=0,inner sep=1,minimum size=10] (v4) at (4,7) {7};
\node [draw,outer sep=0,inner sep=1,minimum size=10] (v5) at (5,1) {1};
\node [draw,outer sep=0,inner sep=1,minimum size=10] (v6) at (6,5) {5};
\node [draw,outer sep=0,inner sep=1,minimum size=10] (v7) at (7,4) {4};
\draw (v1) edge (v2);
\draw (v2) edge (v3);
\draw (v3) edge (v4);
\draw (v4) edge (v5);
\draw (v5) edge (v6);
\draw (v6) edge (v7);
\begin{scope}[xshift=7.5cm]
\node [draw,outer sep=0,inner sep=1,minimum size=10] (v1) at (1,5) {5};
\node [draw,outer sep=0,inner sep=1,minimum size=10] (v2) at (2,2) {2};
\node [draw,outer sep=0,inner sep=1,minimum size=10] (v3) at (3,6) {6};
\node [draw,outer sep=0,inner sep=1,minimum size=10] (v4) at (4,1) {1};
\node [draw,outer sep=0,inner sep=1,minimum size=10] (v5) at (5,7) {7};
\node [draw,outer sep=0,inner sep=1,minimum size=10] (v6) at (6,3) {3};
\node [draw,outer sep=0,inner sep=1,minimum size=10] (v7) at (7,4) {4};
\draw (v1) edge (v2);
\draw (v2) edge (v3);
\draw (v3) edge (v4);
\draw (v4) edge (v5);
\draw (v5) edge (v6);
\draw (v6) edge (v7);
\end{scope}
\end{tikzpicture}
\end{example}
Below we give a collection of theorems regarding the structure of roller coaster permutations. We will use these heavily in arriving at an upper bound for the partition number.
\begin{thm}
Given $\pi \in RC_n$, the reverse and complement of $\pi$ are also members of $RC_n$ \cite{ahsn}.
\end{thm}
\begin{thm}
Given $\pi \in RC_n$, we have that $\pi$ is either alternating or reverse alternating \cite{adam}.
\end{thm}
\begin{thm}
Given $\pi \in RC_n \ , \abs{\pi_1 - \pi_n} = 1$, \cite{adam}.
\end{thm}
\begin{thm}
For $\pi \in RC_n$ if $\pi$ is alternating then $\pi_i > \pi_1,\pi_n$ for even $i$. If $\pi$ is reverse alternating then $\pi_i > \pi_1,\pi_n$ for odd $i$ \cite{adam}.
\end{thm}
\begin{example}
Below is a graphical representation of the permutation \{5,3,7,1,8,2,6,4\}. As you can see, the end points are 5 and 4, which have a difference of 1 as stated in Theorem 2.8. Also in the drawing below, notice that some elements have been circled into different sets, these being 7, 8 and 6 in the ``top'' set and 3, 1 and 2 in the ``bottom'' set. Notice that the top set is comprised entirely of numbers greater than the end points and the bottom is comprised entirely of numbers less than the end points. The top set has elements that are in the odd indices while the bottom set has elements that are in the even indices, just as Theorem 2.9 states.
\begin{center}
\begin{tikzpicture}
\node [draw,outer sep=0,inner sep=1,minimum size=10] (v1) at (1,5) {5};
\node [draw,outer sep=0,inner sep=1,minimum size=10] (v2) at (2,3) {3};
\node [draw,outer sep=0,inner sep=1,minimum size=10] (v3) at (3,7) {7};
\node [draw,outer sep=0,inner sep=1,minimum size=10] (v4) at (4,1) {1};
\node [draw,outer sep=0,inner sep=1,minimum size=10] (v5) at (5,8) {8};
\node [draw,outer sep=0,inner sep=1,minimum size=10] (v6) at (6,2) {2};
\node [draw,outer sep=0,inner sep=1,minimum size=10] (v7) at (7,6) {6};
\node [draw,outer sep=0,inner sep=1,minimum size=10] (v8) at (8,4) {4};
\draw (v1) edge (v2);
\draw (v2) edge (v3);
\draw (v3) edge (v4);
\draw (v4) edge (v5);
\draw (v5) edge (v6);
\draw (v6) edge (v7);
\draw (v7) edge (v8);
\draw (0,5) edge (9,5);
\draw (0,4) edge (9,4);
\draw plot[smooth cycle, tension=.7] coordinates {(2,7) (5,9) (8,6) (7,5) (5,7) (3,6)};
\draw plot[smooth cycle, tension=.7] coordinates {(4,0) (7,2) (6,3) (4,2) (2,4) (1,3)};
\end{tikzpicture}
\end{center}
\end{example}
\begin{defn}
A subsequence of a permutation is said to be monotonic, if it is strictly increasing or strictly decreasing.
\end{defn}
Monotonic subsequences are sometimes called runs. In the permutation \{5,8,2,6,3,9,1,7,4\} there are a few runs. The run (589) is an increasing run, while (974) is a decreasing run. The longest run in this permutation is (8631).
\begin{defn}
A partition of a permutation is a set of disjoint monotonic subsequences of that permutation whose union is the entire permutation.
\end{defn}
\begin{defn}
The partition number of a permutation, denoted $P(\pi)$, is the least number of partitions that permutation $\pi$ can be broken into.
\end{defn}
\begin{example}
Here you can see a graphical representation of the permutation \{3,2,6,1,5,4\}. The ovals distinguish the runs in the partition. Notice that there are two ovals, each with three numbers in them. This shows that the runs in this permutation are \{3,2,1\} and \{6,5,4\}. It also shows that there are two runs, which means that this permutation has a partition number of 2, or in other words, $P(326154)=2$.
\begin{center}
\begin{tikzpicture}
\node [draw,outer sep=0,inner sep=1,minimum size=10] (v1) at (1,3) {3};
\node [draw,outer sep=0,inner sep=1,minimum size=10] (v2) at (2,2) {2};
\node [draw,outer sep=0,inner sep=1,minimum size=10] (v3) at (3,6) {6};
\node [draw,outer sep=0,inner sep=1,minimum size=10] (v4) at (4,1) {1};
\node [draw,outer sep=0,inner sep=1,minimum size=10] (v5) at (5,5) {5};
\node [draw,outer sep=0,inner sep=1,minimum size=10] (v6) at (6,4) {4};
\draw (v1) edge (v2);
\draw (v2) edge (v3);
\draw (v3) edge (v4);
\draw (v4) edge (v5);
\draw (v5) edge (v6);
\draw plot[smooth cycle, tension=.7] coordinates {(3,7) (2,6) (6,3) (7,4)};
\draw plot[smooth cycle, tension=.7] coordinates {(1,4) (0,3) (4,0) (5,1)};
\end{tikzpicture}
\end{center}
\end{example}
\begin{example}
Here is another partitioned permutation. This time the permutation is \{4,7,1,6,3,9,2,8,5\}.
\begin{center}
\begin{tikzpicture}
\node [draw,outer sep=0,inner sep=1,minimum size=10] (v1) at (1,4) {4};
\node [draw,outer sep=0,inner sep=1,minimum size=10] (v2) at (2,7) {7};
\node [draw,outer sep=0,inner sep=1,minimum size=10] (v3) at (3,1) {1};
\node [draw,outer sep=0,inner sep=1,minimum size=10] (v4) at (4,6) {6};
\node [draw,outer sep=0,inner sep=1,minimum size=10] (v5) at (5,3) {3};
\node [draw,outer sep=0,inner sep=1,minimum size=10] (v6) at (6,9) {9};
\node [draw,outer sep=0,inner sep=1,minimum size=10] (v7) at (7,2) {2};
\node [draw,outer sep=0,inner sep=1,minimum size=10] (v8) at (8,8) {8};
\node [draw,outer sep=0,inner sep=1,minimum size=10] (v9) at (9,5) {5};
\draw (v1) edge (v2);
\draw (v2) edge (v3);
\draw (v3) edge (v4);
\draw (v4) edge (v5);
\draw (v5) edge (v6);
\draw (v6) edge (v7);
\draw (v7) edge (v8);
\draw (v8) edge (v9);
\draw plot[smooth cycle, tension=.7] coordinates {(6,10) (9,8) (10,5) (9,4) (7,7) (5,9)};
\draw plot[smooth cycle, tension=.7] coordinates {(1,5) (4,1) (3,0) (0,4)};
\draw plot[smooth cycle, tension=.7] coordinates {(4,7) (1,7) (6,1) (8,2)};
\end{tikzpicture}
\end{center}
\end{example}
\begin{defn}
$P(n)$ is the number of partitions that any permutation $\pi$ can be broken into where $\pi \in RC_n$.
$P_{max}(n)$ is the upper bound on $P(n)$.
\end{defn}
| 3,546 | 8,754 |
en
|
train
|
0.101.3
|
\section{Results}
\begin{thm}
For $\pi \in RC_n$, the partition number $P(\pi)$ is bounded above by $\floor{\frac{\ceil{\frac{n-2}{2}}}{2}} +2$.
\end{thm}
\begin{proof}
Without loss of generality we may assume that $\pi \in RC_n$ and $\pi$ is reverse alternating, i.e. $\pi$ starts with a descent, otherwise we could take the compliment of $\pi$ which is also in $RC_n$, since complimenting exchanges alternating for reverse alternating and we may then use the same argument that follows and then take the compliment again.
\begin{itemize}
\item Excluding the endpoints, there will be $\ceil{\frac{n-2}{2}}$ positions below the endpoints and $\floor{\frac{n-2}{2}}$ positions above the endpoints. Those positions below the endpoints are at even indices and the positions above are at odd indices.
\item Partition the even indices into contiguous increasing runs and do the same with odd indices. The number of runs made from even indices will be $\floor{\frac{\ceil{\frac{n-2}{2}}}{2}}+1$
\item Note that when partitioning a forward or reverse alternating permutation into contiguous increasing runs, the $k^{th}$ run will have an earliest start at index $2k-2$ for $k>1$ and the latest finish for this run will be at index $2k$. The $k^{th}$ index from the bottom partitions comes before the $k^{th}$ partition of the top partitions due to $\pi$ being reverse alternating. So the latest finish for the $k-1^{st}$ run is, at worst, equal to the latest start for the $k^{th}$ run; thus the $k+1^{st}$ segment from the top starts after the $k^{th}$ run from the bottom.
\item So the first run on the top pairs with the start point and then the $k^{th}$ run on the bottom pairs with the $k+1^{st}$ run on the top. If the number of runs on the bottom is greater than the number of runs on the top then the second to last run on the bottom pairs with the end point and we have an extra $+1$ in the partition number. Otherwise the last run on the bottom will pair with the end point. Thereby establishing the claim.
\end{itemize}
\end{proof}
We found exact numbers for $P_{max}(n)$ for $n < 15$ experimentally using code developed in the Sage computer algebra system.
These values can be found in the table below.
\newline
\newline
\begin{center}
\begin{tabular}{c|c|c}
$n$ & $P_{max}(n)$ & $\floor{\frac{\ceil{\frac{n-2}{2}}}{2}} +2$ \\ \hline
3 & 2 & 2 \\
4 & 2 & 2 \\
5 & 2 & 3 \\
6 & 3 & 3 \\
7 & 3 & 3 \\
8 & 3 & 3 \\
9 & 4 & 4 \\
10 & 4 & 4 \\
11 & 4 & 4 \\
12 & 4 & 4 \\
13 & 5 & 5 \\
14 & 5 & 5 \\
\end{tabular}
\end{center}
Note that the bound found in the theorem above is nearly sharp. For $n<15$ the upper bound we found is very close to the actual values of $P_{max}(n)$. The only deviation is that our upper bound at $n=5$ was 1 greater than the actual value.
\end{document}
| 909 | 8,754 |
en
|
train
|
0.102.0
|
\begin{document}
\author[M.~Bartoletti]{Massimo Bartoletti\lmcsorcid{0000-0003-3796-9774}}[a]
\author[M. Murgia]{Maurizio Murgia\lmcsorcid{0000-0001-7613-621X}}[b]
\author[R. Zunino]{Roberto Zunino\lmcsorcid{0000-0002-9630-429X}}[c]
\address{University of Cagliari, Cagliari, Italy}
\email{[email protected]}
\address{Gran Sasso Science Institute, L'Aquila, Italy}
\email{[email protected]}
\address{Universit\`a degli Studi di Trento, Trento, Italy}
\email{[email protected]}
\title[Probabilistic bisimulations for PCTL]{Sound approximate and asymptotic \texorpdfstring{\\}{} probabilistic bisimulations for PCTL}
\maketitle
\begin{abstract}
We tackle the problem of establishing the soundness of approximate
bisimilarity with respect to PCTL and its relaxed semantics.
To this purpose, we consider a notion of bisimilarity inspired by
the one introduced by Desharnais, Laviolette, and Tracol, and
parametric with respect to an approximation error $\delta$, and to
the depth $n$ of the observation along traces.
Essentially, our soundness theorem establishes that, when a state
$q$ satisfies a given formula up-to error $\delta$ and steps
$n$, and $q$ is bisimilar to $qi$ up-to error
$\deltai$ and enough steps, we prove that $qi$ also satisfies
the formula up-to a suitable error $\deltaii$ and steps $n$.
The new error $\deltaii$ is computed from $\delta,\deltai$
and the formula, and only depends linearly on $n$.
We provide a detailed overview of our soundness proof.
We extend our bisimilarity notion to families of states, thus
obtaining an asymptotic equivalence on such families.
We then consider an asymptotic satisfaction relation for PCTL
formulae, and prove that asymptotically equivalent families of
states asymptotically satisfy the same formulae.
\end{abstract}
\section{Introduction}
The behaviour of many real-world systems can be formally modelled as probabilistic processes, e.g.\@\xspace as discrete-time Markov chains.
Specifying and verifying properties on these systems requires probabilistic versions of temporal logics, such as PCTL~\cite{HanssonJonsson94}.
PCTL allows to express probability bounds using the formula $\logPr{\geq \pi}{\psi}$, which is satisfied by those states starting from which the path formula $\psi$ holds with probability $\geq \pi$.
A well-known issue is that real-world systems can have tiny deviations from their mathematical models, while logical properties, such as those written in PCTL, impose sharp constraints on the behaviour.
To address this issue, one can use a \emph{relaxed} semantics for PCTL, as in~\cite{DInnocenzo12hscc}.
There, the semantics of formulae is parameterised over the error
$\delta\geq 0$ one is willing to tolerate.
While in the standard semantics of $\logPr{\geq \pi}{\psi}$
the bound $\geq\pi$ is \emph{exact},
in relaxed PCTL this bound is weakened to $\geq\pi-\delta$.
So, the relaxed semantics generalises the standard PCTL semantics of~\cite{HanssonJonsson94}, which can be obtained by choosing $\delta=0$.
Instead, choosing an error $\delta > 0$ effectively provides a way to measure ``how much'' a state satisfies a given formula: some states might require only a very small error, while others a much larger one.
When dealing with temporal logics such as PCTL, one often wants to study some notion of state equivalence which preserves the semantics of formulae: that is, when two states are equivalent, they satisfy the same formulae.
For instance, probabilistic bisimilarities like those in~\cite{Desharnais10iandc,Desharnais02iandc,Larsen91iandc} preserve the semantics of formulae for PCTL and other temporal logics.
Although \emph{strict} probabilistic bisimilarity preserves
the semantics of relaxed PCTL,
it is not \emph{robust} against small deviations in
the probability of transitions in Markov chains~\cite{Giacalone90ifip2}.
A possible approach to deal with this issue is to also
relax the notion of probabilistic bisimilarity,
by making it parametric with respect to an error $\delta$~\cite{DInnocenzo12hscc}.
Relaxing bisimilarity in this way poses a choice regarding
which properties of the strict probabilistic bisimilarity are to be kept.
In particular, transitivity is enjoyed by the strict probabilistic bisimilarity,
but it is \emph{not} desirable for the relaxed notion.
Indeed, we could have three states $q,qi$ and $qii$ where the behaviour of $q$ and $qi$ is similar enough (within the error $\delta$), the behaviour of $qi$ and $qii$ is also similar enough (within $\delta$), but the distance between $q$ and $qii$ is larger than the allowed error $\delta$.
At best, we can have a sort of ``triangular inequality'', where $q$ and $qii$ can still be related but only with a larger error $2\cdot\delta$.
\begin{figure}
\caption{A Markov chain modelling repeated tosses of a fair coin.}
\end{figure}
Bisimilarity is usually defined by coinduction, essentially requiring that the relation is preserved along an arbitrarily long sequence of moves.
Still, in some settings, observing the behaviour over a very long run
is undesirable.
For instance, consider the PCTL formula
$\phi = \logPr{\geq 0.5}{{\sf true}\ {\sf U}^{\leq n}\ {\sf a}}$,
which is satisfied by those states from which, with probability $\geq 0.5$,
${\sf a}$ is satisfied within $n$ steps.
In this case, a behavioural equivalence relation
that preserves the semantics of $\phi$ can neglect the long-run behaviour
after $n$ steps.
More generally, if all the until operators are \emph{bounded},
as in $\phi_1 {\sf U}^{\leq k} \phi_2$,
then each formula has an upper bound of steps $n$ after which
a behavioural equivalence relation can ignore what happens next.
Observing the behaviour after this upper bound is unnecessarily strict,
and indeed in some settings it is customary to neglect
what happens in the very long run.
For instance, a real-world player repeatedly tossing a coin is usually
considered equivalent to a Markov chain with two states
and four transitions with probability $\nicefrac{1}{2}$
(see~\autoref{fig:fair-coin}),
even if in the long run the real-world system will diverge from the ideal one
(e.g.\@\xspace, when the player dies).
Another setting where observing the long-term behaviour is notoriously
undesirable is that of cryptography.
When studying the security of systems modelling cryptographic protocols,
two states are commonly considered equivalent when their behaviour is similar
(up to a small error $\delta$) in the short run,
even when in the very long run they diverge.
For instance, a state $q$ could represent an ideal system where no attacks can be performed by construction, while another state $qi$ could represent a real system where an adversary can try to disrupt the cryptographic protocol.
In such a scenario, if the protocol is secure, we would like to have $q$ and $qi$ equivalent, since the behaviour of the real system is close to the one of the ideal system.
Note that in the real system an adversary can repeatedly try to guess the secret cryptographic keys, and break security in the very long run, with very high probability.
Accordingly, standard security definitions require that the behaviour of the ideal and real system are within a small error, but only for a \emph{bounded} number of steps, after which their behaviour could diverge.
\paragraph{Contributions}
To overcome the above mentioned issues,
in this work we introduce a bounded, approximate notion of bisimilarity
$\crysim{n}{\delta}$, that only observes the first $n$ steps,
and allows for an error $\delta$.
Unlike standard bisimilarity, our relation is naturally defined by \emph{induction}
on $n$.
We call this looser variant of bisimilarity an \emph{up-to-$n,\delta$} bisimilarity.
We showcase up-to-$n,\delta$ bisimilarity on a running example
(Examples~\ref{ex:pctl:padlock}, \ref{ex:sim:padlock}, \ref{ex:results:padlock}, and~\ref{ex:asymptotic:padlock}),
comparing an ideal combination padlock against a real one which can be opened by
an adversary guessing its combination.
We show that the two systems are bisimilar up-to-$n,\delta$,
while they are not bisimilar according to the standard coinductive notion.
We then discuss how the two systems satisfy a
basic security property expressed in PCTL, with suitable errors.
To make our theory amenable to reason about infinite-state systems,
such as those usually found when modelling cryptographic protocols,
all our results apply to Markov chains with countably many states.
In this respect, our work departs from most literature on
probabilistic bisimulations~\cite{DInnocenzo12hscc,Song13lmcs}
and bisimilarity distances~\cite{Breugel17siglog,TangB17concur,TangB18cav,TangB16concur,Fu12icalp,ChenBW12fossacs,BreugelSW08lmcs},
which usually assume \emph{finite}-state Markov chains,
as they focus on computing the distances.
In~\autoref{ex:sim:pingpong} we exploit infinite-state Markov chains to
compare a biased random bit generator with an ideal one.
Our first main contribution is a soundness theorem establishing that,
when a state $q$ satisfies a PCTL formula $\phi$ (up to a
given error), any bisimilar state $qi \crysim{}{} q$ must
also satisfy $\phi$, at the cost of a slight increase of the error.
More precisely, if $\phi$ only involves until operators
bounded by $\leq n$, state $q$ satisfies $\phi$ up to some
error, and bisimilarity holds for enough steps and error $\delta$,
then $qi$ satisfies $\phi$ with an \emph{additional}
asymptotic error $O(n\cdot\delta)$.
This asymptotic behaviour is compatible with the usual assumptions of computational security in cryptography.
There, models of security protocols include a security parameter $\eta$, which affects the length of the cryptographic keys and the running time of the protocol:
more precisely, a protocol is assumed to run for $n(\eta)$ steps, which is polynomially bounded w.r.t.\@\xspace $\eta$.
As already mentioned above, cryptographic notions of security do not
observe the behaviour of the systems after this bound $n(\eta)$,
since in the long run an adversary can surely guess the secret keys by
brute force.
Coherently, a protocol is considered to be secure if (roughly) its
actual behaviour is \emph{approximately} equivalent to the ideal one
for $n(\eta)$ steps and up to an error $\delta(\eta)$, which has
to be a negligible function, asymptotically approaching zero faster
than any rational function.
Under these bounds on $n$ and $\delta$, the asymptotic error $O(n\cdot\delta)$ in our soundness theorem is negligible in $\eta$.
Consequently, if two states $q$ and $qi$ represent the ideal and actual behaviour, respectively, and they are bisimilar up to a negligible error, they will satisfy the same PCTL formulae with a negligible error.
We formalise this reasoning by providing a notion of \emph{asymptotic
equivalence}.
We start by considering families of states $\Xi(\eta)$, intuitively
representing the behaviour of a system depending on a security
parameter $\eta$.
Our asymptotic equivalence $\Xi_1 \equiv \Xi_2$ holds whenever
the behaviour of the two families is $n,\delta$-bisimilar within
a negligible error whenever we only perform a polynomial number of
steps.
We further introduce an \emph{asymptotic satisfaction relation}
$\Xi \models \phi$ which holds whenever the state $\Xi(\eta)$
satisfies $\phi$ under similar assumptions on the number of steps
and the allowed error.
Our second main result is the soundness of the asymptotic equivalence
with respect to asymptotic satisfaction.
Asymptotically equivalent families asymptotically satisfy the same
PCTL formulae.
We provide a detailed overview of the proof of our soundness theorem
for $n,\delta$-bisimilarity in~\autoref{sec:result},
deferring the gory technicalities to~\autoref{sec:proofs}.
The proof of asymptotic soundness, which exploits the
soundness theorem for $n,\delta$-bisimilarity, is given
in~\autoref{sec:asymptotic}.
| 3,380 | 39,865 |
en
|
train
|
0.102.1
|
\section{Related work}
There is a well-established line of research on
establishing soundness and completeness of probabilistic bisimulations
against various kinds of probabilistic logics
\cite{Desharnais10iandc,FurberMM19lmcs,Hermanns11iandc,Larsen91iandc,Mio17fuin,Mio18lics}.
The work closest to ours is that of D’Innocenzo, Abate and
Katoen~\cite{DInnocenzo12hscc}, which addresses the model checking
problem on a relaxed PCTL differing from ours in a few aspects.
First, their syntax allows for an individual bound
on the number of steps $k$
for each until operator ${\sf U}^{\leq k}$, while we assume all such
bounds are equal
and we make the semantics of PCTL parametrized w.r.t.\@\xspace the number of steps to be considered in until.
This approach allows us to simplify the statement of the soundness theorem and the definition of asymptotic satisfaction relation,
since the bound is not fixed by the formula, but it is a parameter of the semantics. Dealing with the case where each until in a formula could have its bound seems possible, at the cost of increasing the level of technicalities.
Second, their main result shows that bisimilar states
up-to a given error $\epsilon$
satisfy the same formulae $\psi$, provided that $\psi$ ranges
over the so-called $\epsilon$-robust formulae. Instead, our soundness
result applies to \emph{all} PCTL formulae, and ensures that when
moving from a state satisfying $\phi$ to a bisimilar one, $\phi$
is still satisfied, but at the cost of slightly increasing the error.
Third, their relaxed semantics differs from ours. In ours, we relax
all the probability bounds by the same amount $\delta$. Instead, the
relaxation in~\cite{DInnocenzo12hscc} affects the bounds by a
different amount which depends on the error~$\epsilon$, the until
bound $k$, and the underlying DTMC.
Desharnais, Laviolette and Tracol~\cite{Desharnais08qest} use a
coinductive approximate probabilistic bisimilarity, up-to an error
$\delta$.
Using such coinductive bisimilarity, \cite{Desharnais08qest}
establishes the soundness and completeness with respect to a
Larsen-Skou logic~\cite{Larsen91iandc} (instead of PCTL).
In~\cite{Desharnais08qest}, a bounded, up-to $n,\delta$ version
of bisimilarity is only briefly used to derive a decision algorithm
for coinductive bisimilarity under the assumption that the state space
is finite.
In our work, instead, the bounded up-to $n,\delta$ bisimilarity
is the main focus of study.
In particular, our soundness result only assumes $n,\delta$
bisimilarity, which is strictly weaker than coinductive bisimilarity.
Another minor difference is that~\cite{Desharnais08qest} considers a
labelled Markov process, i.e.\@\xspace the probabilistic variant of a labelled
transition system, while we instead focus on DTMCs having labels on
states.
Bian and Abate~\cite{Bian17fossacs} study bisimulation and trace
equivalence up-to an error $\epsilon$, and show that
$\epsilon$-bisimilar states are also $\epsilon'$-trace equivalent for
a suitable $\epsilon'$ which depends on~$\epsilon$.
Furthermore, they
show that $\epsilon$-trace equivalent states satisfy the same formulae
in a bounded LTL, up-to a certain error. In our work, we focus
instead on the branching logic PCTL.
A related research line is that on \emph{bisimulation metrics}
\cite{Breugel17siglog,BreugelHMW05icalp,BreugelW05tcs}.
Some of these metrics, like our up-to bisimilarity, take approximations into
account~\cite{Desharnais99concur,Castiglioni16qapl}.
Similarly to our
bisimilarity, bisimulation metrics allow to establish two states
equivalent up-to a certain error (but usually do not take into account
the bound on the number of steps). Interestingly, Castiglioni, Gebler
and Tini~\cite{Castiglioni16qapl} introduce a notion of distance
between Larsen-Skou formulae, and prove that the bisimulation distance
between two processes corresponds to the distance between their
mimicking formulae.
De Alfaro, Majumdar, Raman and Stoelinga~\cite{deAlfaroMRS08}
elegantly characterise bisimulation metrics with a quantitative
$\mu$-calculus.
Such logic allows to specify interesting properties such as maximal reachability and safety probability, and the maximal probability of satisfying a general
$\omega$-regular specification, but not full PCTL.
Mio~\cite{Mio14fossacs} characterises a bisimulation metric based on total variation
with a more general quantitative $\mu$-calculus,
dubbed {\L}ukasiewicz
$\mu$-calculus, able to encode PCTL.
Both \cite{deAlfaroMRS08} and~\cite{Mio14fossacs} do not take
the number of steps into account,
therefore their applicability to the analysis of security protocols
is yet to be investigated.
Metrics with discount~\cite{Desharnais04tcs,deAlfaro03icalp,Bacci21lmcs,DengCPP06entcs,BreugelSW08lmcs}
are sometimes used to relate the behaviour of probabilistic processes,
weighing less those events that happen in the far future compared to those
happening in the first steps.
Often, in these metrics each step causes the probability of the next events to be multiplied by a constant factor $c < 1$, in order to diminish their importance. Note that this discount makes it so that after $\eta$ steps, this diminishing factor becomes $c^\eta$, which is a negligible function of $\eta$. As discussed before, in cryptographic security one needs to consider as important those events happening within polynomially many steps, while neglecting the ones after such a polynomial threshold. Using an exponential discount factor $c^\eta$ after only $\eta$ steps goes against this principle, since it would cause a secure system to be at a negligible distance from an insecure one which can be violated after just $\eta$ steps.
For this reason, instead of using a metric with discount, in this paper we resort to a bisimilarity that is parametrized over the number of steps $n$ and error $\delta$, allowing us to obtain a notion which distinguishes between the mentioned secure and insecure systems.
Several works develop algorithms to decide probabilistic bisimilarity,
and to compute metrics
\cite{BreugelW14birthday,ChenBW12fossacs,Fu12icalp,TangB16concur,TangB17concur,TangB18cav}.
To this purpose, they restrict to finite-state systems,
like e.g.\@\xspace probabilistic automata.
Our results, instead, apply also to infinite-state systems.
In \cite{ZuninoD05} a calculus with cryptographic primitives is
introduced, together with a semantics where attackers have a
probability $\pi(\eta)$ of guessing encryption keys.
It is shown that, assuming that $\pi(\eta)$ is negligible and that attackers
run in polynomial time, some security properties (e.g.\@\xspace secrecy,
authentication) are equivalent to the analogous properties with
standard Dolev-Yao assumptions (that is, attackers never guess keys
but are not restricted to polynomial time).
This result can be seen as a special case of our asymptotic soundness
theorem.
The interesting work \cite{LagoG22} proposes a behavioural notion of
indistinguishability between session typed probabilistic
$\pi$-calculus processes, with the aim of providing a formal system
for proving security of real cryptographic protocols by comparison
with ideal ones. The type system, which is based on bounded linear
logic \cite{GirardSS92,LagoG16}, guarantees that processes terminate
in polynomial time.
This differs from our approach, where polynomiality appears directly
in the equivalence definition (\autoref{def:crysim}).
Moreover, the calculus of
\cite{LagoG22} is quite restrictive: for instance, it is not possible to
specify adversaries that access an oracle a polynomial number of times.
By contrast, our abstract model is general enough to represent such
adversaries.
\paragraph{Comparison with~\cite{BMZ22coordination}}
This paper extends the work~\cite{BMZ22coordination} in two
directions.
First, the current paper includes the proofs of all statements,
which were not present in~\cite{BMZ22coordination}.
Second, in~\cite{BMZ22coordination} we hinted at the possible
application of soundness to the asymptotic behaviour of systems which
depend on a parameter $\eta$.
Here, we properly develop and formalise that intuition
in~\autoref{sec:asymptotic}, providing a new asymptotic soundness result.
| 2,313 | 39,865 |
en
|
train
|
0.102.2
|
\section{The probabilistic temporal logic PCTL}
Assume a set $\mathcal{L}$ of labels, ranged over by $l$,
and let $\delta,\pi$ range over non-negative reals.
A \emph{discrete-time Markov chain} (DTMC) is a standard model of probabilistic
systems. Throughout this paper, we consider a DTMC having a countable,
possibly infinite, set of states $q$, each carrying a subset
of labels $\ell(q) \subseteq \mathcal{L}$.
\begin{defi}[Discrete-Time Markov Chain]
\label{def:pctl:dtmc}
A (labelled) DTMC is a triple $(\mathcal{Q}, \Pr, \ell)$ where:
\begin{itemize}
\item $\mathcal{Q}$ is a countable set of states;
\item $\Pr : \mathcal{Q}^2 \to [0,1]$ is a function, named transition
probability function;
\item $\ell : \mathcal{Q} \to \mathcal{P}(\mathcal{L})$ is a labelling function
\end{itemize}
Given $q \in \mathcal{Q}$ and $Q \subseteq \mathcal{Q}$,
we write $\tsPr{q}{Q}$ for
$\sum_{qi \in Q} \tsPr{q}{qi}$
and we require that $\tsPr{q}{\mathcal{Q}}=1$ for all $q\in\mathcal{Q}$.
\end{defi}
A \emph{trace} is an infinite sequence of states
$t = q_0q_1\cdots$,
where we write $t(i)$ for $q_i$,
i.e.\@\xspace the $i$-th element of $t$.
A \emph{trace fragment} is a finite, non-empty sequence of
states $\tilde{t} = q_0 \cdots q_{n-1}$,
where $\card{\tilde{t}}= n\geq 1$ is its length.
Given a trace fragment $\tilde{t}$ and a state $q$, we write
$\tilde{t}q^\omega$ for the trace
$\tilde{t}qqq\cdots$.
It is well-known that, given an initial state $q_0$, the DTMC
induces a $\sigma$-algebra of measurable sets of traces $T$
starting from $q_0$, i.e.\@\xspace~the $\sigma$-algebra generated by
cylinder sets~\cite{BaierKatoen08}.
More in detail, given a trace fragment
$\tilde{t} = q_0 \cdots q_{n-1}$, its \emph{cylinder set}
\[
\cyl{\tilde{t}}
\; = \;
\setcomp{t}{\text{$\tilde{t}$ is a prefix of $t$}}
\]
is given probability:
\[
\Pr(\cyl{\tilde{t}})
\; = \;
\prod_{i=0}^{n-2} \tsPr{q_i}{q_{i+1}}
\]
As usual, if $n=1$ the product is empty and evaluates to $1$.
Closing the family of cylinder sets under countable unions and complement
we obtain the family of measurable sets.
The probability measure on cylinder sets then uniquely extends to all the
measurable sets.
Given a set of trace fragments $\tilde{T}$, all starting from the
same state $q_0$ and having the same length, we let
\(
\Pr(\tilde{T}) =
\Pr(\bigcup_{\tilde{t}\in\tilde{T}}
\cyl{\tilde{t}})
= \sum_{\tilde{t}\in\tilde{T}} \Pr(\cyl{\tilde{t}})
\).
Note that using same-length trace fragments ensures that their
cylinder sets are disjoint, hence the second equality holds.
Below, we define PCTL formulae. Our syntax is mostly standard,
except for the \emph{until} operator.
There, for the sake of simplicity, we do not bound the number of steps
in the syntax $\phi_1\ {\sf U}\ \phi_2$, but we do so in the
semantics.
Concretely, this amounts to imposing the same bound to
\emph{all} the occurrences of ${\sf U}$ in the formula.
Such bound is then provided as a parameter to the semantics.
\begin{defi}[PCTL Syntax]
The syntax of PCTL is given by the following grammar, defining
\emph{state formulae} $\phi$ and \emph{path formulae} $\psi$:
\begin{align*}
\phi
& ::=
l
\mid {\sf true}
\mid \lnot \phi
\mid \phi \land \phi
\mid \logPr{\rhd \pi}{\psi}
\qquad \mbox{ where } \rhd \in \setenum{>,\geq}
\\
\psi
& ::=
{\sf X}\ \phi
\mid \phi\ {\sf U}\ \phi
\end{align*}
As syntactic sugar, we write $\logPr{< \pi}{\psi}$ for
$\lnot\logPr{\geq \pi}{\psi}$, and $\logPr{\leq \pi}{\psi}$
for $\lnot\logPr{> \pi}{\psi}$.
\end{defi}
Given a PCTL formula $\phi$, we define its
maximum ${\sf X}$-nesting $\nestMax{{\sf X}}{\phi}$
and its maximum ${\sf U}$-nesting $\nestMax{{\sf U}}{\phi}$
inductively as follows:
\begin{defi}[Maximum Nesting]
For $\circ \in \setenum{{\sf X},{\sf U}}$, we define:
\[
\begin{array}{c}
\nestMax{\circ}{l}
=
0
\qquad
\nestMax{\circ}{\sf true}
=
0
\qquad
\nestMax{\circ}{\lnot\phi}
=
\nestMax{\circ}{\phi}
\\[8pt]
\nestMax{\circ}{\phi_1 \land \phi_2}
=
\max(\nestMax{\circ}{\phi_1},\nestMax{\circ}{\phi_2})
\qquad
\nestMax{\circ}{\logPr{\rhd \pi}{\psi}}
=
\nestMax{\circ}{\psi}
\\[8pt]
\nestMax{\circ}{{\sf X} \phi}
=
\nestMax{\circ}{\phi} +
\begin{cases}
1 & \text{if $\circ = {\sf X}$} \\
0 & \text{otherwise}
\end{cases}
\\[16pt]
\nestMax{\circ}{\phi_1 {\sf U} \phi_2}
=
\max(\nestMax{\circ}{\phi_1},\nestMax{\circ}{\phi_2}) +
\begin{cases}
1 & \text{if $\circ = {\sf U}$} \\
0 & \text{otherwise}
\end{cases}
\end{array}
\]
\end{defi}
We now define a semantics for PCTL where the probability bounds
$\rhd \pi$ in $\logPr{\rhd \pi}{\psi}$ can be relaxed or
strengthened by an error $\delta$.
Our semantics is parameterized over the \emph{until} bound $n$,
the error $\delta\in\mathbb{R}^{\geq 0}$, and a direction
$r\in\setenum{+1,-1}$.
Given the parameters, the semantics associates each PCTL state formula
with the set of states satisfying it.
Intuitively, when $r = +1$ we relax the semantics of the formula,
so that increasing $\delta$ causes more states to satisfy it. More
precisely, the probability bounds $\rhd\pi$ in positive occurrences
of $\logPr{\rhd \pi}{\psi}$ are decreased by $\delta$, while
those in negative occurrences are increased by $\delta$.
Dually, when $r = -1$ we strengthen the semantics, modifying
$\rhd\pi$ in the opposite direction.
Our semantics is inspired by the relaxed / strengthened PCTL semantics
of~\cite{DInnocenzo12hscc}.
\begin{defi}[PCTL Semantics]
\label{def:pctl:sem}
The semantics of PCTL formulae is given below. Let
$n \in \mathbb{N}$, $\delta\in\mathbb{R}^{\geq 0}$
and $r \in \setenum{+1,-1}$.
\[
\begin{array}{ll}
\sem{n}{\delta}{r}{l}
&= \setcomp{q\in\mathcal{Q}}{l\in\ell(q)}
\\
\sem{n}{\delta}{r}{\sf true}
&= \mathcal{Q}
\\
\sem{n}{\delta}{r}{\lnot\phi}
&=
\mathcal{Q} \setminus \sem{n}{\delta}{-r}{\phi}
\\
\sem{n}{\delta}{r}{\phi_1 \land \phi_2}
&=
\sem{n}{\delta}{r}{\phi_1}
\cap
\sem{n}{\delta}{r}{\phi_2}
\\
\sem{n}{\delta}{r}{\logPr{\rhd \pi}{\psi}}
&=
\setcomp{q\in\mathcal{Q}}{
\Pr(\trStart{q} \cap \sem{n}{\delta}{r}{\psi})
+ r \cdot \delta \rhd \pi }
\\
\sem{n}{\delta}{r}{{\sf X} \phi}
&=
\setcomp{t}{t(1) \in \sem{n}{\delta}{r}{\phi}}
\\
\sem{n}{\delta}{r}{\phi_1 {\sf U} \phi_2}
&=
\setcomp{t}{
\exists i\in 0..n.\
t(i) \in \sem{n}{\delta}{r}{\phi_2}
\land
\forall j\in 0..i-1.\ t(j) \in \sem{n}{\delta}{r}{\phi_1}}
\end{array}
\]
\end{defi}
The semantics is mostly standard, except for
$\logPr{\rhd \pi}{\psi}$ and $\phi_1 {\sf U} \phi_2$.
The semantics of $\logPr{\rhd \pi}{\psi}$ adds
$r\cdot\delta$ to the probability of satisfying $\psi$, which
relaxes or strengthens (depending on $r$) the probability bound as
needed.
The semantics of $\phi_1 {\sf U} \phi_2$ uses the parameter
$n$ to bound the number of steps within which $\phi_2$ must
hold.
Our semantics enjoys monotonicity.
The semantics of state and path formulae is increasing w.r.t.\@\xspace~$\delta$
if $r = +1$, and decreasing otherwise.
The semantics also increases when moving from $r=-1$ to
$r=+1$.
\begin{lem}[Monotonicity]
\label{lem:pctl:monotonicity}
Whenever $\delta \leq \deltai$, we have:
\begin{align*}
& \sem{n}{\delta}{+1}{\phi} \subseteq
\sem{n}{\deltai}{+1}{\phi}
&& \sem{n}{\deltai}{-1}{\phi} \subseteq
\sem{n}{\delta}{-1}{\phi}
&& \sem{n}{\delta}{-1}{\phi} \subseteq
\sem{n}{\delta}{+1}{\phi}
\\
& \sem{n}{\delta}{+1}{\psi} \subseteq
\sem{n}{\deltai}{+1}{\psi}
&& \sem{n}{\deltai}{-1}{\psi} \subseteq
\sem{n}{\delta}{-1}{\psi}
&& \sem{n}{\delta}{-1}{\psi} \subseteq
\sem{n}{\delta}{+1}{\psi}
\end{align*}
\end{lem}
Note that monotonicity does \emph{not} hold for the parameter $n$,
i.e.\@\xspace even if $n \leq ni$, we can \emph{not} conclude
$\sem{n}{\delta}{+1}{\phi} \subseteq
\sem{ni}{\delta}{+1}{\phi}$.
As a counterexample, let
$\mathcal{Q} = \setenum{q_0, q_1}$, $\ell(q_0)=\emptyset$,
$\ell(q_1)=\setenum{\sf a}$,
$\tsPr{q_0}{q_1}=\tsPr{q_1}{q_1}=1$, and
$\tsPr{q}{q'}=0$ elsewhere.
Given $\phi=\logPr{\leq 0}{{\sf true}\ {\sf U}\ {\sf a}}$, we
have $q_0 \in \sem{0}{0}{+1}{\phi}$ since in $n=0$
steps it is impossible to reach a state satisfying $\sf a$.
However, we do \emph{not} have $q_0 \in \sem{1}{0}{+1}{\phi}$
since in $ni=1$ steps we always reach $q_1$,
which satisfies $\sf a$.
\begin{figure}
\caption{A Markov chain modelling an ideal (left) and a real (right) padlock.}
\end{figure}
\begin{exa}\label{ex:pctl:padlock}
We compare an ideal combination padlock to a real one from the point
of view of an adversary.
The ideal padlock has a single state $q_{\sf ok}$, representing
a closed padlock that can not be opened.
Instead, the real padlock is under attack from the adversary who tries
to open the padlock by repeatedly guessing its 5-digit PIN.
At each step the adversary generates a (uniformly) random PIN,
different from all the ones which have been attempted so far, and
tries to open the padlock with it.
The states of the real padlock are $\setenum{q_0,\ldots,q_{N-1}}$
(with $N=10^5$), where $q_i$ represents the situation where $i$
unsuccessful attempts have been made, and an additional state
$q_{\sf err}$ that represents that the padlock was opened.
Since after $i$ attempts the adversary needs to guess the correct PIN
among the $N-i$ remaining combinations, the real padlock in state
$q_i$ moves to $q_{\sf err}$ with probability $1/(N-i)$,
and to $q_{i+1}$ with the complementary probability.
Summing up, we simultaneously model both the ideal and real padlock
as a single DTMC with the following transition probability function
(see~\autoref{fig:padlock}):
\[
\begin{array}{l@{\qquad}l}
\Pr(q_{\sf ok},q_{\sf ok})=1
\\
\Pr(q_{\sf err},q_{\sf err})=1
\\
\Pr(q_i,q_{\sf err}) = 1/(N-i) & 0\leq i<N
\\
\Pr(q_i,q_{i+1}) = 1-1/(N-i) & 0\leq i<N-1
\\
\Pr(q,qi) = 0 & \text{otherwise}
\end{array}
\]
We label the states with labels $\mathcal{L}=\setenum{\sf err}$ by letting
$\ell(q_{\sf err})=\setenum{\sf err}$ and
$\ell(q)=\emptyset$ for all $q \neq q_{\sf err}$.
The PCTL formula
$\phi = \logPr{\leq 0}{{\sf true}\ {\sf U}\ {\sf err}}$
models the expected behaviour of an unbreakable padlock, requiring that the
set of traces where the padlock is eventually opened has zero
probability.
| 3,999 | 39,865 |
en
|
train
|
0.102.3
|
The semantics is mostly standard, except for
$\logPr{\rhd \pi}{\psi}$ and $\phi_1 {\sf U} \phi_2$.
The semantics of $\logPr{\rhd \pi}{\psi}$ adds
$r\cdot\delta$ to the probability of satisfying $\psi$, which
relaxes or strengthens (depending on $r$) the probability bound as
needed.
The semantics of $\phi_1 {\sf U} \phi_2$ uses the parameter
$n$ to bound the number of steps within which $\phi_2$ must
hold.
Our semantics enjoys monotonicity.
The semantics of state and path formulae is increasing w.r.t.\@\xspace~$\delta$
if $r = +1$, and decreasing otherwise.
The semantics also increases when moving from $r=-1$ to
$r=+1$.
\begin{lem}[Monotonicity]
\ellel{lem:pctl:monotonicity}
Whenever $\delta \leq \deltai$, we have:
\begin{align*}
& \sem{n}{\delta}{+1}{\phi} \subseteq
\sem{n}{\deltai}{+1}{\phi}
&& \sem{n}{\deltai}{-1}{\phi} \subseteq
\sem{n}{\delta}{-1}{\phi}
&& \sem{n}{\delta}{-1}{\phi} \subseteq
\sem{n}{\delta}{+1}{\phi}
\\
& \sem{n}{\delta}{+1}{\psi} \subseteq
\sem{n}{\deltai}{+1}{\psi}
&& \sem{n}{\deltai}{-1}{\psi} \subseteq
\sem{n}{\delta}{-1}{\psi}
&& \sem{n}{\delta}{-1}{\psi} \subseteq
\sem{n}{\delta}{+1}{\psi}
\end{align*}
\end{lem}
Note that monotonicity does \emph{not} hold for the parameter $n$,
i.e.\@\xspace even if $n \leq ni$, we can \emph{not} conclude
$\sem{n}{\delta}{+1}{\phi} \subseteq
\sem{ni}{\delta}{+1}{\phi}$.
As a counterexample, let
$\mathcal{Q} = \setenum{q_0, q_1}$, $\ell(q_0)=\emptyset$,
$\ell(q_1)=\setenum{\sf a}$,
$\tsPr{q_0}{q_1}=\tsPr{q_1}{q_1}=1$, and
$\tsPr{q}{q'}=0$ elsewhere.
Given $\phi=\logPr{\leq 0}{{\sf true}\ {\sf U}\ {\sf a}}$, we
have $q_0 \in \sem{0}{0}{+1}{\phi}$ since in $n=0$
steps it is impossible to reach a state satisfying $\sf a$.
However, we do \emph{not} have $q_0 \in \sem{1}{0}{+1}{\phi}$
since in $ni=1$ steps we always reach $q_1$,
which satisfies $\sf a$.
\begin{figure}
  \caption{A Markov chain modelling an ideal (left) and a real (right) padlock.}
  \label{fig:padlock}
\end{figure}
\begin{exa}\ellel{ex:pctl:padlock}
We compare an ideal combination padlock to a real one from the point
of view of an adversary.
The ideal padlock has a single state $q_{\sf ok}$, representing
a closed padlock that can not be opened.
Instead, the real padlock is under attack from the adversary who tries
to open the padlock by repeatedly guessing its 5-digit PIN.
At each step the adversary generates a (uniformly) random PIN,
different from all the ones which have been attempted so far, and
tries to open the padlock with it.
The states of the real padlock are $\{q_0,\ldots,q_{N-1}\}$
(with $N=10^5$), where $q_i$ represents the situation where $i$
unsuccessful attempts have been made, and an additional state
$q_{\sf err}$ that represents that the padlock was opened.
Since after $i$ attempts the adversary needs to guess the correct PIN
among the $N-i$ remaining combinations, the real padlock in state
$q_i$ moves to $q_{\sf err}$ with probability $1/(N-i)$,
and to $q_{i+1}$ with the complementary probability.
Summing up, we simultaneously model both the ideal and real padlock
as a single DTMC with the following transition probability function
(see~\autoref{fig:padlock}):
\[
\begin{array}{l@{\qquad}l}
\Pr(q_{\sf ok},q_{\sf ok})=1
\\
\Pr(q_{\sf err},q_{\sf err})=1
\\
\Pr(q_i,q_{\sf err}) = 1/(N-i) & 0\leq i<N
\\
\Pr(q_i,q_{i+1}) = 1-1/(N-i) & 0\leq i<N-1
\\
\Pr(q,q') = 0 & \text{otherwise}
\end{array}
\]
We label the states with labels $\mathcal{L}=\setenum{\sf err}$ by letting
$\ell(q_{\sf err})=\setenum{\sf err}$ and
$\ell(q)=\emptyset$ for all $q \neq q_{\sf err}$.
The PCTL formula
$\phi = \logPr{\leq 0}{{\sf true}\ {\sf U}\ {\sf err}}$
models the expected behaviour of an unbreakable padlock, requiring that the
set of traces where the padlock is eventually opened has zero
probability.
Formally, $\phi$ is satisfied by state $q$ when
\begin{align}
\nonumber
q \in \sem{n}{\delta}{+1}{\phi}
& \iff
q \in \sem{n}{\delta}{+1}{\lnot \logPr{> 0}{{\sf true}\ {\sf U}\ {\sf err}}}
\\
\nonumber
& \iff
q \notin \sem{n}{\delta}{-1}{\logPr{> 0}{{\sf true}\ {\sf U}\ {\sf err}}}
\\
\nonumber
& \iff
\lnot ( \Pr(\trStart{q} \cap \sem{n}{\delta}{-1}{{\sf true}\ {\sf U}\ {\sf err}}) - \delta > 0 )
\\
\label{eq:padlock-pr}
& \iff
\Pr(\trStart{q} \cap \sem{n}{\delta}{-1}{{\sf true}\ {\sf U}\ {\sf err}}) \leq \delta
\end{align}
When $q=q_{\sf ok}$ we have that
$\trStart{q_{\sf ok}} \cap\sem{n}{\delta}{-1}{{\sf true}\
{\sf U}\ {\sf err}} = \emptyset$, hence the above probability is
zero, which is surely $\leq \delta$.
Consequently, $\phi$ is satisfied by the ideal padlock
$q_{\sf ok}$, for all $n\geq 0$ and $\delta\geq 0$.
By contrast, $\phi$ is not always satisfied by the real padlock
$q=q_0$, since we have
$q_0\in \sem{n}{\delta}{+1}{\phi}$ only for some values
of $n$ and $\delta$.
To show why, we start by considering some trivial cases.
Choosing $\delta=1$ makes equation~\eqref{eq:padlock-pr}
trivially true for all $n$.
Furthermore, if we choose $n=1$,
then $\trStart{q_0} \cap\sem{n}{\delta}{-1}{{\sf true}\ {\sf U}\ {\sf err}}
= \setenum{q_0q_{\sf err}^\omega}$
is a set of traces with probability $1/N$.
Therefore, equation~\eqref{eq:padlock-pr} holds only when
$\delta\geq 1/N$.
More in general, when $n\geq 1$, we have
\[
\trStart{q_0} \cap
\sem{n}{\delta}{-1}{{\sf true}\ {\sf U}\ {\sf err}} =
\setenum{q_0q_{\sf err}^\omega,\
q_0q_1q_{\sf err}^\omega,\
q_0q_1q_2q_{\sf err}^\omega,\
\ldots,\
q_0\ldots q_{n-1}q_{\sf err}^\omega
}
\]
The probability of the above set is the probability of guessing the
PIN within $n$ steps. The complementary event, i.e.\@\xspace not guessing
the PIN for $n$ times, has probability
\[
\dfrac{N-1}{N} \cdot
\dfrac{N-2}{N-1}
\cdots
\dfrac{N-n}{N-(n-1)} =
\dfrac{N-n}{N}
\]
Consequently, \eqref{eq:padlock-pr} simplifies to
$n/N \leq \delta$, suggesting the least value of $\delta$
(depending on $n$) for which $q_0$ satisfies $\phi$.
For instance, when $n=10^3$, this amounts to claiming that the
real padlock is secure, up to an error of
$\delta = n/N = 10^{-2}$.
\end{exa}
% (removed non-LaTeX extraction artifact: "| 2,345 | 39,865 | en | train | 0.102.4 |")
\section{Up-to-$n,\delta$ Bisimilarity}
We now define a relation on states
$q \crysim{n}{\delta} qi$ that intuitively holds
whenever $q$ and $qi$ exhibit similar behaviour for
a bounded number of steps.
The parameter $n$ controls the number of steps, while $\delta$
controls the error allowed in each step.
Note that since we only observe the first $n$ steps, our notion is
\emph{inductive}, unlike unbounded bisimilarity which is co-inductive,
similarly to~\cite{Castiglioni16qapl}.
Our notion is also inspired by~\cite{Desharnais08qest}.
\begin{defi}[Up-to-$n,\delta$ Bisimilarity]
\label{def:param-bisim}
We define the relation $q \crysim{n}{\delta} qi$
as follows by induction on $n$:
\begin{enumerate}
\item $q \crysim{0}{\delta} qi$
always holds
\item $q \crysim{n+1}{\delta} qi$
holds if and only if, for all $Q \subseteq \mathcal{Q}$:
\begin{enumerate}
\item\label{def:param-bisim:a}
$\ell(q) = \ell(qi)$
\item\label{def:param-bisim:b}
$\tsPr{q}{Q} \leq
\tsPr{qi}{\cryset{n}{\delta}{Q}} + \delta$
\item\label{def:param-bisim:c}
$\tsPr{qi}{Q} \leq
\tsPr{q}{\cryset{n}{\delta}{Q}} + \delta$
\end{enumerate}
\end{enumerate}
where $\cryset{n}{\delta}{Q} =
\setcomp{qi}{\exists q \in Q.\
q \crysim{n}{\delta} qi}$
is the image of the set $Q$ according to the bisimilarity
relation.
\end{defi}
We now establish two basic properties of the bisimilarity.
Our notion is reflexive and symmetric, and enjoys a triangular
property. Furthermore, it is monotonic on both $n$ and $\delta$.
\begin{lem}
The relation $\crysim{}{}$ satisfies:
\[
q \crysim{n}{\delta} q
\qquad\quad
q \crysim{n}{\delta} qi
\implies qi \crysim{n}{\delta} q
\qquad\quad
q \crysim{n}{\delta} qi
\land qi \crysim{n}{\deltai} qii
\implies q \crysim{n}{\delta+\deltai} qii
\]
\end{lem}
\begin{proof}
Straightforward induction on $n$.
\end{proof}
\begin{lem}[Monotonicity]
\ellel{lem:sim:monotonicity}
\begin{align*}
ni \leq n
& \;\;\implies\;\;
\crysim{n}{\delta} \;\;\subseteq\;\; \crysim{ni}{\delta}
\\
\delta \leq \deltai
& \;\;\implies\;\;
\crysim{n}{\delta} \;\;\subseteq\;\; \crysim{n}{\deltai}
\end{align*}
\end{lem}
\begin{exa}
\ellel{ex:sim:padlock}
We use up-to-$n,\delta$ bisimilarity to compare the behaviour of the
ideal padlock $q_{\sf ok}$ and the real one, in any of its
states, when observed for $n$ steps.
When $n=0$ bisimilarity trivially holds, so below we only
consider $n>0$.
We start from the simplest case: bisimilarity does not hold
between $q_{\sf ok}$ and $q_{\sf err}$.
Indeed, $q_{\sf ok}$ and $q_{\sf err}$ have distinct
labels
($\ell(q_{\sf ok})=\emptyset\neq\setenum{{\sf
err}}=\ell(q_{\sf err})$), hence we do not have
$q_{\sf ok} \crysim{n}{\delta} q_{\sf err}$, no
matter what $n>0$ and $\delta$ are.
We now compare $q_{\sf ok}$ with any $q_i$.
When $n=1$, both states have an empty label set,
i.e.~$\ell(q_{\sf ok})=\ell(q_i)=\emptyset$, hence
they are bisimilar for any error $\delta$.
We therefore can write $q_{\sf ok} \crysim{1}{\delta} q_i$ for any
$\delta\geq 0$.
When $n=2$, we need a larger error $\delta$ to make
$q_{\sf ok}$ and $q_i$ bisimilar.
Indeed, if we perform a move from $q_i$, the padlock can be
broken with probability $1/(N-i)$, in which case we reach
$q_{\sf err}$, thus violating bisimilarity.
Accounting for such probability, we only obtain
$q_{\sf ok} \crysim{2}{\delta} q_i$
for any $\delta\geq 1/(N-i)$.
When $n=3$, we need an even larger error $\delta$ to make
$q_{\sf ok}$ and $q_i$ bisimilar.
Indeed, while the first PIN guessing attempt has probability
$1/(N-i)$, in the second move the guessing probability increases to
$1/(N-i-1)$.
Choosing $\delta$ equal to the largest probability is enough to
account for both moves, hence we obtain
$q_{\sf ok} \crysim{3}{\delta} q_i$ for any
$\delta\geq 1/(N-i-1)$.
Technically, note that the denominator $N-i-1$ might be zero, since
when $i=n-1$ the first move always guesses the PIN, and the
second guess never actually happens.
In such case, we instead take $\delta=1$.
More in detail, we verify item~\eqref{def:param-bisim:b} of
\autoref{def:param-bisim} for
$q_{\sf ok} \crysim{3}{\delta} q_i$,
assuming $\delta\geq 1/(N-i-1)$.
We must prove that:
\[
\tsPr{q_{\sf ok}}{Q} \leq
\tsPr{q_i}{\cryset{2}{\delta}{Q}} + \delta
\]
When $q_{\sf ok} \not\in Q$ we have
$\tsPr{q_{\sf ok}}{Q} = 0$, hence the inequality holds trivially.
Otherwise, if $q_{\sf ok} \in Q$ we first observe that
$\tsPr{q_{\sf ok}}{Q} = 1$.
From the case $n = 2$, we have
$q_{\sf ok} \crysim{2}{\delta} q_{i+1}$,
since $\delta \geq 1/(N-(i+1))$.
Hence, $q_{i+1} \in \;\cryset{2}{\delta}{Q}$ and so:
\[
\tsPr{q_i}{\cryset{2}{\delta}{Q}} + \delta
\geq
\tsPr{q_i}{\setenum{q_{i+1}}} + \delta
=
1 - \dfrac{1}{N-i} + \delta
\geq
1 - \dfrac{1}{N-i} + \dfrac{1}{N-i-1}
\geq
1
\]
This proves item~\eqref{def:param-bisim:b};
the proof for item~\eqref{def:param-bisim:c} is similar.
More in general, for an arbitrary $n\geq 2$, we obtain
through a similar argument that
$q_{\sf ok} \crysim{n}{\delta} q_i$
for any $\delta\geq 1/(N-i-n+2)$.
Intuitively, $\delta=1/(N-i-n+2)$ is the probability of
guessing the PIN in the last attempt (the $n$-th), which is
the attempt having the highest success probability.
Again, when the denominator $N-i-n+2$ becomes zero (or
negative), we instead take $\delta=1$.
\end{exa}
Note that the DTMC of the ideal and real padlocks
(Example~\ref{ex:pctl:padlock}) has finitely many states.
Our bisimilarity notion and results, however, can also deal with DTMCs
with a countably infinite set of states, as we show in the next example.
\begin{figure}
  \caption{A Markov chain modelling an unfair random generator of bit streams.}
  \label{fig:pingpong}
\end{figure}
\begin{exa}
\ellel{ex:sim:pingpong}
We consider an ideal system which randomly generates bit streams
in a fair way.
We model such a system as having two states
$\setenum{q_a,q_b}$, with transition probabilities
$\Pr(x,y)=1/2$ for any $x,y \in \setenum{q_a,q_b}$,
as in~\autoref{fig:fair-coin}.
We label state $q_a$ with label $\sf a$ denoting bit $0$, and
state $q_b$ with label $\sf b$ denoting bit $1$.
We compare this ideal system with a real system which generates
bit streams in an unfair way.
At each step, the real system draws a ball from an urn, initially
having $g_0$ $\sf a$-labelled balls and $g_0$ $\sf b$-labelled
balls.
After each drawing, the ball is placed back in the urn.
However, every time an $\sf a$-labelled ball is drawn, an additional
$\sf a$-labelled ball is put in the urn, making the next drawings
more biased towards $\sf a$.
We model the real system using the infinite\footnote{
Modelling this behaviour inherently requires an \emph{infinite}
set of states, since each number of $\sf a$-labelled balls in the urn
leads to a unique transition probability function.} set of states
$\mathbb N \times \setenum{{\sf a},{\sf b}}$, whose first component
counts the number of $\sf a$-labelled balls in the urn, and the
second component is the label of the last-drawn ball.
The transition probabilities are as follows, where $g_0\in\mathbb N^+$
(see~\autoref{fig:pingpong}):
\[
\begin{array}{ll@{\qquad}l}
\Pr((g,x),(g+1,{\sf a})) &= g / (g+g_0)
\\
\Pr((g,x),(g,{\sf b})) &= g_0 / (g+g_0)
\\
\Pr((g,x),(g',x')) &= 0 & \text{otherwise}
\end{array}
\]
We label each such state with its second component.
We now compare the ideal system to the real one.
Intuitively, the ideal system, when started from state $q_a$,
produces a sequence of states whose labels are uniform independent
random values in $\setenum{{\sf a},{\sf b}}$.
Instead, the real system slowly becomes more and more biased towards
label $\sf a$.
More precisely, when started from state $(g_0, {\sf a})$, in the
first drawing the next label is uniformly distributed between
${\sf a}$ and ${\sf b}$, as in the ideal system.
When the sampled state has label $\sf a$, this causes the component
$g$ to be incremented, increasing the probability $g/(g+g_0)$ of
sampling another $\sf a$ in the next steps.
Indeed, the value $g$ is always equal to $g_0$ plus the number of
sampled $\sf a$-labelled states so far.
Therefore, unlike the ideal system, on the long run the real system
will visit $\sf a$-labelled states with very high probability, since
the $g$ component slowly but steadily increases.
While this fact makes the two systems \emph{not} bisimilar according
to the standard probabilistic bisimilarity~\cite{Larsen89popl}, if
we restrict the number of steps to $n \ll g_0$ and tolerate a
small error $\delta$, we can obtain
$q_a \crysim{n}{\delta} (g_0,{\sf a})$.
For instance, if we let $g_0=1000$, $n=100$ and $\delta=0.05$
we have $q_a \crysim{n}{\delta} (g_0,{\sf a})$.
This is because, in $n$ steps, the first component $g$ of a
real system $(g,x)$ will at most reach $1100$, making the
probability of the next step to be $(g+1,{\sf a})$ to be at most
$1100/2100\simeq 0.523$.
This differs from the ideal probability $0.5$ by less than $\delta$,
hence bisimilarity holds.
\end{exa}
% (removed non-LaTeX extraction artifact: "| 3,440 | 39,865 | en | train | 0.102.5 |")
\section{Soundness}
\ellel{sec:result}
Our soundness theorem shows that, if we consider any state $q$
satisfying $\phi$ (with steps $n$ and error $\deltai$), and
any state $qi$ which is bisimilar to $q$ (with enough
steps and error $\delta$), then $qi$ must satisfy $\phi$,
with the same number $n$ of steps, at the cost of suitably
increasing the error.
For a fixed $\phi$, the ``large enough'' number of steps and the
increase in the error depend linearly on $n$.
\begin{thm}[Soundness]
\label{th:soundness}
Let $k_X = \nestMax{{\sf X}}{\phi}$
be the maximum ${\sf X}$-nesting of a formula $\phi$,
and let $k_U = \nestMax{{\sf U}}{\phi}$
be the maximum ${\sf U}$-nesting of $\phi$.
Then, for all $n,\delta,\deltai$ we have:
\[
\begin{array}{c}
\cryset{nb}{\delta}{
\sem{n}{\deltai}{+1}{\phi}}
\subseteq
\sem{n}{nb\cdot\delta+\deltai}{+1}{\phi}
\tag*{where $nb = n \cdot k_U + k_X + 1$}
\end{array}
\]
\end{thm}
\begin{exa}
\ellel{ex:results:padlock}
We apply~\autoref{th:soundness} to our padlock system
in the running example.
We take the same formula
$\phi = \logPr{\leq 0}{{\sf true}\ {\sf U}\ {\sf err}}$
of~\autoref{ex:pctl:padlock} and choose $n=10^3$ and $\deltai=0$.
Since $\phi$ has only one until operator and no next operators,
the value $nb$ in the theorem statement is
$nb = 10^3\cdot 1+0+1 = 1001$.
Therefore, from~\autoref{th:soundness} we obtain, for all $\delta$:
\[
\begin{array}{ll}
& \cryset{1001}{\delta}{
\sem{1000}{0}{+1}{\phi}}
\subseteq
\sem{1000}{1001\cdot \delta}{+1}{\phi}
\end{array}
\]
In~\autoref{ex:pctl:padlock} we discussed how the ideal padlock
$q_{\sf ok}$ satisfies the formula $\phi$ for any
number of steps and any error value.
In particular, choosing 1000 steps and zero error, we get
$q_{\sf ok}\in \sem{1000}{0}{+1}{\phi}$.
Moreover, in~\autoref{ex:sim:padlock} we observed that states
$q_{\sf ok}$ and $q_0$ are bisimilar with
$nb=1001$ and $\delta=1/(N-0-nb+2) = 1/99001$,
i.e.\@\xspace~$q_{\sf ok} \crysim{nb}{\delta} q_0$.
In such case, the theorem ensures that
$q_0\in\sem{1000}{1001/99001}{+1}{\phi}$, hence the
real padlock can be considered unbreakable if we limit our
attention to the first $n=1000$ steps, up to an error
of $1001/99001 \approx 0.010111$.
Finally, we note that such error is remarkably close to the least
value that would still make $q_0$ satisfy $\phi$, which we
computed in~\autoref{ex:pctl:padlock} as
$n/N = 10^3/10^5 = 0.01$.
\end{exa}
In the rest of this section, we describe the general structure of the
proof in a top-down fashion, leaving the detailed proof
for~\autoref{sec:proofs}.
We prove the soundness theorem by induction on the state formula
$\phi$, hence we also need to deal with path formulae $\psi$.
Note that the statement of the theorem considers the image of the
semantics of the state formula $\phi$ w.r.t.~bisimilarity (i.e.,
$\cryset{nb}{\delta}{\sem{n}{\deltai}{+1}{\phi}}$).
Analogously, to deal with path formulae we also need an analogous
notion on sets of traces.
To this purpose, we consider the set of traces in the definition of
the semantics:
$T = \trStart{p} \cap
\sem{n}{\delta}{r}{\psi}$.
Then, given a state $q$ bisimilar to $p$, we define the
set of \emph{pointwise bisimilar traces} starting from $q$,
which we denote with $\TR{n}{\delta,\stateQ}{T}$.
Technically, since $\psi$ can only observe a finite portion of a
trace, it is enough to define $\TR{n}{\delta,\stateQ}{\tilde{T}}$
on sets of \emph{trace fragments} $\tilde{T}$.
\begin{defi}
Write $\fram{q_0}{n}$ for the set of all trace fragments
of length $n$ starting from $q_0$.
Assuming $\stateP \CSim{n}{\delta} \stateQ$,
we define $\TR{n}{\delta,\stateQ}{}: \mathcal{P}(\fram{\stateP}{n})
\rightarrow \mathcal{P}(\fram{\stateQ}{n})$
as follows:
\[
\TR{n}{\delta,\stateQ}{\tilde{T}} =
\setcomp{\tilde{u} \in \fram{\stateQ}{n}}{
\exists \tilde{t} \in \tilde{T}.\, \forall 0 \leq i < n.\
\tilde{t}(i) \CSim{n-i}{\delta} \tilde{u}(i)
}
\]
\end{defi}
\noindent
In particular, notice that
$\fram{q}{1} = \setenum{q}$ (the trace fragment of length 1),
and so:
\[
\TR{1}{\delta,\stateQ}{\emptyset} = \emptyset
\qquad
\TR{1}{\delta,\stateQ}{\setenum{q}} = \setenum{q}
\]
The key inequality we exploit in the proof (\autoref{lem:traces})
compares the probability of a set of trace fragments $\tilde{T}$
starting from $\stateP$ to the one of the related set of trace fragments
$\TR{m}{\delta,\stateQ}{\tilde{T}}$ starting from a $\stateQ$ bisimilar to
$\stateP$.
We remark that the component $nb \delta$ in the error that
appears in~\autoref{th:soundness} results from the component $m \delta$
appearing in the following lemma.
\begin{lem}\label{lem:traces}
If $\stateP \CSim{n}{\delta} \stateQ$ and $\tilde{T}$ is a set of
trace fragments of length $m$, with $m \leq n$, starting
from $\stateP$, then:
\[
\prob{\tilde{T}}{} \leq \prob{\TR{m}{\delta,\stateQ}{\tilde{T}}}{} +
m \delta
\]
\end{lem}
\autoref{lem:traces} allows $\tilde{T}$ to be an infinite set (because
the set of states $\mathcal{Q}$ can be infinite).
We reduce this case to that in which $\tilde{T}$ is finite.
We first recall a basic calculus property: any inequality $a \leq b$
can be proved by establishing instead $a \leq b + \epsilon$ for all
$\epsilon > 0$.
Then, since the probability distribution of trace fragments of length
$m$ is discrete, for any $\epsilon>0$ we can always take a finite
subset of the infinite set $\tilde{T}$ whose probability differs
from that of $\tilde{T}$ less than $\epsilon$.
It is then enough to consider the case in which $\tilde{T}$ is
finite, as done in the following lemma.
\begin{lem}\label{lem:finite-traces}
If $\stateP \CSim{n}{\delta} \stateQ$ and $\tilde{T}$ is a finite
set of trace fragments of length $n > 0$ starting from $\stateP$,
then:
\[
\prob{\tilde{T}}{} \leq
\prob{\TR{n}{\delta,\stateQ}{\tilde{T}}}{} + n \delta
\]
\end{lem}
We prove~\autoref{lem:finite-traces} by induction on $n$. In the
inductive step, we partition the traces according to their first move,
i.e., on their next state after $\stateP$ (for the trace fragments in $T$)
or $\stateQ$ (for the bisimilar counterparts).
A main challenge here is caused by the probabilities of such moves
being weakly connected. Indeed, when $\stateP$ moves to $\statePi$, we might
have several states $\stateQi$, bisimilar to $\statePi$, such that $\stateQ$ moves
to $\stateQi$. Worse, when $\stateP$ moves to another state $\statePii$, we might
find that some of the states $\stateQi$ we met before are also bisimilar
to $\statePii$.
Such overlaps make it hard to connect the probability of $\stateP$ moves
to that of $\stateQ$ moves.
To overcome these issues, we exploit the technical lemma below. Let
set $A$ represent the $\stateP$ moves, and set $B$ represent the $\stateQ$
moves.
Then, associate to each set element $a\in A,b\in B$ a value
($f_A(a), f_B(b)$ in the lemma) representing the move probability.
The lemma ensures that each $f_A(a)$ can be expressed as a weighted
sum of $f_B(b)$ for the elements $b$ bisimilar to $a$. Here, the
weights $h(a,b)$ make it possible to relate a $\stateP$ move to a
``weighted set'' of $\stateQ$ moves.
Furthermore, the lemma ensures that no $b\in B$ has been cumulatively used
for more than a unit weight ($\sum_{a \in A} h(a,b) \leq 1$).
\begin{lem}\label{lem:matching}
Let $A$ be a finite set and $B$ be a countable set, equipped with functions $f_A: A \rightarrow \mathbb{R}_0^+$ and $f_B: B \rightarrow \mathbb{R}_0^+$.
Let $g:A \rightarrow 2^B$ be such that $\sum_{b \in g(a)} f_B(b)$
converges for all $a \in A$.
If, for all $A' \subseteq A:$
\begin{equation}\ellel{eq:matching-assumption}
\sum_{a \in A'} f_A(a) \leq \sum_{b \in \bigcup_{a \in A'} g(a)} f_B(b)
\end{equation}
then there exists $h:A \times B \rightarrow \intervalCC 0 1$ such that:
\begin{align}\ellel{eq:matching-thesis:1}
&\forall b \in B: \sum_{a \in A} h(a,b) \leq 1
\\
\ellel{eq:matching-thesis:2}
&\forall A' \subseteq A:
\sum_{a \in A'} f_A(a) = \sum_{a \in A'} \sum_{b \in g(a)} h(a,b) f_B(b)
\end{align}
\end{lem}
\begin{figure}
  \caption{Graphical representation of~\autoref{lem:matching}.}
  \label{fig:matching}
\end{figure}
We visualize~\autoref{lem:matching} in~\autoref{fig:matching} through an
example. The leftmost graph shows a finite set
$A=\setenum{a_1,a_2,a_3}$ where each $a_i$ is equipped with its
associated value $f_A(a_i)$ and, similarly, a finite set
$B=\setenum{b_1,\ldots,b_4}$ where each $b_i$ has its own value
$f_B(b_i)$. The function $g$ is rendered as the edges of the graph,
connecting each $a_i$ with all $b_j \in g(a_i)$.
The graph satisfies the hypotheses, as one can easily verify. For
instance, when $A' = \setenum{a_1,a_2}$ inequality
\eqref{eq:matching-assumption} simplifies to $0.3+0.5 \leq
0.5+0.6$. The thesis ensures the existence of a weight function
$h(-,-)$ whose values are shown in the graph on the left over each
edge.
These values indeed satisfy \eqref{eq:matching-thesis:1}: for
instance, if we pick $b=b_2$ the inequality reduces to
$0.5+0.1\bar 6 \leq 1$. Furthermore, \eqref{eq:matching-thesis:2} is also
satisfied: for instance, taking $A'=\setenum{a_2}$ the equation
reduces to $0.5 = 0.4\cdot 0.5+0.5\cdot 0.6$, while taking
$A'=\setenum{a_3}$ the equation reduces to
$0.2 = 0.1\bar 6\cdot 0.6+1.0\cdot 0.05+1.0\cdot 0.05$.
The rightmost graph in~\autoref{fig:matching} instead sketches how our
proof devises the desired weight function $h$, by constructing a
network flow problem, and exploiting the well-known min-cut/max-flow
theorem~\cite{MinCut}, following the approach of~\cite{Baier98thesis}.
We start by adding a source node to the right (white bullet in the
figure), connected to nodes in $B$, and a sink node to the left,
connected to nodes in $A$.
We write the capacity over each edge: we use $f_B(b_i)$ for the edges
connected to the source, $f_A(a_i)$ for the edges connected to the
sink, and $+\infty$ for the other edges in the middle.
Then, we argue that the leftmost cut $C$ shown in the figure is a
min-cut. Intuitively, if we take another cut $C'$ not including some
edge in $C$, then $C'$ has to include other edges making $C'$ not any
better than $C$.
Indeed, $C'$ can surely not include any edge in the middle, since they
have $+\infty$ capacity.
Therefore, if $C'$ does not include an edge from some $a_i$ to the
sink, it has to include all the edges from the source to each
$b_j \in g(a_i)$.
In this case, hypothesis \eqref{eq:matching-assumption} ensures that
doing so does not lead to a better cut.
Hence, $C$ is indeed a min-cut.
From the max-flow corresponding to the min-cut, we derive the values
for $h(-,-)$.
Thesis \eqref{eq:matching-thesis:1} follows from the flow conservation
law on each $b_i$, and the fact that the incoming flow of each $b_j$
from the source is bounded by the capacity of the related
edge.
Thesis \eqref{eq:matching-thesis:2} follows from the flow
conservation law on each $a_i$, and the fact that the outgoing flow of
each $a_i$ to the sink is exactly the capacity of the related edge,
since the edge is on a min-cut.
% (removed non-LaTeX extraction artifact: "| 4,025 | 39,865 | en | train | 0.102.6 |")
\section{Asymptotic equivalence}
\ellel{sec:asymptotic}
In this section we transport the notion of bisimilarity and the
semantics of PCTL to \emph{families} of states, thus reasoning on
their asymptotic behaviours.
More precisely, given a state-labelled DTMC $\mathcal{Q}$, we define a family
of states to be an infinite sequence $\Xi: \mathbb{N}\to\mathcal{Q}$.
Intuitively, $\Xi(\eta)$ can describe the behaviour of a
probabilistic system depending on a security parameter $\eta \in \mathbb{N}$.
When using bisimilarity (\autoref{def:param-bisim}) to relate
two given states $Q_1$ and $Q_2$, we have to provide a
number of steps $n$ and a probability error $\delta$.
By contrast, when relating two families $\Xi_1$ and $\Xi_2$ we
want to focus on their asymptotic behaviour, and obtain an equivalence
that does not depend on specific values of $n$ and
$\delta$.
To do so, we start by recalling the standard definition of
\emph{negligible function}:
\begin{defi}[Negligible Function]
A function $f : \mathbb{N}\to\mathbb{R}$ is said to be negligible
whenever
\[
\forall c\in\mathbb{N}.\
\exists \bar \eta.\
\forall \eta\geq\bar \eta.\
|f(\eta)| \leq \eta^{-c}
\]
\end{defi}
We say that $\Xi_1$ and $\Xi_2$ are asymptotically equivalent
($\Xi_1 \equiv \Xi_2$) when the families are asymptotically
pointwise bisimilar with a negligible error $\delta(\eta)$ whenever
$n(\eta)$ is a polynomial.
\begin{defi}[Asymptotic Equivalence]\ellel{def:crysim}
Given $\Xi_1,\Xi_2 : \mathbb{N}\to\mathcal{Q}$, we write
$\Xi_1 \equiv \Xi_2$ if and only if
for each polynomial $n(-)$
there exists a negligible function $\delta(-)$
and $\bar \eta \in \mathbb{N}$
such that for all $\eta\geq \bar \eta$
we have
$\Xi_1(\eta) \crysim{n(\eta)}{\delta(\eta)} \Xi_2(\eta)$
\end{defi}
\begin{lem}
$\equiv$ is an equivalence relation.
\end{lem}
\begin{proof}
Reflexivity and symmetry are trivial. For transitivity, given
a polynomial
$n(-)$, let $\delta_1(-),\delta_2(-)$ be the negligible
functions resulting from the hypotheses $\Xi_1 \equiv \Xi_2$
and $\Xi_2 \equiv \Xi_3$, respectively.
Asymptotically, we obtain
\[
\Xi_1(\eta) \crysim{n(\eta)}{\delta_1(\eta)} \Xi_2(\eta)
\qquad\land\qquad
\Xi_2(\eta) \crysim{n(\eta)}{\delta_2(\eta)} \Xi_3(\eta)
\]
By the transitivity of $\crysim{}{}$, we get
\[
\Xi_1(\eta)
\crysim{n(\eta)}{\delta_1(\eta)+\delta_2(\eta)}
\Xi_3(\eta)
\]
Hence we obtain the thesis since the sum of negligible functions
$\delta_1(\eta)+\delta_2(\eta)$ is negligible.
\end{proof}
We now provide an asymptotic semantics for PCTL, by defining its
satisfaction relation $\Xi \models \phi$.
As done above, this notion does not depend on specific values for
$n$ and $\delta$ (unlike the semantics in
\autoref{def:pctl:sem}), but instead considers the asymptotic
behaviour of the family.
\begin{defi}[Asymptotic Satisfaction Relation]
We write $\Xi \models \phi$ when there exists a polynomial
$\barn(-)$ such that for each polynomial
$n(-) \geq \barn(-)$ there exists a negligible function
$\delta(-)$ and $\bar\eta \in \mathbb{N}$ such that for all
$\eta\geq \bar\eta$ we have
$\Xi(\eta) \in \sem{n(\eta)}{\delta(\eta)}{+1}{\phi}$
\end{defi}
In the above definition, we only consider polynomials greater than a
threshold $\barn(-)$.
This is because a family $\Xi$ representing, say, a protocol could
require a given (polynomial) number of steps to complete its execution.
It is reasonable, for instance, that $\Xi(\eta)$ needs to exchange
$\eta^2$ messages over a network to perform its task.
In such cases, we still want to make $\Xi$ satisfy a formula
$\phi$ stating that the task is eventually completed with high
probability.
If we quantified over all polynomials $n(-)$, we would also
allow choosing small polynomials like $n(\eta)=\eta$ or even
$n(\eta)=1$, which would not provide $\Xi$ enough time to complete.
Using a (polynomial) threshold $\barn(-)$, instead, we always
provide enough time.
We now establish the main result of this section, asymptotic
soundness, stating that equivalent families of states asymptotically
satisfy the same PCTL formulae.
The proof relies on our previous soundness~\autoref{th:soundness}.
\begin{thm}[Asymptotic Soundness]
\ellel{th:asymptotic}
Let $\Xi_1,\Xi_2$ be families of states such that
$\Xi_1 \equiv \Xi_2$. For every PCTL formula $\phi$:
\[
\Xi_1 \models \phi \iff
\Xi_2 \models \phi
\]
\end{thm}
\begin{proof}
Assuming $\Xi_1 \models \phi$ and
$\Xi_1 \equiv \Xi_2$, we prove
$\Xi_2 \models \phi$.
Let $k_X = \nestMax{{\sf X}}{\phi}$
be the maximum ${\sf X}$-nesting of $\phi$,
and let $k_U = \nestMax{{\sf U}}{\phi}$
be the maximum ${\sf U}$-nesting of $\phi$.
Let $\barn_1(-)$ as in the definition of the hypothesis
$\Xi_1 \models \phi$.
To prove the thesis $\Xi_2 \models \phi$, we choose
$\barn_2(-) = \barn_1(-)$, and we consider an arbitrary
$n_2(-)\geq\barn_2(-)=\barn_1(-)$. We can then
choose $n_1(-) = n_2(-)$ in the same hypothesis, and
obtain a negligible $\delta_1(-)$ and $\bar\eta_1$, where for any
$\eta \geq \bar\eta_1$ we have
\begin{equation}
\ellel{eq:fam1-hp}
\Xi_1(\eta) \in \sem{n_2(\eta)}{\delta_1(\eta)}{+1}{\phi}
\end{equation}
We now exploit the other hypothesis $\Xi_1 \equiv \Xi_2$,
choosing the polynomial
\begin{equation}
\ellel{eq:n-eta}
n(\eta) = n_2(\eta) \cdot k_U + k_X + 1
\end{equation}
and obtaining a negligible $\delta(-)$ and $\bar\eta$ where
for any $\eta\geq\bar\eta$ we have
\begin{equation}
\ellel{eq:sim-hp}
\Xi_1(\eta) \crysim{n(\eta)}{\delta(\eta)} \Xi_2(\eta)
\end{equation}
To prove the thesis, we finally choose the negligible function
$\delta_2(\eta) = n(\eta)\cdot\delta(\eta)+\delta_1(\eta)$ and
$\bar\eta_2 = \max(\bar\eta_1, \bar\eta)$.
By~\autoref{th:soundness} we have that for any
$\eta\geq\bar\eta_2$:
\[
\cryset{n(\eta)}{\delta(\eta)}{
\sem{n_2(\eta)}{\delta_1(\eta)}{+1}{\phi}}
\subseteq
\sem{n_2(\eta)}{n(\eta)\cdot\delta(\eta)+\delta_1(\eta)}{+1}{\phi}
\mbox{ where $n(\eta)$ is as in \eqref{eq:n-eta}.}
\]
Applying this to \eqref{eq:fam1-hp} and \eqref{eq:sim-hp} we then
have that, for any $\eta\geq\bar\eta_2$:
\[
\Xi_2(\eta) \in \sem{n_2(\eta)}{n(\eta)\cdot\delta(\eta)+\delta_1(\eta)}{+1}{\phi}
\]
which is our thesis
\[
\Xi_2(\eta) \in \sem{n_2(\eta)}{\delta_2(\eta)}{+1}{\phi}
\qedhere
\]
\end{proof}
\begin{exa}\ellel{ex:asymptotic:padlock}
We now return to the padlock examples \ref{ex:pctl:padlock} and
\ref{ex:sim:padlock}.
We again consider an ideal padlock modelled by a state
$q_{\sf ok}$, but also a sequence of padlocks
having an increasing number of digits $\eta$, hence an increasing
number $N=10^\eta$ of combinations.
We assume that state $q_{i,10^\eta}$ models the state of a
padlock having $\eta$ digits where the adversary has already made
$i$ brute force attempts, following the same strategy as in the
previous examples.
The transition probabilities are also similarly defined.
In this scenario, we can define two state families.
Family $\Xi_1(\eta) = q_{\sf ok}$ represents a (constant)
sequence of ideal padlocks, while family
$\Xi_2(\eta) = q_{0,10^\eta}$ represents a sequence of
realistic padlocks with no previous brute force attempt ($i=0$),
in increasing order of robustness.
Indeed, as $\eta$ increases, the padlock becomes harder
to break by brute force since the number of combinations $N=10^\eta$ grows.
In~\autoref{ex:sim:padlock}, we have seen that
\[
\Xi_1(\eta) \crysim{n(\eta)}{\delta(\eta)} \Xi_2(\eta)
\qquad
\mbox{ where }
\delta(\eta) =
\dfrac{1}{N-0-n(\eta)+2} =
\dfrac{1}{10^\eta-n(\eta)+2}
\]
and we can observe that the above $\delta(\eta)$ is indeed
negligible when $n(\eta)$ is a polynomial.
This means that $\Xi_1 \equiv \Xi_2$ holds, hence we can apply
\autoref{th:asymptotic} and conclude that the families $\Xi_1$
and $\Xi_2$ asymptotically satisfy the same PCTL formulae.
This is intuitive since, when the adversary can only attempt a
polynomial number of brute force attacks, and when the number of
combinations increases exponentially, the robustness of the
realistic padlocks effectively approaches that of the ideal one.
\end{exa}
We now discuss how~\autoref{th:asymptotic} could be applied to
a broad class of systems.
Consider the execution of an ideal cryptographic protocol, modelled
as a DTMC starting from the initial state $q_i$.
This model could represent, for instance, the semantics of a formal,
symbolic system such as those that can be expressed using process
algebras.
In this scenario, the underlying cryptographic primitives can be
\emph{perfect}, in the sense that ciphertexts reveal no information
to anyone who does not know the decryption key, signatures can never be
forged, hash preimages can never be found, and so on, regardless of the
amount of computational resources available to the adversary.
Given such a model, it is then possible to refine it making the
cryptographic primitives more realistic, allowing an adversary to
attempt attacks such as decryptions and signature forgeries, which
however succeed only with negligible probability w.r.t.\@\xspace a security
parameter $\eta$.
This more realistic system can be modelled using a distinct DTMC
state $q^\eta_r$ whose behaviour is similar to that of
$q_i$: the state transitions are essentially the same, except
for the cases in which the adversary is successful in attacking the
cryptographic primitives.
Therefore, the transition probabilities are almost the same,
differing only by a negligible quantity.
Therefore, we can let $\Xi_1(\eta)=q_i$ and
$\Xi_2(\eta)=q^\eta_r$, and observe that they are indeed
asymptotically equivalent.
Note that this holds in general by construction, no matter what is
the behaviour of the ideal system $q_i$ we started from.
Finally, by~\autoref{th:asymptotic} we can claim that both
families $\Xi_1,\Xi_2$ asymptotically satisfy the same PCTL
formulas.
This makes it possible, in general, to prove properties on the
simpler $q_i$ system, possibly even using some automatic
verification tools, and transfer these results in the more realistic
setting $q^\eta_r$.
A special case of this fact was originally studied
in~\cite{ZuninoD05}, which however only considered reachability
properties.
By comparison, \autoref{th:asymptotic} is much more general,
allowing one to transfer any property that can be expressed using a
PCTL formula.
The construction above allows one to refine an ideal system
$q_i$ into a more realistic one $q^\eta_r$ by taking
certain adversaries into account.
However, if our goal were to study the security of the system against
\emph{all} reasonable adversaries, then the above approach would not
be applicable.
Indeed, it is easy to find an ideal system and a corresponding
realistic refinement, comprising a reasonable adversary,
where the asymptotic equivalence does not hold.
For instance, consider an ideal protocol where Alice and Bob exchange
ten messages, after which Alice randomly chooses and exchanges a
single bit.
To assess the security of a realistic implementation, we might want to
consider the case where Alice is an adversary.
In such case, a malicious Alice could exchange the first two messages,
then flip a coin $b \leftarrow \{0,1\}$ in secret, exchange the other
eight messages, and finally send the value $b$.
The behaviour of such realistic system differs from the ideal one,
since the ideal one has a probabilistic choice point only at the end,
while the realistic system anticipates it after the first two moves.
It is easy to check (and well known) that moving choices to an earlier
point makes standard bisimilarity fail, and this is the case also for
asymptotic equivalence.
The failure of asymptotic equivalence prevents us from applying the
asymptotic soundness theorem.
In particular, assume that we have proved that the ideal system enjoys
certain specifications expressed as PCTL formulae.
We cannot exploit the theorem to show that the realistic system
with the adversary also enjoys the same specifications.
\section{Conclusions}
In this paper we studied how the (relaxed) semantics of PCTL formulae
interacts with (approximate) probabilistic bisimulation. In the
regular, non relaxed case, it is well-known that when a state
$q$ satisfies a PCTL formula $\phi$, then all the states that
are probabilistic-bisimilar to $q$ also satisfy $\phi$
(\cite{Desharnais10iandc}).
\autoref{th:soundness} extends this to the relaxed semantics, establishing that when a state $q$ satisfies a PCTL formula $\phi$ up-to $n$ steps and error $\delta$, then all the states that are approximately probabilistic bisimilar to $q$ with error $\deltai$ (and enough steps) also satisfy $\phi$ up-to $n$ steps and suitably increased error.
We provide a way to compute the new error in terms of $n, \delta, \deltai$.
\autoref{th:asymptotic} extends such soundness result to the
asymptotic behaviour where the error becomes negligible when the
number of steps is polynomially bounded.
Our results are a first step towards a novel approach to the security
analysis of cryptographic protocols using probabilistic bisimulations.
When one is able to prove that a real-world specification of a
cryptographic protocol is asymptotically equivalent to an ideal one,
then one can invoke~\autoref{th:asymptotic} and claim that the two
models satisfy the same PCTL formulae, essentially reducing the
security proof of the cryptographic protocol to verifying the ideal
model. A relevant line for future work is to study the applicability
of our theory in this setting.
As discussed in~\autoref{sec:asymptotic}, our approach is not
applicable to all protocols and all adversaries.
A relevant line of research could be the study of larger asymptotic
equivalences, which allow to transfer properties from ideal to
realistic systems.
This could be achieved, e.g., by considering weaker logics than PCTL,
or moving to linear temporal logics.
Another possible line of research would be investigating proof
techniques for establishing approximate bisimilarity and
refinement~\cite{Jonsson91lics}, as well as devising algorithms for
approximate bisimilarity, along the lines
of~\cite{BreugelW14birthday,ChenBW12fossacs,Fu12icalp,TangB16concur,TangB17concur,TangB18cav}.
This direction, however, would require restricting our theory to
finite-state systems, which contrasts with our general motivation
coming from cryptographic security. Indeed, in the analysis of
cryptographic protocols, security is usually to be proven against an
arbitrary adversary, hence also against infinite-state ones. Hence,
model-checking of finite-state systems would not directly be
applicable in this setting.
\paragraph{Acknowledgements}
Massimo Bartoletti is partially supported by
Conv.\ Fondazione di Sardegna \& Atenei Sardi project
F75F21001220007 \emph{ASTRID}.
Maurizio Murgia and Roberto Zunino are partially supported by the PON project \textit{Distributed Ledgers for Secure Open Communities}.
Maurizio Murgia is partially supported by MUR PON REACT EU DM 1062/21.
\appendix
\section{Proofs} \ellel{sec:proofs}
\begin{proofof}{Lemma}{lem:pctl:monotonicity}
We simultaneously prove the whole statement by induction on the structure
of the formulae $\phi$ and $\psi$.
The cases $\phi=l$ and $\phi=\sf true$ result
in trivial equalities.
For the case $\phi=\lnot\phii$ we need to prove
\begin{align*}
& \sem{n}{\delta}{+1}{\lnot\phii} \subseteq
\sem{n}{\deltai}{+1}{\lnot\phii}
\\
& \sem{n}{\deltai}{-1}{\lnot\phii} \subseteq
\sem{n}{\delta}{-1}{\lnot\phii}
\\
& \sem{n}{\delta}{-1}{\lnot\phii} \subseteq
\sem{n}{\delta}{+1}{\lnot\phii}
\end{align*}
which is equivalent to
\begin{align*}
& \mathcal{Q}\setminus\sem{n}{\delta}{-1}{\phii} \subseteq
\mathcal{Q}\setminus\sem{n}{\deltai}{-1}{\phii}
\\
& \mathcal{Q}\setminus\sem{n}{\deltai}{+1}{\phii} \subseteq
\mathcal{Q}\setminus\sem{n}{\delta}{+1}{\phii}
\\
& \mathcal{Q}\setminus\sem{n}{\delta}{+1}{\phii} \subseteq
\mathcal{Q}\setminus\sem{n}{\delta}{-1}{\phii}
\end{align*}
which, in turn, is equivalent to
\begin{align*}
& \sem{n}{\deltai}{-1}{\phii} \subseteq
\sem{n}{\delta}{-1}{\phii}
\\
& \sem{n}{\delta}{+1}{\phii} \subseteq
\sem{n}{\deltai}{+1}{\phii}
\\
& \sem{n}{\delta}{-1}{\phii} \subseteq
\sem{n}{\delta}{+1}{\phii}
\end{align*}
which is the induction hypothesis.
\noindent
For the case $\phi=\phi_1 \land \phi_2$ we need to prove
\begin{align*}
& \sem{n}{\delta}{+1}{\phi_1\land\phi_2} \subseteq
\sem{n}{\deltai}{+1}{\phi_1\land\phi_2}
\\
& \sem{n}{\deltai}{-1}{\phi_1\land\phi_2} \subseteq
\sem{n}{\delta}{-1}{\phi_1\land\phi_2}
\\
& \sem{n}{\delta}{-1}{\phi_1\land\phi_2} \subseteq
\sem{n}{\delta}{+1}{\phi_1\land\phi_2}
\end{align*}
which is equivalent to
\begin{align*}
& \sem{n}{\delta}{+1}{\phi_1}
\cap \sem{n}{\delta}{+1}{\phi_2}
\subseteq
\sem{n}{\deltai}{+1}{\phi_1}
\cap \sem{n}{\deltai}{+1}{\phi_2}
\\
& \sem{n}{\deltai}{-1}{\phi_1}
\cap \sem{n}{\deltai}{-1}{\phi_2}
\subseteq
\sem{n}{\delta}{-1}{\phi_1}
\cap \sem{n}{\delta}{-1}{\phi_2}
\\
& \sem{n}{\delta}{-1}{\phi_1}
\cap \sem{n}{\delta}{-1}{\phi_2}
\subseteq
\sem{n}{\delta}{+1}{\phi_1}
\cap \sem{n}{\delta}{+1}{\phi_2}
\end{align*}
which immediately follows from the induction hypothesis on
$\phi_1$ and $\phi_2$.
For the case $\phi=\logPr{\rhd \pi}{\psi}$ we need to prove
\begin{align*}
& \sem{n}{\delta}{+1}{\logPr{\rhd \pi}{\psi}} \subseteq
\sem{n}{\deltai}{+1}{\logPr{\rhd \pi}{\psi}}
\\
& \sem{n}{\deltai}{-1}{\logPr{\rhd \pi}{\psi}} \subseteq
\sem{n}{\delta}{-1}{\logPr{\rhd \pi}{\psi}}
\\
& \sem{n}{\delta}{-1}{\logPr{\rhd \pi}{\psi}} \subseteq
\sem{n}{\delta}{+1}{\logPr{\rhd \pi}{\psi}}
\end{align*}
The first inclusion follows from
\begin{align*}
\sem{n}{\delta}{+1}{\logPr{\rhd \pi}{\psi}}
& = \setcomp{q\in\mathcal{Q}}{
\Pr(\trStart{q} \cap \sem{n}{\delta}{+1}{\psi})
+ \delta \rhd \pi }
\\
& \subseteq
\setcomp{q\in\mathcal{Q}}{
\Pr(\trStart{q} \cap \sem{n}{\deltai}{+1}{\psi})
+ \deltai \rhd \pi }
\\
& =
\sem{n}{\deltai}{+1}{\logPr{\rhd \pi}{\psi}}
\end{align*}
where we exploited $\delta\leq\deltai$,
the induction hypothesis
$\sem{n}{\delta}{+1}{\psi} \subseteq
\sem{n}{\deltai}{+1}{\psi}$, the monotonicity
of $\Pr(-)$, and the fact that $\geq\circ\,\rhd \subseteq \rhd$.
The second inclusion follows from an analogous argument:
\begin{align*}
\sem{n}{\deltai}{-1}{\logPr{\rhd \pi}{\psi}}
& =
\setcomp{q\in\mathcal{Q}}{
\Pr(\trStart{q} \cap \sem{n}{\deltai}{-1}{\psi})
- \deltai \rhd \pi }
\\
& \subseteq
\setcomp{q\in\mathcal{Q}}{
\Pr(\trStart{q} \cap \sem{n}{\delta}{-1}{\psi})
- \delta \rhd \pi }
\\
& =
\sem{n}{\delta}{-1}{\logPr{\rhd \pi}{\psi}}
\end{align*}
where we exploited $-\deltai\leq-\delta$,
the induction hypothesis
$\sem{n}{\deltai}{-1}{\psi} \subseteq
\sem{n}{\delta}{-1}{\psi}$, the monotonicity
of $\Pr(-)$, and the fact that $\geq\circ\,\rhd \subseteq \rhd$.
For $\psi = {\sf X} \phi$, we can observe that
$\sem{n}{\delta}{r}{{\sf X} \phi}
= f(\sem{n}{\delta}{r}{\phi})$
where $f$ is a monotonic function mapping sets of states
to sets of traces, which does not depend on $\delta,r,n$.
Hence, the thesis follows from the set inclusions about
the semantics of $\phi$ in the induction hypothesis.
Similarly, for $\psi = \phi_1 {\sf U} \phi_2$, we can
observe that
\(
\sem{n}{\delta}{r}{\phi_1 {\sf U} \phi_2} =
g_n(\sem{n}{\delta}{r}{\phi_1},
\sem{n}{\delta}{r}{\phi_2})
\)
where $g_n$ is a monotonic
function mapping pairs of sets of states to sets of traces, which
does not depend on $\delta,r$ (but only on $n$).
Hence, the thesis follows from the set inclusions about the
semantics of $\phi_1$ and $\phi_2$ in the induction
hypothesis.
\qed
\end{proofof}
\begin{proofof}{Lemma}{lem:sim:monotonicity}
The statement follows by induction on $n-ni$
from the following properties:
\begin{align}
\ellel{eq:sim:monotonicity:1}
& \delta \leq \deltai
\;\land\;
p \crysim{n}{\delta} q
\implies
p \crysim{n}{\deltai} q
\\
\ellel{eq:sim:monotonicity:2}
& p \crysim{n+1}{\delta} q
\implies
p \crysim{n}{\delta} q
\end{align}
To prove \eqref{eq:sim:monotonicity:1} we proceed by induction on
$n$.
In the base case $n = 0$ the thesis trivially follows by the
first case of Definition~\ref{def:param-bisim}.
For the inductive case, we assume \eqref{eq:sim:monotonicity:1}
holds for $n$, and prove it for $n+1$.
Therefore, we assume $p \crysim{n+1}{\delta} q$
and prove $p \crysim{n+1}{\deltai} q$.
To prove the thesis, we must show that all the items of
\autoref{def:param-bisim} hold.
Item~\eqref{def:param-bisim:a} directly follows from the
hypothesis.
For item~\eqref{def:param-bisim:b} we have
\[
\tsPr{p}{Q}
\leq \tsPr{q}{\cryset{n}{\delta}{Q}} + \delta
\leq \tsPr{q}{\cryset{n}{\deltai}{Q}} + \deltai
\]
where the first inequality follows from the hypothesis
$p \crysim{n+1}{\delta} q$, while the second one
follows from the induction hypothesis (which implies
$\cryset{n}{\delta}{Q} \subseteq
\cryset{n}{\deltai}{Q}$) and $\delta\leq\deltai$.
Item~\eqref{def:param-bisim:c} is analogous.
We now prove \eqref{eq:sim:monotonicity:2}, proceeding by induction
on $n$.
In the base case $n=0$, the thesis trivially follows by the
first case of~\autoref{def:param-bisim}.
For the inductive case, we assume the statement holds for $n$,
and we prove it for $n+1$.
Therefore, we assume $p \crysim{n+2}{\delta} q$
and prove $p \crysim{n+1}{\delta} q$.
To prove the thesis, we must show that all the items of
\autoref{def:param-bisim} hold.
Item~\eqref{def:param-bisim:a} directly follows from the hypothesis.
For item~\eqref{def:param-bisim:b} of the thesis we have
\[
\tsPr{p}{Q}
\leq \tsPr{q}{\cryset{n+1}{\delta}{Q}} + \delta
\leq \tsPr{q}{\cryset{n}{\delta}{Q}} + \delta
\]
where the first inequality follows from the hypothesis
$p \crysim{n+2}{\delta} q$, while the second one
follows from the induction hypothesis (which implies
$\cryset{n+1}{\delta}{Q} \subseteq
\cryset{n}{\delta}{Q}$).
Item~\eqref{def:param-bisim:c} is analogous.
\qed
\end{proofof}
\begin{samepage}
\begin{applemma}\ellel{lem:leq-eps-implies-leq}
Let $a,b \in \mathbb{R}$. If $\forall \epsilon > 0: a \leq b + \epsilon$ then $a \leq b$.
\end{applemma}
\begin{proof}
If $a>b$, taking $\epsilon=(a-b)/2$ contradicts the hypothesis.
\end{proof}
\end{samepage}
\begin{proofof}{Lemma}{lem:traces}
By Lemma~\ref{lem:sim:monotonicity} we have that $\stateP \CSim{m}{\delta} \stateQ$.
If $T$ is finite the thesis follows from Lemma~\ref{lem:finite-traces}.
If $T$ is infinite, it must be countable: this follows from the fact that
the states of a Markov chain are countable and the length of the traces in $T$ is finite.
So, let $\tilde{t}_0 \tilde{t}_1 \hdots$ be an enumeration of $T$.
By definition of infinite sum, we have that:
\[
\prob{T}{} = \lim_{k \to \infty} {\sum_{i = 0}^k \prob{\tilde{t}_i}{}}
\]
By definition of limit of a sequence, we have that for all $\epsilon > 0$ there exists $v \in \mathbb{N}$ such that for all $k > v$:
\[
\abs{\prob{T}{} - \sum_{i = 0}^k \prob{\tilde{t}_i}{}} < \epsilon
\]
Since $\prob{\tilde{t}_i}{} \geq 0$ for all $i$, we can drop the absolute value and we get:
\begin{equation}\ellel{lem:traces:eq1}
\prob{T}{} - \sum_{i = 0}^k \prob{\tilde{t}_i}{} < \epsilon
\end{equation}
By Lemma~\ref{lem:leq-eps-implies-leq} it suffices to show $\prob{T}{} \leq \prob{\TR{m}{\delta,\stateQ}{T}}{} + \delta m + \epsilon$ for all $\epsilon > 0$, or equivalently:
\[
\prob{T}{} - \epsilon \leq \prob{\TR{m}{\delta,\stateQ}{T}}{} + \delta m
\]
So, let $\epsilon > 0$ and let $k$ be such that \eqref{lem:traces:eq1} holds.
Then we have:
\[
\prob{T}{} - \epsilon < \sum_{i = 0}^k \prob{\tilde{t}_i}{}
\]
Let $T' = \setcomp{\tilde{t}_i}{i \leq k}$.
Since $\sum_{i = 0}^k \prob{\tilde{t}_i}{} = \prob{T'}{}$ and $T'$ is finite, by Lemma~\ref{lem:finite-traces} we have:
\[
\sum_{i = 0}^k \prob{\tilde{t}_i}{} \leq \prob{\TR{m}{\delta,\stateQ}{T'}}{} + \delta m
\]
Since $\TR{m}{\delta,\stateQ}{T'} \subseteq \TR{m}{\delta,\stateQ}{T}$ we have that:
\[
\prob{\TR{m}{\delta,\stateQ}{T'}}{} + \delta m \leq \prob{\TR{m}{\delta,\stateQ}{T}}{} + \delta m
\]
Summing up, we have that
$\prob{T}{} - \epsilon \leq \prob{\TR{m}{\delta,\stateQ}{T}}{} + \delta m$
for all $\epsilon > 0$.
By Lemma~\ref{lem:leq-eps-implies-leq} it follows that
$\prob{T}{} \leq \prob{\TR{m}{\delta,\stateQ}{T}}{} + \delta m$ as required.
\qed
\end{proofof}
\begin{proofof}{Lemma}{lem:matching}
Without loss of generality, we prove the statement under the following additional assumptions:
\begin{align}
\ellel{eq:matching-aux2}
& \forall b \in B: f_B(b) > 0
\\
\ellel{eq:matching-aux1}
& \forall b \in B: \setcomp{a \in A}{b \in g(a)} \neq \emptyset \qquad \text{and}
\\
\nonumber
& \qquad \forall b_1,b_2 \in B: \setcomp{a \in A}{b_1 \in g(a)} = \setcomp{a \in A}{b_2 \in g(a)} \implies b_1 = b_2
\end{align}
If $B$ does not satisfy \autoref{eq:matching-aux2}, just remove from $B$ the elements $b$ such that $f_B(b) = 0$, adjust $g$ accordingly, and set $h(a,b) = 0$.
\autoref{eq:matching-assumption} still holds since we removed only elements whose value is zero.
If $B$ does not satisfy \autoref{eq:matching-aux1}, it can be transformed to a set that does. To see why,
let $\equiv \subseteq B \times B$ be defined as:
\[
b \equiv b'
\text{ iff }
\setcomp{a \in A}{b \in g(a)} = \setcomp{a \in A}{b' \in g(a)}
\]
Let $\hat{B}$ be the set of equivalence classes w.r.t.\@\xspace $\equiv$.
For an equivalence class $[b]$, define:
\[
f_{\hat{B}}([b]) = \sum_{b' \in [b]}{f_B(b')}
\qquad\qquad
g'(a) = \setcomp{[b]}{b \in g(a)}
\]
It is easy to verify that~\eqref{eq:matching-aux1} is satisfied.
Notice that $\sum_{[b] \in g'(a)} f_{\hat{B}}([b])$ converges, since:
\[
\sum_{[b] \in g'(a)} f_{\hat{B}}([b]) = \sum_{[b] \in g'(a)} \sum_{b' \in [b]}f_{B}(b') = \sum_{b \in g(a)}f_{B}(b)
\]
We now show that $A,\hat{B}$ and $g'$ satisfy \autoref{eq:matching-assumption}. We have that, for all $b \in B$,
$f_B(b) \leq f_{\hat{B}}([b])$ and $b \in g(a) \implies [b] \in g'(a)$.
Therefore, for all $A' \subseteq A$:
\[
\sum_{a \in A'} f_A(a) \leq \sum_{b \in \bigcup_{a \in A'} g(a)} f_B(b) \leq \sum_{[b] \in \bigcup_{a \in A'} g'(a)} f_{\hat{B}}([b])
\]
From a function $h'$ satisfying
\autoref{eq:matching-thesis:1} and~\autoref{eq:matching-thesis:2}
for $A, \hat{B}$
and $g'$ we can easily obtain a function $h$ for $A, B$ and $g$:
e.g.\@\xspace, set $h(a,b) = h'(a,[b])\frac{f_B(b)}{f_{\hat{B}}([b])}$.
Notice that $f_{\hat{B}}([b]) > 0$ by \autoref{eq:matching-aux2}, and
that if $B$ satisfies \autoref{eq:matching-aux1} it then holds that
$\card{B} < 2^{\card{A}}$, and so $B$ is finite. That said, we show
that the thesis holds by reducing to the max-flow problem~\cite{MinCut}.
Assume w.l.o.g.\ that $A$ and $B$ are disjoint.
Let $N = (V,E)$ be a directed graph, where
$V = A \cup B \cup \setenum{s,t}$ with $s,t \not\in A \cup B$ and:
\[
E = \setcomp{(s,b)}{b \in B} \cup \setcomp{(b,a)}{a \in A, b \in g(a)} \cup \setcomp{(a,t)}{a \in A}
\]
Define edge capacity $w: E \rightarrow \mathbb{R}_0^+ \cup \setenum{\infty}$ as follows:
\[
w(s,b) = f_B(b)
\qquad
w(b,a) = \infty
\qquad
w(a,t) = f_A(a)
\]
Consider the cut $C = \setcomp{(a,t)}{a \in A}$ associated with partition $(V \setminus \setenum{t},\setenum{t})$. Such cut has capacity
$\sum_{a \in A} f_A(a)$ and we argue it is minimum. Take a cut $C'$ of the network. First notice that if $C'$ contains edges of the form
$(b,a)$ its capacity would be infinite.
We can therefore consider only cuts whose elements
are of the form $(s,b)$ or $(a,t)$, and thus for all $a \in A$ we have that $a$ and the elements of $g(a)$ are in the same partition.
In other words, the partition of $s$ is of the form $A' \cup \bigcup_{a \in A'} g(a) \cup \setenum{s}$, and the partition of $t$ is of the form
$(A \setminus A') \cup \bigcup_{a \in (A \setminus A')} g(a) \cup \setenum{t}$, where $A' \subseteq A$.
So the capacity of $C'$ is $\sum_{a \in A'} f_A(a) + \sum_{b \in g(A \setminus A')} f_B(b)$. Now, the capacity of $C$ is
$\sum_{a \in A'} f_A(a) + \sum_{a \in (A \setminus A')} f_A(a)$. Since $\sum_{a \in (A \setminus A')} f_A(a) \leq \sum_{b \in g(A \setminus A')} f_B(b)$
by assumption \autoref{eq:matching-assumption}, we have that the capacity of $C$ is minimal.
By the min-cut max-flow theorem \cite{MinCut},
we have that the max flow of the network has capacity $\sum_{a \in A} f_A(a)$.
Let $\mathit{flow}: E \rightarrow \mathbb{R}_0^+$ be a flow attaining such a maximum flow value.
Consequently, we have that $\mathit{flow}(a,t) = f_A(a)$ for all $a \in A$.
Define:
\[
h(a,b) =
\begin{cases}
\frac{\mathit{flow}(b,a)}{f_B(b)} & \text{ if } b \in g(a)\\
0 & \text{ otherwise}
\end{cases}
\]
We have to show that $h$ satisfies
\autoref{eq:matching-thesis:1} and~\autoref{eq:matching-thesis:2}.
Let $A' \subseteq A$. We have that:
\begin{align*}
\sum_{a \in A'} \sum_{b \in g(a)} h(a,b) f_B(b)
& = \sum_{a \in A'} \sum_{b \in g(a)} \frac{\mathit{flow}(b,a)}{f_B(b)} f_B(b)
\\
& = \sum_{a \in A'} \sum_{b \in g(a)} \mathit{flow}(b,a)
\end{align*}
By the conservation of flow constraint, we have that:
\begin{align*}
\sum_{a \in A'} \sum_{b \in g(a)} \mathit{flow}(b,a)
& =
\sum_{a \in A'} \mathit{flow}(a,t)
\\
& =
\sum_{a \in A'} f_A(a)
\end{align*}
So summing up we have that:
\[
\sum_{a \in A'} \sum_{b \in g(a)} h(a,b) f_B(b) = \sum_{a \in A'} f_A(a)
\]
For the remaining part, let $b \in B$. We have that:
\begin{align*}
\sum_{a \in A} h(a,b)
& = \sum_{a \in \setcomp{a'}{b \in g(a')}} h(a,b)
\\
& = \sum_{a \in \setcomp{a'}{b \in g(a')}} \frac{\mathit{flow}(b,a)}{f_B(b)}
\\
& = \frac{1}{f_B(b)} {\sum_{a \in \setcomp{a'}{b \in g(a')}} \mathit{flow}(b,a)}
\\
& \leq\; \frac{f_B(b)}{f_B(b)}
\\
& = \; 1
\tag*{\qed
}
\end{align*}
\end{proofof}
\begin{proofof}{Lemma}{lem:finite-traces}
By induction on $n$.
The base case ($n = 1$) is trivial as $T = \{\stateP\}$ and $\TR{n}{\delta,\stateQ}{T} = \{\stateQ\}$,
or $T = \emptyset$ and $\TR{n}{\delta,\stateQ}{T} = \emptyset$.
Therefore,
$\prob{T}{} = \prob{\TR{n}{\delta,\stateQ}{T}}{} = \card{T}$.
For the inductive case, first notice that:
\[
\prob{T}{}\;\;= \;\;
\sum_{\tilde{t} \in T} \prob{\stateP}{\tilde{t}(1)}\prob{\tilde{t}(1 .. n - 1)}{}
\]
Referring to Lemma~\ref{lem:matching},
let $A = \setcomp{\tilde{t}(1)}{\tilde{t} \in T}$,
$B = \setcomp{\stateQi}{\statePi \CSim{n - 1}{\delta} \stateQi \text{ for some } \statePi \in A} \cup \setenum{D}$,
where $D$ is a special element not occurring in $A \cup B$.
Let $f_A(\statePi) = \prob{\stateP}{\statePi}$, $f_B(\stateQi) = \prob{\stateQ}{\stateQi}$ and
$f_B(D) = \delta$.
Finally, let $g(\statePi) = \;\R{n-1}{\delta}{\statePi} \cup \setenum{D}$.
By~\autoref{def:param-bisim},
we have that $A, B, f_A, f_B$ and $g$ satisfy
\autoref{eq:matching-assumption} of \autoref{lem:matching}.
Indeed, for all $A' \subseteq A$, we have that:
\[
\sum_{a \in A'} f_A(a) = \prob{\stateP}{A'} \leq \prob{\stateQ}{\cryset{n - 1}{\delta}{A'}} + \delta = \sum_{b \in \bigcup_{a \in A'} g(a)} f_B(b)
\]
We can then conclude that there exists $h$ such that,
for all $A' \subseteq A$:
\[
\prob{\stateP}{A'} = \sum_{\statePi \in A'} \Big( h(\statePi,D) \delta \;\;+
\sum_{\stateQi \in \R{n-1}{\delta}{\statePi}} h(\statePi,\stateQi)\prob{\stateQ}{\stateQi}\Big)
\]
Let $T_{P} = \setcomp{\tilde{t}(1..n - 1)}{\tilde{t} \in T \,\land\, \tilde{t}(1) \in P}$
where $P \subseteq A$.
We simply write $T_{\statePi}$ if $P = \{\statePi\}$.
So, we have that:
\begin{align*}
\prob{T}{}
& \;\;=\;\;\sum_{\tilde{t} \in T} \prob{\stateP}{\tilde{t}(1)} \prob{\tilde{t}(1.. n - 1)}{}
\\
& \;\;= \;\; \sum_{\statePi \in A} \prob{\stateP}{\statePi}\prob{T_{\statePi}}{}
\\
& \;\;= \;\; \sum_{\statePi \in A} \prob{T_{\statePi}}{}\Big(h(\statePi,D) \delta \;\;+ \sum_{\stateQi \in \R{n-1}{\delta}{\statePi}} h(\statePi,\stateQi)\prob{\stateQ}{\stateQi}\Big)
\\
& \;\;\leq \;\; \delta + \sum_{\statePi \in A} \prob{T_{\statePi}}{}\sum_{\stateQi \in \R{n-1}{\delta}{\statePi}} h(\statePi,\stateQi)\prob{\stateQ}{\stateQi}
\\
& \;\;= \;\; \delta + \sum_{\statePi \in A} \sum_{\stateQi \in \R{n-1}{\delta}{\statePi}} h(\statePi,\stateQi)\prob{\stateQ}{\stateQi}\prob{T_{\statePi}}{}
\\
& \;\;\leq \;\; \delta + \sum_{\statePi \in A} \sum_{\stateQi \in \R{n-1}{\delta}{\statePi}} h(\statePi,\stateQi)\prob{\stateQ}{\stateQi}\Big(\prob{\TR{n-1}{\delta,\stateQi}{T_{\statePi}}}{} + \delta(n - 1)\Big)
\\
& \;\;= \;\; \delta + s_1 + s_2
\end{align*}
where:
\begin{align*}
s_1 & = \sum_{\statePi \in A} \sum_{\stateQi \in \R{n-1}{\delta}{\statePi}} h(\statePi,\stateQi)\prob{\stateQ}{\stateQi} \delta (n - 1)
\\
s_2 & = \sum_{\statePi \in A} \sum_{\stateQi \in \R{n-1}{\delta}{\statePi}} h(\statePi,\stateQi)\prob{\stateQ}{\stateQi}\prob{\TR{n-1}{\delta,\stateQi}{T_{\statePi}}}{}
\end{align*}
Now:
\begin{align*}
s_1
& = \delta (n - 1)\sum_{\statePi \in A} \sum_{\stateQi \in \R{n-1}{\delta}{\statePi}} h(\statePi,\stateQi)\prob{\stateQ}{\stateQi}
\\
& \leq \delta (n - 1)\prob{\stateP}{A}
\\
& \leq \delta (n - 1)
\end{align*}
Therefore $\delta + s_1 \leq \delta n$.
It remains to show that $s_2 \leq \prob{\TR{n}{\delta,\stateQ}{T}}{}$.
First notice that $s_2$ can be rewritten as follows by a simple reordering of terms:
\[
s_2 = \sum_{\stateQi \in \R{n - 1}{\delta}{A}} \sum_{\statePi \in A \cap \R{n - 1}{\delta}{\stateQi}} h(\statePi,\stateQi)\prob{\stateQ}{\stateQi}\prob{\TR{n-1}{\delta,\stateQi}{T_{\statePi}}}{}
\]
So:
\begin{align*}
s_2
& = \sum_{\stateQi \in \R{n - 1}{\delta}{A}}
\quad \sum_{\statePi \in A \cap \R{n - 1}{\delta}{\stateQi}}
h(\statePi,\stateQi)\prob{\stateQ}{\stateQi}\prob{\TR{n-1}{\delta,\stateQi}{T_{\statePi}}}{}
\\
& \leq \sum_{\stateQi \in \R{n - 1}{\delta}{A}}
\quad \sum_{\statePi \in A \cap \R{n - 1}{\delta}{\stateQi}}
h(\statePi,\stateQi)\prob{\stateQ}{\stateQi}
\prob{\TR{n-1}{\delta,\stateQi}{T_{A \cap \R{n - 1}{\delta}{\stateQi}}}}{}
\\
& \leq \sum_{\stateQi \in \R{n - 1}{\delta}{A}} \prob{\stateQ}{\stateQi}\prob{\TR{n-1}{\delta,\stateQi}{T_{A \cap \R{n - 1}{\delta}{\stateQi}}}}{}
\sum_{\statePi \in A \cap \R{n - 1}{\delta}{\stateQi}} h(\statePi,\stateQi)
\\
& \leq \sum_{\stateQi \in \R{n - 1}{\delta}{A}} \prob{\stateQ}{\stateQi}\prob{\TR{n-1}{\delta,\stateQi}{T_{A \cap \R{n - 1}{\delta}{\stateQi}}}}{}
\\
& = \prob{\TR{n}{\delta,\stateQ}{T}}{}
\end{align*}
The last equality follows by partitioning $\TR{n}{\delta,\stateQ}{T}$
according to the second state of each trace $\stateQi$. The set of all
such second states is the set of those bisimilar to (some state of)
$A$, namely $\R{n - 1}{\delta}{A}$.
Given any such $\stateQi$, the probability of its partition is
$\prob{\stateQ}{\stateQi}\prob{U_{\stateQi}}{}$ where $U_{\stateQi}$ is the set of
the \emph{tails} of $\TR{n}{\delta,\stateQ}{T}$ starting from $\stateQi$.
Since this set is defined taking pointwise bisimilar traces, we can
equivalently express $U_{\stateQi}$ by first taking the tails of
$T$ (i.e., $T_A$), and then considering the bisimilar traces:
in other words, we have $U_{\stateQi} = \TR{n-1}{\delta,\stateQi}{T_{A}}$.
Note that the states in $A$ which are not bisimilar to $\stateQi$ do not
contribute to $\TR{n-1}{\delta,\stateQi}{T_{A}}$ in any way, so we can also
write the desired
$U_{\stateQi} = \TR{n-1}{\delta,\stateQi}{T_{A \cap \R{n - 1}{\delta}{\stateQi}}}$.
\qed
\end{proofof}
\begin{applemma}\ellel{lem:logic-finite-traces-next}
Let $T = \setcomp{t}{t(0) = \stateP \,\land\, t \sat{\delta}{n}{r} \logNext \sFormula}$ for some $\stateP,\sFormula$, and let $m \geq 2$. Then:
\[
\prob{T}{} = \prob{\setcomp{\tilde{t}}{\card{\tilde{t}} = m \,\land\, \tilde{t}(0) = \stateP \,\land\, \tilde{t} \sat{\delta}{n}{r} \logNext \sFormula}}{}
\]
\end{applemma}
\begin{proof}
Trivial.
\end{proof}
\begin{applemma}\ellel{lem:logic-finite-traces}
Let $T = \setcomp{t}{t(0) = \stateP \,\land\, t \sat{\delta}{n}{r} \sFormula[1] \logUntil \sFormula[2]}$ for some
$\stateP,\sFormula[1], \sFormula[2]$, and let $m \geq n + 1$. Then:
\[
\prob{T}{}
=
\prob{\setcomp{\tilde{t}}{\card{\tilde{t}} = m \,\land\, \tilde{t}(0) = \stateP \,\land\, \tilde{t} \sat{\delta}{n}{r} \sFormula[1] \logUntil \sFormula[2]}}{}
\]
\end{applemma}
\begin{proof}
(Sketch)
Let $\tilde{T} = \setcomp{\tilde{t}}{\card{\tilde{t}} = m \,\land\, \tilde{t}(0) = \stateP \,\land\, \tilde{t} \sat{\delta}{n}{r} \sFormula[1] \logUntil \sFormula[2]}$.
The thesis follows from the fact that $T = \bigcup_{\tilde{t} \in \tilde{T}}{\cyl{\tilde{t}}}$.
\end{proof}
\noindent
For notational convenience, hereafter we will often write
$q \sat{\delta}{n}{r} \phi$ instead of
$q \in \sem{n}{\delta}{r}{\phi}$.
\begin{applemma}\ellel{lem:bisimi-implies-prop-preserv}
Let $k$ and $n$ be, respectively, the maximum nesting level of $\logUntil$ and of $\logNext$ in $\sFormula$, and let
$\stateP \CSim{mk + n + 1}{\delta_1} \stateQ$. Then:
\begin{enumerate}
\item \ellel{lem:bisimi-implies-prop-preserv:item1}
$\stateP \sat{\delta_2}{m}{+1} \sFormula \implies \stateQ \sat{\delta_2 + \delta_1(mk + n + 1)}{m}{+1} \sFormula$
\item \ellel{lem:bisimi-implies-prop-preserv:item2}
$\stateP \not\sat{\delta_2}{m}{-1} \sFormula \implies \stateQ \not\sat{\delta_2 + \delta_1(mk + n + 1)}{m}{-1} \sFormula$
\end{enumerate}
\end{applemma}
\begin{proof}
By induction on $\sFormula$. The cases $\logTrue$ and $\atomA$ are trivial.
\begin{itemize}
\item $\mathtt{\neg} \sFormulai$.
We only show \autoref{lem:bisimi-implies-prop-preserv:item1} as the other item is similar.
So, suppose $\stateP \sat{\delta_2}{m}{+1} \mathtt{\neg} \sFormulai$.
Then, $\stateP \not\sat{\delta_2}{m}{-1} \sFormulai$.
By the induction hypothesis we have that
$\stateQ \not\sat{\delta_2 + \delta_1(mk + n + 1)}{m}{-1} \sFormulai$, and
hence $\stateQ \sat{\delta_2 + \delta_1(mk + n + 1)}{m}{+1} \mathtt{\neg} \sFormulai$ as required.
\item $\sFormula[1] \,\mathtt{\land}\, \sFormula[2]$.
We only show \autoref{lem:bisimi-implies-prop-preserv:item1} as the other item is similar.
So, suppose $\stateP \sat{\delta_2}{m}{+1} \sFormula[1] \,\mathtt{\land}\, \sFormula[2]$. Then $\stateP \sat{\delta_2}{m}{+1} \sFormula[1]$ and $\stateP \sat{\delta_2}{m}{+1} \sFormula[2]$.
By the induction hypothesis $\stateQ \sat{\delta_2 + \delta_1(mk + n + 1)}{m}{+1} \sFormula[1]$ and
$\stateQ \sat{\delta_2 + \delta_1(mk + n + 1)}{m}{+1} \sFormula[2]$.
Therefore
$\stateQ \sat{\delta_2 + \delta_1(mk + n + 1)}{m}{+1} \sFormula[1] \,\mathtt{\land}\, \sFormula[2]$ as required.
\item $\logPr{\rhd \pi}{\pFormula}$.
For \autoref{lem:bisimi-implies-prop-preserv:item1},
suppose that $\stateP \sat{\delta_2}{m}{+1} \logPr{\rhd \pi}{\pFormula}$.
We only deal with the case $\rhd = \,\geq$,
since the case $\rhd = \, >$ is analogous.
Let:
\[
T = \setcomp{\tilde{t}}{\card{\tilde{t}} = mk + n + 1 \,\land\, \tilde{t}(0) = \stateP \,\land\, \tilde{t} \sat{\delta_2}{m}{+1} \pFormula}{}
\]
We start by proving that:
\begin{equation}\ellel{lem:bisimi-implies-prop-preserv:eq1}
\forall \tilde{u} \in \TR{mk + n + 1}{\delta_1,\stateQ}{T}
\;\; : \;\;
\tilde{u} \sat{\delta_2 + \delta_1(mk + n + 1)}{m}{+1} \pFormula
\end{equation}
Let $\tilde{u} \in \TR{m k + n + 1}{\delta_1,\stateQ}{T}$. Then, there is $\tilde{t} \in T$ such that,
for all $0 \leq i < mk + n + 1$:
\[\tilde{t}(i)
\CSim{mk + n + 1-i}{\delta_1} \tilde{u}(i)\]
We proceed by cases on $\pFormula$.
\begin{itemize}
\item
$\sFormula[1] \logUntil \sFormula[2]$.
First notice that $mk + n + 1 \geq m + 1$, and hence by \autoref{lem:logic-finite-traces} we have that:
\[\prob{T}{} = \prob{\setcomp{t}{t(0) = \stateP \,\land\, t \sat{\delta_2}{m}{+1} \sFormula[1] \logUntil \sFormula[2]}}{}\]
We then have $\prob{T}{} + \delta_2 \geq \pi$.
Since $\tilde{t} \sat{\delta_2}{m}{+1} \sFormula[1] \logUntil \sFormula[2]$, we have that:
\[
\exists i \leq m:
\tilde{t}(i) \sat{\delta_2}{m}{+1} \sFormula[2] \,\land\, \forall j < i: \tilde{t}(j) \sat{\delta_2}{m}{+1} \sFormula[1]
\]
Let $n'$ be the maximum nesting level of $\logNext$ in $\sFormula[2]$.
We know that:
\[
\tilde{t}(i) \CSim{mk + n + 1-i}{\delta_1} \tilde{u}(i) \,\land\, mk + n + 1 - i > m(k - 1) + n' + 1
\]
Then, by Lemma~\ref{lem:sim:monotonicity}
(monotonicity of $\CSim{}{}$), we have that:
\[
\tilde{t}(i) \CSim{m(k - 1) + n' + 1}{\delta_1} \tilde{u}(i)
\]
Then, by the induction hypothesis, we have that:
\[
\tilde{u}(i) \sat{\delta_2 + \delta_1(m(k - 1) + n' + 1)}{m}{+1} \sFormula[2]
\]
By Lemma~\ref{lem:pctl:monotonicity} (monotonicity of $\sat{}{}{}$)
it follows that:
\[
\tilde{u}(i) \sat{\delta_2 + \delta_1(mk + n + 1)}{m}{+1} \sFormula[2]
\]
With a similar argument we can conclude that, for all $j < i$:
\[
\tilde{u}(j) \sat{\delta_2 + \delta_1(mk + n + 1)}{m}{+1} \sFormula[1]
\]
Hence \autoref{lem:bisimi-implies-prop-preserv:eq1} holds.
\item
$\logNext \sFormula[1]$.
First notice that $mk + n + 1 \geq 2$, and hence by \autoref{lem:logic-finite-traces-next} we have that:
\[
\prob{T}{} = \prob{\setcomp{t}{t(0) = \stateP \,\land\, t \sat{\delta_2}{m}{+1} \logNext \sFormula[1]}}{}
\]
Then, $\prob{T}{} + \delta_2 \geq \pi$.
Since $\tilde{t} \sat{\delta_2}{m}{+1} \logNext \sFormula[1]$, we have that
\( \tilde{t}(1) \sat{\delta_2}{m}{+1} \sFormula[1] \).
We know that
\(
\tilde{u}(1) \CSim{mk + n}{\delta_1} \tilde{t}(1)
\).
By the induction hypothesis,
\(
\tilde{u}(1) \sat{\delta_2 + \delta_1(mk + n)}{m}{+1} \sFormula[1]
\).
By Lemma~\ref{lem:pctl:monotonicity} (monotonicity of $\sat{}{}{}$)
it follows that:
\( \tilde{u}(1) \sat{\delta_2 + \delta_1(mk + n + 1)}{m}{+1} \sFormula[1] \).
Hence, \eqref{lem:bisimi-implies-prop-preserv:eq1} holds.
\end{itemize}
Back to the main statement, we have that, by Lemma~\ref{lem:traces}:
\[
\prob{\TR{mk + n + 1}{\delta_1,\stateQ}{T}}{} + \delta_2 + \delta_1 (mk + n + 1)
\geq
\prob{T}{} + \delta_2
\]
So, summing up:
\begin{align*}
& \prob{\setcomp{t}{t(0) = \stateQ \;\land\; t\sat{\delta_2 + \delta_1(mk + n + 1)}{m}{+1} \pFormula}}{} + \delta_2 + \delta_1 (mk + n + 1)
\\
= \; & \prob{\setcomp{\tilde{t}}{\card{\tilde{t}} = mk + n + 1 \;\land\;
\tilde{t}(0) = \stateQ \;\land\; \tilde{t}\sat{\delta_2 + \delta_1(mk + n + 1)}{m}{+1} \pFormula}}{}
\\
& \;\;\;\; + \delta_2 + \delta_1 (mk + n + 1) \\
\geq \; &
\prob{\TR{mk + n + 1}{\delta_1,\stateQ}{T}}{} + \delta_2 + \delta_1 (mk + n + 1)
\\
\geq \; & \prob{T}{} + \delta_2
\\
\geq \; & \pi
\end{align*}
Therefore,
$\stateQ \sat{\delta_2 + \delta_1(mk + n + 1)}{m}{+1} \logPr{\geq \pi}{\pFormula}$.
For \autoref{lem:bisimi-implies-prop-preserv:item2}, suppose that
$\stateP \not\sat{\delta_2}{m}{-1} \logPr{\geq \pi}{\pFormula}$.
Then:
\[
\prob{\setcomp{t}{t(0) = \stateP \,\land\, t \sat{\delta_2}{m}{-1} \pFormula}{}}{} - \delta_2 < \pi
\]
From the above, by a case analysis on $\pFormula$, and exploiting
\autoref{lem:logic-finite-traces} and \autoref{lem:logic-finite-traces-next},
we conclude that $\prob{T}{} - \delta_2 < \pi$, where:
\[
T = \setcomp{\tilde{t}}{\card{\tilde{t}} = mk + n + 1 \,\land\, \tilde{t}(0) = \stateP \,\land\, \tilde{t} \sat{\delta_2}{m}{-1}
\pFormula}
\]
Let:
\[\bar T = \setcomp{\tilde{t}}{\card{\tilde{t}} = mk + n + 1 \,\land\, \tilde{t}(0) = \stateP \,\land\, \tilde{t} \not\sat{\delta_2}{m}{-1}
\pFormula}{}\]
We have that $1 - \prob{\bar T}{} = \prob{T}{}$.
We start by proving that:
\[
\forall \tilde{u} \in \TR{mk + n + 1}{\delta_1,\stateQ}{\bar T}
\;\; : \;\;
\tilde{u} \not\sat{\delta_2 + \delta_1(mk + n + 1)}{m}{-1} \pFormula
\]
Let $\tilde{u} \in \TR{mk + n + 1}{\delta_1,\stateQ}{\bar T}$.
Then, there exist $\tilde{t} \in \bar{T}$ such that,
for all $0 \leq i < mk + n + 1$:
\[\tilde{t}(i) \CSim{mk + n + 1-i}{\delta_1} \tilde{u}(i)\]
We proceed by cases on $\pFormula$.
\begin{itemize}
\item $\sFormula[1] \logUntil \sFormula[2]$.
Since $\tilde{t} \not\sat{\delta_2}{m}{-1} \sFormula[1] \logUntil \sFormula[2]$, we have that:
\[
\forall i \leq m: \tilde{t}(i) \not\sat{\delta_2}{m}{-1} \sFormula[2] \lor \exists j < i: \tilde{t}(j) \not\sat{\delta_2}{m}{-1} \sFormula[1]
\]
Take $i \leq m$.
Let $n'$ be the maximum nesting level of $\logNext$ in $\sFormula[2]$.
If $\tilde{t}(i) \not\sat{\delta_2}{m}{-1} \sFormula[2]$, since
\[
\tilde{t}(i) \CSim{mk + n + 1 - i}{\delta_1} \tilde{u}(i) \,\land\, mk + n + 1 - i > m(k - 1) + n' + 1
\]
by Lemma~\ref{lem:sim:monotonicity} (monotonicity of $\CSim{}{}$)
we have that:
\[
\tilde{t}(i) \CSim{m(k - 1) + n' + 1}{\delta_1} \tilde{u}(i)
\]
By the induction hypothesis we have that:
\[
\tilde{u}(i) \not\sat{\delta_2 + \delta_1(m(k - 1) + n' + 1)}{m}{-1} \sFormula[2]
\]
By Lemma~\ref{lem:pctl:monotonicity} (monotonicity of $\sat{}{}{}$) it follows:
\[
\tilde{u}(i) \not\sat{\delta_2 + \delta_1(mk + n + 1)}{m}{-1} \sFormula[2]
\]
If $\tilde{t}(j) \not\sat{\delta_2}{m}{-1} \sFormula[1]$ for
some $j < i$, with a similar argument we can conclude that:
\[
\tilde{u}(j) \not\sat{\delta_2 + \delta_1(mk + n + 1)}{m}{-1} \sFormula[1]
\]
| 3,996 | 39,865 |
% [extraction artifact: dataset chunk-separator row "en | train | 0.102.13 |" commented out]
For \autoref{lem:bisimi-implies-prop-preserv:item2}, suppose that
$\stateP \not\sat{\delta_2}{m}{-1} \logPr{\geq \pi}{\pFormula}$.
Then:
\[
\prob{\setcomp{t}{t(0) = \stateP \,\land\, t \sat{\delta_2}{m}{-1} \pFormula}{}}{} - \delta_2 < \pi
\]
From the above, by a case analysis on $\pFormula$, and exploiting
\autoref{lem:logic-finite-traces} and \autoref{lem:logic-finite-traces-next},
we conclude that $\prob{T}{} - \delta_2 < \pi$, where:
\[
T = \setcomp{\tilde{t}}{\card{\tilde{t}} = mk + n + 1 \,\land\, \tilde{t}(0) = \stateP \,\land\, \tilde{t} \sat{\delta_2}{m}{-1}
\pFormula}
\]
Let:
\[\bar T = \setcomp{\tilde{t}}{\card{\tilde{t}} = mk + n + 1 \,\land\, \tilde{t}(0) = \stateP \,\land\, \tilde{t} \not\sat{\delta_2}{m}{-1}
\pFormula}{}\]
We have that $1 - \prob{\bar T}{} = \prob{T}{}$.
We start by proving that:
\[
\forall \tilde{u} \in \TR{mk + n + 1}{\delta_1,\stateQ}{\bar T}
\;\; : \;\;
\tilde{u} \not\sat{\delta_2 + \delta_1(mk + n + 1)}{m}{-1} \pFormula
\]
Let $\tilde{u} \in \TR{mk + n + 1}{\delta_1,\stateQ}{\bar T}$.
Then, there exist $\tilde{t} \in \bar{T}$ such that,
for all $0 \leq i < mk + n + 1$:
\[\tilde{t}(i) \CSim{mk + n + 1-i}{\delta_1} \tilde{u}(i)\]
We proceed by cases on $\pFormula$.
\begin{itemize}
\item $\sFormula[1] \logUntil \sFormula[2]$.
Since $\tilde{t} \not\sat{\delta_2}{m}{-1} \sFormula[1] \logUntil \sFormula[2]$, we have that:
\[
\forall i \leq m: \tilde{t}(i) \not\sat{\delta_2}{m}{-1} \sFormula[2] \lor \exists j < i: \tilde{t}(j) \not\sat{\delta_2}{m}{-1} \sFormula[1]
\]
Take $i \leq m$.
Let $n'$ be the maximum nesting level of $\logNext$ in $\sFormula[2]$.
If $\tilde{t}(i) \not\sat{\delta_2}{m}{-1} \sFormula[2]$, since
\[
\tilde{t}(i) \CSim{mk + n + 1 - i}{\delta_1} \tilde{u}(i) \,\land\, mk + n + 1 - i > m(k - 1) + n' + 1
\]
by Lemma~\ref{lem:sim:monotonicity} (monotonicity of $\CSim{}{}$)
we have that:
\[
\tilde{t}(i) \CSim{m(k - 1) + n' + 1}{\delta_1} \tilde{u}(i)
\]
By the induction hypothesis we have that:
\[
\tilde{u}(i) \not\sat{\delta_2 + \delta_1(m(k - 1) + n' + 1)}{m}{-1} \sFormula[2]
\]
By Lemma~\ref{lem:pctl:monotonicity} (monotonicity of $\sat{}{}{}$) it follows:
\[
\tilde{u}(i) \not\sat{\delta_2 + \delta_1(mk + n + 1)}{m}{-1} \sFormula[2]
\]
If $\tilde{t}(j) \not\sat{\delta_2}{m}{-1} \sFormula[1]$ for
some $j < i$, with a similar argument we can conclude that:
\[
\tilde{u}(j) \not\sat{\delta_2 + \delta_1(mk + n + 1)}{m}{-1} \sFormula[1]
\]
\item $\logNext \sFormula[1]$.
Since $\tilde{t} \not\sat{\delta_2}{m}{-1} \logNext \sFormula[1]$,
we have that:
\(
\tilde{t}(1) \not\sat{\delta_2}{m}{-1} \sFormula[1]
\).
Since $\tilde{t}(1) \CSim{mk + n}{\delta_1} \tilde{u}(1)$,
by the induction hypothesis we have
\( \tilde{u}(1) \not\sat{\delta_2 + \delta_1(mk + n)}{m}{-1} \sFormula[1] \).
By Lemma~\ref{lem:pctl:monotonicity} it follows that:
\[
\tilde{u}(1) \not\sat{\delta_2 + \delta_1(mk + n + 1)}{m}{-1} \sFormula[1]
\]
\end{itemize}
Back to the main statement, by Lemma~\ref{lem:traces} we have that:
\[\prob{\bar T}{} \leq \prob{\TR{mk + n + 1}{\delta_1,\stateQ}{\bar T}}{} + \delta_1(mk + n + 1)\]
Summing up, we have that:
\begin{align*}
\hspace{-12pt}
& \prob{\setcomp{t}{t(0) = \stateQ \,\land\, t\sat{\delta_2 + \delta_1(mk + n + 1)}{m}{-1} \pFormula}}{} - \delta_2 - \delta_1 (mk + n + 1)
\\
& = \prob{\setcomp{\tilde{t}}{\card{\tilde{t}} = mk + n + 1 \,\land\, \tilde{t}(0) = \stateQ \,\land\, \tilde{t}\sat{\delta_2 + \delta_1(mk + n + 1)}{m}{-1} \pFormula}}{}
\\
& \hspace{12pt} - \delta_2 - \delta_1 (mk + n + 1)
\\
& = 1 - \prob{\setcomp{\tilde{t}}{\card{\tilde{t}} = mk + n + 1 \,\land\, \tilde{t}(0) = \stateQ \,\land\, \tilde{t}\not\sat{\delta_2 + \delta_1(mk + n + 1)}{m}{-1} \pFormula}}{}
\\
& \hspace{12pt} - \delta_2 - \delta_1 (mk + n + 1)
\\
& \leq
1 - \prob{\TR{mk + n + 1}{\delta_1,\stateQ}{\bar T}}{} - \delta_2 - \delta_1 (mk + n + 1)
\\
& \leq
1 - \prob{\bar T}{} - \delta_2
\\
& = \prob{T}{} - \delta_2
<
\pi
\end{align*}
Therefore,
$\stateQ \not\sat{\delta_2 + \delta_1(mk + n + 1)}{m}{-1} \logPr{\geq \pi}{\pFormula}$.
\qed
| 2,016 | 39,865 |
% [extraction artifact: dataset chunk-separator row "en | train | 0.102.14 |" commented out]
here
\end{itemize}
\end{proof}
| 14 | 39,865 |
% [extraction artifact: dataset chunk-separator row "en | train | 0.102.15 |" commented out]
\begin{proofof}{Theorem}{th:soundness}
Immediate consequence of~\autoref{lem:bisimi-implies-prop-preserv}.\qed
\end{proofof}
\end{document}
| 64 | 39,865 |
% [extraction artifact: dataset chunk-separator row "en | train | 0.103.0 |" commented out]
\begin{document}
\title[Kauffman-Vogel and Murakami-Ohtsuki-Yamada Polynomials]{On the Kauffman-Vogel and the Murakami-Ohtsuki-Yamada Graph Polynomials}
\author{Hao Wu}
\address{Department of Mathematics, The George Washington University, Monroe Hall, Room 240, 2115 G Street, NW, Washington DC 20052}
\email{[email protected]}
\subjclass[2000]{Primary 57M25}
\keywords{Kauffman polynomial, HOMFLY-PT polynomial}
\begin{abstract}
This paper consists of three parts.
First, we generalize the Jaeger Formula to express the Kauffman-Vogel graph polynomial as a state sum of the Murakami-Ohtsuki-Yamada graph polynomial.
Then, we demonstrate that reversing the orientation and the color of a MOY graph along a simple circuit does not change the $\mathfrak{sl}(N)$ Murakami-Ohtsuki-Yamada polynomial or the $\mathfrak{sl}(N)$ homology of this MOY graph. In fact, reversing the orientation and the color of a component of a colored link only changes the $\mathfrak{sl}(N)$ homology by an overall grading shift.
Finally, as an application of the first two parts, we prove that the $\mathfrak{so}(6)$ Kauffman polynomial is equal to the $2$-colored $\mathfrak{sl}(4)$ Reshetikhin-Turaev link polynomial, which implies that the $2$-colored $\mathfrak{sl}(4)$ link homology categorifies the $\mathfrak{so}(6)$ Kauffman polynomial.
\end{abstract}
\maketitle
\tableofcontents
| 447 | 69,513 |
% [extraction artifact: dataset chunk-separator row "en | train | 0.103.1 |" commented out]
\section{The Jaeger Formula of the Kauffman-Vogel Polynomial}\label{sec-Jaeger}
\subsection{The Kauffman and the HOMFLY-PT link polynomials} The Kauffman polynomial $P(K)(q,a)$ defined in \cite{Kauffman} is an invariant of unoriented framed link in $S^3$. Here, we use the following normalization of the Kauffman polynomial.
\begin{equation}\label{Kauffman-skein}
\begin{cases}
P(\setlength{\unitlength}{.75pt}
\begin{picture}(20,20)(-10,7)
\put(0,10){\circle{15}}
\end{picture}) = \frac{a-a^{-1}}{q-q^{-1}} +1 \\
P(\setlength{\unitlength}{.75pt}
\begin{picture}(20,20)(-10,7)
\put(-10,0){\line(1,1){20}}
\put(-2,12){\line(-1,1){8}}
\put(2,8){\line(1,-1){8}}
\end{picture}) - P(\setlength{\unitlength}{.75pt}
\begin{picture}(20,20)(-10,7)
\put(10,0){\line(-1,1){20}}
\put(2,12){\line(1,1){8}}
\put(-2,8){\line(-1,-1){8}}
\end{picture}) = (q-q^{-1})(P(\setlength{\unitlength}{.75pt}
\begin{picture}(20,20)(-10,7)
\qbezier(-10,0)(0,10)(-10,20)
\qbezier(10,0)(0,10)(10,20)
\end{picture}) - P(\setlength{\unitlength}{.75pt}
\begin{picture}(20,20)(-10,7)
\qbezier(-10,0)(0,10)(10,0)
\qbezier(-10,20)(0,10)(10,20)
\end{picture})) \\
P(\setlength{\unitlength}{.75pt}
\begin{picture}(20,20)(-10,7)
\put(-10,0){\line(1,1){12}}
\put(-2,12){\line(-1,1){8}}
\qbezier(2,12)(10,20)(10,10)
\qbezier(2,8)(10,0)(10,10)
\end{picture}) = a P(\setlength{\unitlength}{.75pt}
\begin{picture}(15,20)(-10,7)
\qbezier(-10,0)(10,10)(-10,20)
\end{picture})
\end{cases}
\end{equation}
The $\mathfrak{so}(N)$ Kauffman polynomial $P_{N}(K)(q)$ is defined to be the specialization
\begin{equation}\label{Kauffman-N-def}
P_{N}(K)(q)= P(K)(q,q^{N-1}).
\end{equation}
The HOMFLY-PT polynomial $R(K)(q,a)$ defined in \cite{HOMFLY,PT} is an invariant of oriented framed link in $S^3$. Here, we use the following normalization of the HOMFLY-PT polynomial.
\begin{equation}\label{HOMFLY-skein}
\begin{cases}
R(\setlength{\unitlength}{.75pt}
\begin{picture}(20,20)(-10,7)
\put(0,10){\circle{15}}
\end{picture}) = \frac{a-a^{-1}}{q-q^{-1}} \\
R(\setlength{\unitlength}{.75pt}
\begin{picture}(20,20)(-10,7)
\put(-10,0){\vector(1,1){20}}
\put(-2,12){\vector(-1,1){8}}
\put(2,8){\line(1,-1){8}}
\end{picture}) - R(\setlength{\unitlength}{.75pt}
\begin{picture}(20,20)(-10,7)
\put(10,0){\vector(-1,1){20}}
\put(2,12){\vector(1,1){8}}
\put(-2,8){\line(-1,-1){8}}
\end{picture}) = (q-q^{-1})R(\setlength{\unitlength}{.75pt}
\begin{picture}(20,20)(-10,7)
\put(-10,20){\vector(-1,1){0}}
\put(10,20){\vector(1,1){0}}
\qbezier(-10,0)(0,10)(-10,20)
\qbezier(10,0)(0,10)(10,20)
\end{picture}) \\
R(\setlength{\unitlength}{.75pt}
\begin{picture}(20,20)(-10,7)
\put(-10,0){\line(1,1){12}}
\put(-2,12){\line(-1,1){8}}
\qbezier(2,12)(10,20)(10,10)
\qbezier(2,8)(10,0)(10,10)
\end{picture}) = a R(\setlength{\unitlength}{.75pt}
\begin{picture}(15,20)(-10,7)
\qbezier(-10,0)(10,10)(-10,20)
\end{picture})
\end{cases}
\end{equation}
The $\mathfrak{sl}(N)$ HOMFLY-PT polynomial $R_{N}(K)(q)$ is defined to be the specialization
\begin{equation}\label{HOMFLY-N-def}
R_{N}(K)(q)= R(K)(q,q^{N}).
\end{equation}
It is easy to renormalize $P(K)(q,a)$ and $R(K)(q,a)$ to make them invariant under Reidemeister move (I) too.
\subsection{The Jaeger Formula} The Jaeger Formula can be found in, for example, \cite{Ferrand, Kauffman-book}. Here, we give it a slightly different formulation.
Given an unoriented link diagram $D$, we call a segment of the link between two adjacent crossings an edge of this diagram $D$. An edge orientation of $D$ is an orientation of all the edges of $D$. We say that an edge orientation of $D$ is balanced if, at every crossing, two edges point inward and two edges point outward. Up to rotation, there are four possible balanced edge orientations near a crossing. (See Figure \ref{balanced-orientation-crossing-fig}.)
\begin{figure}
\caption{Balanced edge orientations near a crossing}
\label{balanced-orientation-crossing-fig}
\end{figure}
Denote by $\widetilde{\mathcal{O}}(D)$ the set of all balanced edge orientations of $D$. Equipping $D$ with $\varrho \in \widetilde{\mathcal{O}}(D)$, we get an edge-oriented diagram $D_\varrho$. We say that $\varrho$ is admissible if $D_\varrho$ does not contain a top inward crossing. We denote by $\mathcal{O}(D)$ the subset of $\widetilde{\mathcal{O}}(D)$ consisting of all admissible balanced edge orientations of $D$.
\begin{figure}
\caption{Resolutions of a top outward crossing}
\label{res-top-out-fig}
\end{figure}
For $\varrho \in \mathcal{O}(D)$, we allow the two resolutions in Figure \ref{res-top-out-fig} at each top outward crossing of $D_\varrho$. A resolution $\varsigma$ of $D_\varrho$ is a choice of $A$ or $B$ resolution of every top outward crossing of $D_\varrho$. Denote by $\Sigma(D_\varrho)$ the set of all resolutions of $D_\varrho$.
For each $\varsigma \in \Sigma(D_\varrho)$ and each top outward crossing $c$ of $D_\varrho$, we define a local weight
\begin{equation}\label{local-weight-crossing-Jaeger}
[D_\varrho,\varsigma;c]= \begin{cases}
q-q^{-1} & \text{if } \varsigma \text{ applies } A \text{ to } c, \\
-q+q^{-1} & \text{if } \varsigma \text{ applies } B \text{ to } c.
\end{cases}
\end{equation}
The total weight $[D_\varrho,\varsigma]$ of the resolution $\varsigma$ is defined to be
\begin{equation}\label{weight-link-Jaeger}
[D_\varrho,\varsigma]= \prod_c [D_\varrho,\varsigma;c],
\end{equation}
where $c$ runs through all top outward crossings of $D_\varrho$.
For $\varsigma \in \Sigma(D_\varrho)$, denote by $D_{\varrho,\varsigma}$ the oriented link diagram (in the usual sense) obtained by applying $\varsigma$ to $D_\varrho$. As an immersed curve in $\mathbb{R}^2$, $D_{\varrho,\varsigma}$ has a rotation number $\mathrm{rot}(D_{\varrho,\varsigma})$ (which is also known as the Whitney index or the degree of the Gauss map.)
The following is our formulation of the Jaeger Formula, which is easily shown to be equivalent to the Jaeger Formula given in \cite{Ferrand, Kauffman-book}.
\begin{equation}\label{eq-Jaeger-formula}
P(D)(q,a^2q^{-1}) = \sum_{\varrho \in \mathcal{O}(D)} \sum_{\varsigma \in \Sigma(D_\varrho)} (a^{-1}q)^{\mathrm{rot}(D_{\varrho,\varsigma})} [D_\varrho,\varsigma] R(D_{\varrho,\varsigma})(q,a).
\end{equation}
Plugging $a=q^N$ into the above formula, we get
\begin{equation}\label{eq-Jaeger-formula-N}
P_{2N}(D)(q) = \sum_{\varrho \in \mathcal{O}(D)} \sum_{\varsigma \in \Sigma(D_\varrho)} q^{-(N-1)\mathrm{rot}(D_{\varrho,\varsigma})} [D_\varrho,\varsigma] R_N(D_{\varrho,\varsigma})(q).
\end{equation}
| 2,730 | 69,513 |
% [extraction artifact: dataset chunk-separator row "en | train | 0.103.2 |" commented out]
\subsection{The Kauffman-Vogel polynomial, the Murakami-Ohtsuki-Yamada polynomial and the Jaeger Formula}\label{subsec-Jaeger-graph} The first objective of the present paper is to generalize the Jaeger Formula \eqref{eq-Jaeger-formula} to express the Kauffman-Vogel polynomial as a state sum in terms of the Murakami-Ohtsuki-Yamada polynomial of (uncolored) oriented knotted $4$-valent graphs.
A knotted $4$-valent graph is an immersion of an abstract $4$-valent graph into $\mathbb{R}^2$ whose only singularities are finitely many crossings away from vertices. Here, a crossing is a transversal double point with one intersecting branch specified as upper and the other as lower. Two knotted $4$-valent graph are equivalent if they are isotopic to each other via a rigid vertex isotopy. (See \cite[Section 1]{KV} for the definition of rigid vertex isotopies.)
\begin{figure}
\caption{Vertex of an oriented knotted $4$-valent graph}
\label{oriented-vertex-fig}
\end{figure}
We say that a knotted $4$-valent graph is oriented if the underlying abstract $4$-valent graph is oriented in such a way that, up to rotation, very vertex in the knotted $4$-valent graph looks like the one in Figure \ref{oriented-vertex-fig}. We say that a knotted $4$-valent graph is unoriented if the underlying abstract $4$-valent graph is unoriented. Note that some orientations of the underlying abstract $4$-valent graph do not give rise to orientations of the knotted $4$-valent graph.
The Kauffman-Vogel polynomial $P(D)(q,a)$ defined in \cite{KV} is an invariant of unoriented knotted $4$-valent graphs under regular rigid vertex isotopy. It is defined by the skein relations \eqref{Kauffman-skein} of the Kauffman polynomial plus the following additional relation.
\begin{equation}\label{Kauffman-skein-vertex}
P(\setlength{\unitlength}{.75pt}
\begin{picture}(20,20)(-10,7)
\put(0,10){\line(1,1){10}}
\put(-10,0){\line(1,1){10}}
\put(0,10){\line(-1,1){10}}
\put(10,0){\line(-1,1){10}}
\end{picture})= - P(\setlength{\unitlength}{.75pt}
\begin{picture}(20,20)(-10,7)
\put(-10,0){\line(1,1){20}}
\put(-2,12){\line(-1,1){8}}
\put(2,8){\line(1,-1){8}}
\end{picture}) + q P(\setlength{\unitlength}{.75pt}
\begin{picture}(20,20)(-10,7)
\qbezier(-10,0)(0,10)(-10,20)
\qbezier(10,0)(0,10)(10,20)
\end{picture}) +q^{-1} P(\setlength{\unitlength}{.75pt}
\begin{picture}(20,20)(-10,7)
\qbezier(-10,0)(0,10)(10,0)
\qbezier(-10,20)(0,10)(10,20)
\end{picture}) = - P(\setlength{\unitlength}{.75pt}
\begin{picture}(20,20)(-10,7)
\put(10,0){\line(-1,1){20}}
\put(2,12){\line(1,1){8}}
\put(-2,8){\line(-1,-1){8}}
\end{picture}) + q^{-1} P(\setlength{\unitlength}{.75pt}
\begin{picture}(20,20)(-10,7)
\qbezier(-10,0)(0,10)(-10,20)
\qbezier(10,0)(0,10)(10,20)
\end{picture}) + q P(\setlength{\unitlength}{.75pt}
\begin{picture}(20,20)(-10,7)
\qbezier(-10,0)(0,10)(10,0)
\qbezier(-10,20)(0,10)(10,20)
\end{picture}).
\end{equation}
The $\mathfrak{so}(N)$ Kauffman-Vogel polynomial $P_{N}(D)(q)$ is defined to be the specialization
\begin{equation}\label{Kauffman-Vogel-2N-def}
P_{N}(D)(q)= P(D)(q,q^{N-1}).
\end{equation}
The Murakami-Ohtsuki-Yamada polynomial\footnote{For oriented knotted $4$-valent graphs, the Murakami-Ohtsuki-Yamada polynomial was first defined by Kauffman and Vogel \cite{KV}. Murakami, Ohtsuki and Yamada \cite{MOY} generalized it to knotted MOY graphs and used it to recover the Reshetikhin-Turaev $\mathfrak{sl}(N)$ polynomial of links colored by wedge powers of the defining representation.} $R(D)(q,a)$ of oriented knotted $4$-valent graphs is an invariant under regular rigid vertex isotopy. It is defined by the skein relations \eqref{HOMFLY-skein} of the HOMFLY-PT polynomial plus the following additional relation.
\begin{equation}\label{HOMFLY-skein-vertex}
R(\setlength{\unitlength}{.75pt}
\begin{picture}(20,20)(-10,7)
\put(0,10){\vector(1,1){10}}
\put(-10,0){\vector(1,1){10}}
\put(0,10){\vector(-1,1){10}}
\put(10,0){\vector(-1,1){10}}
\end{picture}) = - R(\setlength{\unitlength}{.75pt}
\begin{picture}(20,20)(-10,7)
\put(-10,0){\vector(1,1){20}}
\put(-2,12){\vector(-1,1){8}}
\put(2,8){\line(1,-1){8}}
\end{picture}) + q R(\setlength{\unitlength}{.75pt}
\begin{picture}(20,20)(-10,7)
\put(-10,20){\vector(-1,1){0}}
\put(10,20){\vector(1,1){0}}
\qbezier(-10,0)(0,10)(-10,20)
\qbezier(10,0)(0,10)(10,20)
\end{picture}) = - R(\setlength{\unitlength}{.75pt}
\begin{picture}(20,20)(-10,7)
\put(10,0){\vector(-1,1){20}}
\put(2,12){\vector(1,1){8}}
\put(-2,8){\line(-1,-1){8}}
\end{picture}) + q^{-1} R(\setlength{\unitlength}{.75pt}
\begin{picture}(20,20)(-10,7)
\put(-10,20){\vector(-1,1){0}}
\put(10,20){\vector(1,1){0}}
\qbezier(-10,0)(0,10)(-10,20)
\qbezier(10,0)(0,10)(10,20)
\end{picture}).
\end{equation}
The $\mathfrak{sl}(N)$ Murakami-Ohtsuki-Yamada polynomial $R_{N}(D)(q)$ is defined to be the specialization
\begin{equation}\label{MOY-uncolored-N-def}
R_{N}(D)(q)= R(D)(q,q^{N}).
\end{equation}
From now on, we will refer to the Kauffman-Vogel polynomial as the KV polynomial and the Murakami-Ohtsuki-Yamada polynomial as the MOY polynomial.
Given a knotted $4$-valent graph $D$, we call a segment of $D$ between two adjacent vertices or crossings an edge. (An edge can have a crossing and a vertex as its end points. Note that an edge of the underlying abstract $4$-valent graph may be divided into several edges in $D$ by crossings.) An edge orientation of $D$ is an orientation of all the edges of $D$. We say that an edge orientation of $D$ is balanced if, at every crossing and every vertex, two edges point inward and two edges point outward. As before, up to rotation, there are four possible balanced edge orientations near a crossing. (See Figure \ref{balanced-orientation-crossing-fig}.) Up to rotation, there are two possible balanced edge orientations near a vertex. (See Figure \ref{balanced-orientation-vertex-fig}.)
\begin{figure}
\caption{Balanced edge orientations near a vertex}
\label{balanced-orientation-vertex-fig}
\end{figure}
Denote by $\widetilde{\mathcal{O}}(D)$ the set of all balanced edge orientations of $D$. Equipping $D$ with $\varrho \in \widetilde{\mathcal{O}}(D)$, we get an edge-oriented diagram $D_\varrho$. We say that $\varrho$ is admissible if $D_\varrho$ does not contain a top inward crossing. We denote by $\mathcal{O}(D)$ the subset of $\widetilde{\mathcal{O}}(D)$ consisting of all admissible balanced edge orientations of $D$.
For $\varrho \in \mathcal{O}(D)$, we allow the two resolutions in Figure \ref{res-top-out-fig} at each top outward crossing of $D_\varrho$ and the two resolutions in Figure \ref{res-non-crossing-vertex-fig} at each non-crossing-like vertex. A resolution $\varsigma$ of $D_\varrho$ is a choice of $A$ or $B$ resolution of every top outward crossing of $D_\varrho$ and $L$ or $R$ resolution of every non-crossing-like vertex. Denote by $\Sigma(D_\varrho)$ the set of all resolutions of $D_\varrho$.
\begin{figure}
\caption{Resolutions of a non-crossing-like vertex}
\label{res-non-crossing-vertex-fig}
\end{figure}
Fix a $\varsigma \in \Sigma(D_\varrho)$. For each top outward crossing $c$ of $D_\varrho$, the local weight $[D_\varrho,\varsigma;c]$ is defined as in \eqref{local-weight-crossing-Jaeger}. For each non-crossing-like vertex $v$, we define a local weight $[D_\varrho,\varsigma;v]$ by the following equation.
\begin{equation}\label{local-weight-vertex-Jaeger}
[D_\varrho,\varsigma;v]= \begin{cases}
q & \text{if } \varsigma \text{ applies } L \text{ to } v, \\
q^{-1} & \text{if } \varsigma \text{ applies } R \text{ to } v.
\end{cases}
\end{equation}
The total weight $[D_\varrho,\varsigma]$ of the resolution $\varsigma$ is defined to be
\begin{equation}\label{weight-graph-Jaeger}
[D_\varrho,\varsigma]= \left(\prod_c [D_\varrho,\varsigma;c]\right) \cdot \left(\prod_v [D_\varrho,\varsigma;v]\right),
\end{equation}
where $c$ runs through all top outward crossings of $D_\varrho$ and $v$ runs through all non-crossing-like vertices of $D_\varrho$.
The following theorem is our generalization of the Jaeger Formula to knotted $4$-valent graphs.
\begin{theorem}\label{thm-Jaeger-formula-graph}
\begin{equation}\label{eq-Jaeger-formula-graph}
P(D)(q,a^2q^{-1}) = \sum_{\varrho \in \mathcal{O}(D)} \sum_{\varsigma \in \Sigma(D_\varrho)} (a^{-1}q)^{\mathrm{rot}(D_{\varrho,\varsigma})} [D_\varrho,\varsigma] R(D_{\varrho,\varsigma})(q,a).
\end{equation}
Consequently, for $N\geq 1$,
\begin{equation}\label{eq-Jaeger-formula-N-graph}
P_{2N}(D)(q) = \sum_{\varrho \in \mathcal{O}(D)} \sum_{\varsigma \in \Sigma(D_\varrho)} q^{-(N-1)\mathrm{rot}(D_{\varrho,\varsigma})} [D_\varrho,\varsigma] R_N(D_{\varrho,\varsigma})(q).
\end{equation}
\end{theorem}
\begin{remark}
Murakami, Ohtsuki and Yamada \cite{MOY} established a state sum formula for the $\mathfrak{sl}(N)$ MOY polynomial $R_N$. Combining that with \eqref{eq-Jaeger-formula-N-graph}, we get a state sum formula for the $\mathfrak{so}(2N)$ KV polynomial. Specially, note that the $\mathfrak{sl}(1)$ MOY polynomial of a $4$-valent graph $D$ \textbf{embedded} in $\mathbb{R}^2$ is simply given by
\begin{equation}\label{eq-sl-1}
R_1(D) = \begin{cases}
1 & \text{if } D \text{ has no vertex}, \\
0 & \text{otherwise}.
\end{cases}
\end{equation}
Using \eqref{eq-sl-1} and \eqref{eq-Jaeger-formula-N-graph}, it is straightforward to recover the formula of the $\mathfrak{so}(2)$ KV polynomial of a planar $4$-valent graph given by Carpentier \cite[Theorem 4]{Carpentier1} and Caprau, Tipton \cite[Theorem 4]{Caprau-Tipton}.
We would also like to point out that the concept of balanced edge orientation is implicit in \cite{Carpentier2}, in which Carpentier gave an alternative proof of \cite[Theorem 4]{Carpentier1}.
\end{remark}
\begin{proof}[Proof of Theorem \ref{thm-Jaeger-formula-graph}]
We prove Theorem \ref{thm-Jaeger-formula-graph} by inducting on the number of vertices in $D$. The proof comes down to a straightforward but rather lengthy tabulation of all admissible balanced edge orientations of $D$.
If $D$ contains no vertex, then \eqref{eq-Jaeger-formula-graph} becomes \eqref{eq-Jaeger-formula}, which is known to be true. Assume that \eqref{eq-Jaeger-formula-graph} is true if $D$ has at most $n-1$ vertices. Now let $D$ be a knotted $4$-valent graph with $n$ vertices.
\begin{figure}\label{D-hatD-DA-DB-fig}
\end{figure}
| 3,916 | 69,513 |
% [extraction artifact: dataset chunk-separator row "en | train | 0.103.3 |" commented out]
Denote by $\widetilde{\mathcal{O}}(D)$ the set of all balanced edge orientations of $D$. Equipping $D$ with $\varrho \in \widetilde{\mathcal{O}}(D)$, we get an edge-oriented diagram $D_\varrho$. We say that $\varrho$ is admissible if $D_\varrho$ does not contain a top inward crossing. We denote by $\mathcal{O}(D)$ the subset of $\widetilde{\mathcal{O}}(D)$ consisting of all admissible balanced edge orientations of $D$.
For $\varrho \in \mathcal{O}(D)$, we allow the two resolutions in Figure \ref{res-top-out-fig} at each top outward crossing of $D_\varrho$ and the two resolutions in Figure \ref{res-non-crossing-vertex-fig} at each non-crossing-like vertex. A resolution $\varsigma$ of $D_\varrho$ is a choice of $A$ or $B$ resolution of every top outward crossing of $D_\varrho$ and $L$ or $R$ resolution of every non-crossing-like vertex. Denote by $\Sigma(D_\varrho)$ the set of all resolutions of $D_\varrho$.
\begin{figure}
\caption{Resolutions of a non-crossing-like vertex}
\label{res-non-crossing-vertex-fig}
\end{figure}
Fix a $\varsigma \in \Sigma(D_\varrho)$. For each top outward crossing $c$ of $D_\varrho$, the local weight $[D_\varrho,\varsigma;c]$ is defined as in \eqref{local-weight-crossing-Jaeger}. For each non-crossing-like vertex $v$, we define a local weight $[D_\varrho,\varsigma;v]$ by the following equation.
\begin{equation}\label{local-weight-vertex-Jaeger}
[D_\varrho,\varsigma;v]= \begin{cases}
q & \text{if } \varsigma \text{ applies } L \text{ to } v, \\
q^{-1} & \text{if } \varsigma \text{ applies } R \text{ to } v.
\end{cases}
\end{equation}
The total weight $[D_\varrho,\varsigma]$ of the resolution $\varsigma$ is defined to be
\begin{equation}\label{weight-graph-Jaeger}
[D_\varrho,\varsigma]= \left(\prod_c [D_\varrho,\varsigma;c]\right) \cdot \left(\prod_v [D_\varrho,\varsigma;v]\right),
\end{equation}
where $c$ runs through all top outward crossings of $D_\varrho$ and $v$ runs through all non-crossing-like vertices of $D_\varrho$.
The following theorem is our generalization of the Jaeger Formula to knotted $4$-valent graphs.
\begin{theorem}\label{thm-Jaeger-formula-graph}
\begin{equation}\label{eq-Jaeger-formula-graph}
P(D)(q,a^2q^{-1}) = \sum_{\varrho \in \mathcal{O}(D)} \sum_{\varsigma \in \Sigma(D_\varrho)} (a^{-1}q)^{\mathrm{rot}(D_{\varrho,\varsigma})} [D_\varrho,\varsigma] R(D_{\varrho,\varsigma})(q,a).
\end{equation}
Consequently, for $N\geq 1$,
\begin{equation}\label{eq-Jaeger-formula-N-graph}
P_{2N}(D)(q) = \sum_{\varrho \in \mathcal{O}(D)} \sum_{\varsigma \in \Sigma(D_\varrho)} q^{-(N-1)\mathrm{rot}(D_{\varrho,\varsigma})} [D_\varrho,\varsigma] R_N(D_{\varrho,\varsigma})(q).
\end{equation}
\end{theorem}
\begin{remark}
Murakami, Ohtsuki and Yamada \cite{MOY} established a state sum formula for the $\mathfrak{sl}(N)$ MOY polynomial $R_N$. Combining that with \eqref{eq-Jaeger-formula-N-graph}, we get a state sum formula for the $\mathfrak{so}(2N)$ KV polynomial. Specially, note that the $\mathfrak{sl}(1)$ MOY polynomial of a $4$-valent graph $D$ \textbf{embedded} in $\mathbb{R}^2$ is simply given by
\begin{equation}\label{eq-sl-1}
R_1(D) = \begin{cases}
1 & \text{if } D \text{ has no vertex}, \\
0 & \text{otherwise}.
\end{cases}
\end{equation}
Using \eqref{eq-sl-1} and \eqref{eq-Jaeger-formula-N-graph}, it is straightforward to recover the formula of the $\mathfrak{so}(2)$ KV polynomial of a planar $4$-valent graph given by Carpentier \cite[Theorem 4]{Carpentier1} and Caprau, Tipton \cite[Theorem 4]{Caprau-Tipton}.
We would also like to point out that the concept of balanced edge orientation is implicit in \cite{Carpentier2}, in which Carpentier gave an alternative proof of \cite[Theorem 4]{Carpentier1}.
\end{remark}
\begin{proof}[Proof of Theorem \ref{thm-Jaeger-formula-graph}]
We prove Theorem \ref{thm-Jaeger-formula-graph} by inducting on the number of vertices in $D$. The proof comes down to a straightforward but rather lengthy tabulation of all admissible balanced edge orientations of $D$.
If $D$ contains no vertex, then \eqref{eq-Jaeger-formula-graph} becomes \eqref{eq-Jaeger-formula}, which is known to be true. Assume that \eqref{eq-Jaeger-formula-graph} is true if $D$ has at most $n-1$ vertices. Now let $D$ be a knotted $4$-valent graph with $n$ vertices.
\begin{figure}\label{D-hatD-DA-DB-fig}
% NOTE(review): the figure content (the local configurations at $v$ defining $\widehat{D}$, $D^A$ and $D^B$) appears to have been lost in extraction; restore the graphics and add a \caption (placed before the \label) when recompiling.
\end{figure}
Choose a vertex $v$ of $D$. Define $\widehat{D}$, $D^A$ and $D^B$ to be the knotted $4$-valent graphs obtained from $D$ by replacing $v$ by the local configurations in Figure \ref{D-hatD-DA-DB-fig}. By skein relation \eqref{Kauffman-skein-vertex}, we have
\begin{equation}\label{eq-D-hatD-DA-DB}
P(D)=-P(\widehat{D})+q P(D^A) +q^{-1} P(D^B).
\end{equation}
Note that each of $\widehat{D}$, $D^A$ and $D^B$ has only $n-1$ vertices. So \eqref{eq-Jaeger-formula-graph} is true for $\widehat{D}$, $D^A$ and $D^B$. Thus, by \eqref{eq-D-hatD-DA-DB}, to prove \eqref{eq-Jaeger-formula-graph} for $D$, we only need to check that
\begin{eqnarray}
\label{eq-Jaeger-D-hatD-DA-DB} && \sum_{\varrho \in \mathcal{O}(D)} \sum_{\varsigma \in \Sigma(D_\varrho)} (a^{-1}q)^{\mathrm{rot}(D_{\varrho,\varsigma})} [D_\varrho,\varsigma] R(D_{\varrho,\varsigma})(q,a)\\
& = & -\sum_{\varrho \in \mathcal{O}(\widehat{D})} \sum_{\varsigma \in \Sigma(\widehat{D}_\varrho)} (a^{-1}q)^{\mathrm{rot}(\widehat{D}_{\varrho,\varsigma})} [\widehat{D}_\varrho,\varsigma] R(\widehat{D}_{\varrho,\varsigma})(q,a) \nonumber \\
&& + q \sum_{\varrho \in \mathcal{O}(D^A)} \sum_{\varsigma \in \Sigma(D^A_\varrho)} (a^{-1}q)^{\mathrm{rot}(D^A_{\varrho,\varsigma})} [D^A_\varrho,\varsigma] R(D^A_{\varrho,\varsigma})(q,a) \nonumber \\
&& +q^{-1} \sum_{\varrho \in \mathcal{O}(D^B)} \sum_{\varsigma \in \Sigma(D^B_\varrho)} (a^{-1}q)^{\mathrm{rot}(D^B_{\varrho,\varsigma})} [D^B_\varrho,\varsigma] R(D^B_{\varrho,\varsigma})(q,a)\nonumber.
\end{eqnarray}
According to the orientations of the four edges of $D$ incident to $v$, we divide $\mathcal{O}(D)$ into six disjoint subsets
\begin{equation}\label{O-D-Subsets}
\mathcal{O}(D)=\mathcal{O}(D;\setlength{\unitlength}{.75pt}
\begin{picture}(20,20)(-10,7)
\put(0,10){\vector(1,1){10}}
\put(-10,0){\vector(1,1){10}}
\put(0,10){\vector(-1,1){10}}
\put(10,0){\vector(-1,1){10}}
\put(4,8){\tiny{$v$}}
\end{picture})\sqcup\mathcal{O}(D;\setlength{\unitlength}{.75pt}
\begin{picture}(20,20)(-10,7)
\put(0,10){\vector(1,1){10}}
\put(-10,0){\vector(1,1){10}}
\put(-10,20){\vector(1,-1){10}}
\put(0,10){\vector(1,-1){10}}
\put(4,8){\tiny{$v$}}
\end{picture}) \sqcup\mathcal{O}(D;\setlength{\unitlength}{.75pt}
\begin{picture}(20,20)(-10,7)
\put(10,20){\vector(-1,-1){10}}
\put(0,10){\vector(-1,-1){10}}
\put(-10,20){\vector(1,-1){10}}
\put(0,10){\vector(1,-1){10}}
\put(4,8){\tiny{$v$}}
\end{picture}) \sqcup\mathcal{O}(D;\setlength{\unitlength}{.75pt}
\begin{picture}(20,20)(-10,7)
\put(10,20){\vector(-1,-1){10}}
\put(0,10){\vector(-1,-1){10}}
\put(0,10){\vector(-1,1){10}}
\put(10,0){\vector(-1,1){10}}
\put(4,8){\tiny{$v$}}
\end{picture}) \sqcup\mathcal{O}(D;\setlength{\unitlength}{.75pt}
\begin{picture}(20,20)(-10,7)
\put(0,10){\vector(1,1){10}}
\put(0,10){\vector(-1,-1){10}}
\put(-10,20){\vector(1,-1){10}}
\put(10,0){\vector(-1,1){10}}
\put(4,8){\tiny{$v$}}
\end{picture}) \sqcup\mathcal{O}(D;\setlength{\unitlength}{.75pt}
\begin{picture}(20,20)(-10,7)
\put(10,20){\vector(-1,-1){10}}
\put(-10,0){\vector(1,1){10}}
\put(0,10){\vector(-1,1){10}}
\put(0,10){\vector(1,-1){10}}
\put(4,8){\tiny{$v$}}
\end{picture}),
\end{equation}
where $\ast$ in $\mathcal{O}(D;\ast)$ specifies the edge orientation near $v$. Note that, depending on $D$, some of these subsets may be empty. Using similar notations, we have the following partitions of $\mathcal{O}(\widehat{D})$, $\mathcal{O}(D^A)$ and $\mathcal{O}(D^B)$.
\begin{eqnarray}
\label{O-hatD-Subsets}&& \mathcal{O}(\widehat{D})= \mathcal{O}(\widehat{D};\setlength{\unitlength}{.75pt}
\begin{picture}(20,20)(-10,7)
\put(-10,0){\vector(1,1){20}}
\put(-2,12){\vector(-1,1){8}}
\put(2,8){\line(1,-1){8}}
\end{picture}) \sqcup\mathcal{O}(\widehat{D};\setlength{\unitlength}{.75pt}
\begin{picture}(20,20)(-10,7)
\put(-10,0){\vector(1,1){20}}
\put(-10,20){\line(1,-1){8}}
\put(2,8){\vector(1,-1){8}}
\end{picture}) \sqcup\mathcal{O}(\widehat{D};\setlength{\unitlength}{.75pt}
\begin{picture}(20,20)(-10,7)
\put(10,20){\vector(-1,-1){20}}
\put(-10,20){\line(1,-1){8}}
\put(2,8){\vector(1,-1){8}}
\end{picture}) \sqcup\mathcal{O}(\widehat{D};\setlength{\unitlength}{.75pt}
\begin{picture}(20,20)(-10,7)
\put(10,20){\vector(-1,-1){20}}
\put(-2,12){\vector(-1,1){8}}
\put(2,8){\line(1,-1){8}}
\end{picture}) \sqcup\mathcal{O}(\widehat{D};\setlength{\unitlength}{.75pt}
\begin{picture}(20,20)(-10,7)
\put(0,10){\vector(-1,-1){10}}
\put(0,10){\vector(1,1){10}}
\put(-10,20){\vector(1,-1){8}}
\put(10,0){\vector(-1,1){8}}
\end{picture}), \\
\label{O-DA-Subsets}&& \mathcal{O}(D^A) = \mathcal{O}(D^A;\setlength{\unitlength}{.75pt}
\begin{picture}(20,20)(-10,7)
\put(-10,20){\vector(-1,1){0}}
\put(10,20){\vector(1,1){0}}
\qbezier(-10,0)(0,10)(-10,20)
\qbezier(10,0)(0,10)(10,20)
\end{picture}) \sqcup \mathcal{O}(D^A;\setlength{\unitlength}{.75pt}
\begin{picture}(20,20)(-10,7)
\put(-10,0){\vector(-1,-1){0}}
\put(10,0){\vector(1,-1){0}}
\qbezier(-10,0)(0,10)(-10,20)
| 3,962 | 69,513 |
% NOTE(review): removed non-LaTeX dataset-extraction artifact lines ("en / train / 0.103.4"); the lines that follow partially duplicate the preceding passage and should be deduplicated against it.
\end{picture}) \sqcup\mathcal{O}(D;\setlength{\unitlength}{.75pt}
\begin{picture}(20,20)(-10,7)
\put(10,20){\vector(-1,-1){10}}
\put(0,10){\vector(-1,-1){10}}
\put(-10,20){\vector(1,-1){10}}
\put(0,10){\vector(1,-1){10}}
\put(4,8){\tiny{$v$}}
\end{picture}) \sqcup\mathcal{O}(D;\setlength{\unitlength}{.75pt}
\begin{picture}(20,20)(-10,7)
\put(10,20){\vector(-1,-1){10}}
\put(0,10){\vector(-1,-1){10}}
\put(0,10){\vector(-1,1){10}}
\put(10,0){\vector(-1,1){10}}
\put(4,8){\tiny{$v$}}
\end{picture}) \sqcup\mathcal{O}(D;\setlength{\unitlength}{.75pt}
\begin{picture}(20,20)(-10,7)
\put(0,10){\vector(1,1){10}}
\put(0,10){\vector(-1,-1){10}}
\put(-10,20){\vector(1,-1){10}}
\put(10,0){\vector(-1,1){10}}
\put(4,8){\tiny{$v$}}
\end{picture}) \sqcup\mathcal{O}(D;\setlength{\unitlength}{.75pt}
\begin{picture}(20,20)(-10,7)
\put(10,20){\vector(-1,-1){10}}
\put(-10,0){\vector(1,1){10}}
\put(0,10){\vector(-1,1){10}}
\put(0,10){\vector(1,-1){10}}
\put(4,8){\tiny{$v$}}
\end{picture}),
\end{equation}
where $\ast$ in $\mathcal{O}(D;\ast)$ specifies the edge orientation near $v$. Note that, depending on $D$, some of these subsets may be empty. Using similar notations, we have the following partitions of $\mathcal{O}(\widehat{D})$, $\mathcal{O}(D^A)$ and $\mathcal{O}(D^B)$.
\begin{eqnarray}
\label{O-hatD-Subsets}&& \mathcal{O}(\widehat{D})= \mathcal{O}(\widehat{D};\setlength{\unitlength}{.75pt}
\begin{picture}(20,20)(-10,7)
\put(-10,0){\vector(1,1){20}}
\put(-2,12){\vector(-1,1){8}}
\put(2,8){\line(1,-1){8}}
\end{picture}) \sqcup\mathcal{O}(\widehat{D};\setlength{\unitlength}{.75pt}
\begin{picture}(20,20)(-10,7)
\put(-10,0){\vector(1,1){20}}
\put(-10,20){\line(1,-1){8}}
\put(2,8){\vector(1,-1){8}}
\end{picture}) \sqcup\mathcal{O}(\widehat{D};\setlength{\unitlength}{.75pt}
\begin{picture}(20,20)(-10,7)
\put(10,20){\vector(-1,-1){20}}
\put(-10,20){\line(1,-1){8}}
\put(2,8){\vector(1,-1){8}}
\end{picture}) \sqcup\mathcal{O}(\widehat{D};\setlength{\unitlength}{.75pt}
\begin{picture}(20,20)(-10,7)
\put(10,20){\vector(-1,-1){20}}
\put(-2,12){\vector(-1,1){8}}
\put(2,8){\line(1,-1){8}}
\end{picture}) \sqcup\mathcal{O}(\widehat{D};\setlength{\unitlength}{.75pt}
\begin{picture}(20,20)(-10,7)
\put(0,10){\vector(-1,-1){10}}
\put(0,10){\vector(1,1){10}}
\put(-10,20){\vector(1,-1){8}}
\put(10,0){\vector(-1,1){8}}
\end{picture}), \\
\label{O-DA-Subsets}&& \mathcal{O}(D^A) = \mathcal{O}(D^A;\setlength{\unitlength}{.75pt}
\begin{picture}(20,20)(-10,7)
\put(-10,20){\vector(-1,1){0}}
\put(10,20){\vector(1,1){0}}
\qbezier(-10,0)(0,10)(-10,20)
\qbezier(10,0)(0,10)(10,20)
\end{picture}) \sqcup \mathcal{O}(D^A;\setlength{\unitlength}{.75pt}
\begin{picture}(20,20)(-10,7)
\put(-10,0){\vector(-1,-1){0}}
\put(10,0){\vector(1,-1){0}}
\qbezier(-10,0)(0,10)(-10,20)
\qbezier(10,0)(0,10)(10,20)
\end{picture}) \sqcup \mathcal{O}(D^A;\setlength{\unitlength}{.75pt}
\begin{picture}(20,20)(-10,7)
\put(-10,0){\vector(-1,-1){0}}
\put(10,20){\vector(1,1){0}}
\qbezier(-10,0)(0,10)(-10,20)
\qbezier(10,0)(0,10)(10,20)
\end{picture}) \sqcup \mathcal{O}(D^A;\setlength{\unitlength}{.75pt}
\begin{picture}(20,20)(-10,7)
\put(-10,20){\vector(-1,1){0}}
\put(10,0){\vector(1,-1){0}}
\qbezier(-10,0)(0,10)(-10,20)
\qbezier(10,0)(0,10)(10,20)
\end{picture}), \\
\label{O-DB-Subsets}&& \mathcal{O}(D^B) = \mathcal{O}(D^B;\setlength{\unitlength}{.75pt}
\begin{picture}(20,20)(-10,7)
\put(10,20){\vector(1,1){0}}
\put(10,0){\vector(1,-1){0}}
\qbezier(-10,0)(0,10)(10,0)
\qbezier(-10,20)(0,10)(10,20)
\end{picture}) \sqcup\mathcal{O}(D^B;\setlength{\unitlength}{.75pt}
\begin{picture}(20,20)(-10,7)
\put(-10,20){\vector(-1,1){0}}
\put(-10,0){\vector(-1,-1){0}}
\qbezier(-10,0)(0,10)(10,0)
\qbezier(-10,20)(0,10)(10,20)
\end{picture}) \sqcup\mathcal{O}(D^B;\setlength{\unitlength}{.75pt}
\begin{picture}(20,20)(-10,7)
\put(10,20){\vector(1,1){0}}
\put(-10,0){\vector(-1,-1){0}}
\qbezier(-10,0)(0,10)(10,0)
\qbezier(-10,20)(0,10)(10,20)
\end{picture}) \sqcup\mathcal{O}(D^B;\setlength{\unitlength}{.75pt}
\begin{picture}(20,20)(-10,7)
\put(-10,20){\vector(-1,1){0}}
\put(10,0){\vector(1,-1){0}}
\qbezier(-10,0)(0,10)(10,0)
\qbezier(-10,20)(0,10)(10,20)
\end{picture}).
\end{eqnarray}
First, let us consider the subset $\mathcal{O}(D;\setlength{\unitlength}{.75pt}
\begin{picture}(20,20)(-10,7)
\put(0,10){\vector(1,1){10}}
\put(-10,0){\vector(1,1){10}}
\put(0,10){\vector(-1,1){10}}
\put(10,0){\vector(-1,1){10}}
\put(4,8){\tiny{$v$}}
\end{picture})$. There are obvious bijections
\begin{eqnarray*}
\mathcal{O}(D;\setlength{\unitlength}{.75pt}
\begin{picture}(20,20)(-10,7)
\put(0,10){\vector(1,1){10}}
\put(-10,0){\vector(1,1){10}}
\put(0,10){\vector(-1,1){10}}
\put(10,0){\vector(-1,1){10}}
\put(4,8){\tiny{$v$}}
\end{picture}) & \xrightarrow{\varphi} & \mathcal{O}(\widehat{D};\setlength{\unitlength}{.75pt}
\begin{picture}(20,20)(-10,7)
\put(-10,0){\vector(1,1){20}}
\put(-2,12){\vector(-1,1){8}}
\put(2,8){\line(1,-1){8}}
\end{picture}), \\
\mathcal{O}(D;\setlength{\unitlength}{.75pt}
\begin{picture}(20,20)(-10,7)
\put(0,10){\vector(1,1){10}}
\put(-10,0){\vector(1,1){10}}
\put(0,10){\vector(-1,1){10}}
\put(10,0){\vector(-1,1){10}}
\put(4,8){\tiny{$v$}}
\end{picture}) & \xrightarrow{\psi} & \mathcal{O}(D^A;\setlength{\unitlength}{.75pt}
\begin{picture}(20,20)(-10,7)
\put(-10,20){\vector(-1,1){0}}
\put(10,20){\vector(1,1){0}}
\qbezier(-10,0)(0,10)(-10,20)
\qbezier(10,0)(0,10)(10,20)
\end{picture})
\end{eqnarray*}
that preserve the orientations of corresponding edges. Moreover, for each $\varrho \in \mathcal{O}(D;\setlength{\unitlength}{.75pt}
\begin{picture}(20,20)(-10,7)
\put(0,10){\vector(1,1){10}}
\put(-10,0){\vector(1,1){10}}
\put(0,10){\vector(-1,1){10}}
\put(10,0){\vector(-1,1){10}}
\put(4,8){\tiny{$v$}}
\end{picture})$, there are obvious bijections
\begin{eqnarray*}
\Sigma(D_\varrho) \xrightarrow{\varphi_\varrho} \Sigma(\widehat{D}_{\varphi(\varrho)}), \\
\Sigma(D_\varrho) \xrightarrow{\psi_\varrho} \Sigma(D^A_{\psi(\varrho)})
\end{eqnarray*}
such that, for any $\varsigma \in \Sigma(D_\varrho)$, $\varsigma$, $\varphi_\varrho(\varsigma)$ and $\psi_\varrho(\varsigma)$ are identical outside the parts shown in Figure \ref{D-hatD-DA-DB-fig}. Note that the four edges at $v$ are oriented in a crossing-like way. So $\varsigma$ (resp. $\varphi_\varrho(\varsigma)$ and $\psi_\varrho(\varsigma)$) does not change the part of $D_\varrho$ (resp. $\widehat{D}_{\varphi(\varrho)}$ and $D^A_{\psi(\varrho)}$) shown in Figure \ref{D-hatD-DA-DB-fig}. This implies that
\begin{equation}\label{weight-D-hatD-DA}
[D_\varrho,\varsigma] =[\widehat{D}_{\varphi(\varrho)},\varphi_\varrho(\varsigma)] =[D^A_{\psi(\varrho)},\psi_\varrho(\varsigma)].
\end{equation}
It is also easy to see that
\begin{equation}\label{rot-D-hatD-DA}
\mathrm{rot}(D_{\varrho,\varsigma}) =\mathrm{rot}(\widehat{D}_{\varphi(\varrho),\varphi_\varrho(\varsigma)}) = \mathrm{rot}(D^A_{\psi(\varrho),\psi_\varrho(\varsigma)}).
\end{equation}
By the skein relation \eqref{HOMFLY-skein-vertex}, we know that
\begin{equation}\label{HOMFLY-D-hatD-DA}
R(D_{\varrho,\varsigma}) = -R(\widehat{D}_{\varphi(\varrho),\varphi_\varrho(\varsigma)}) +q R(D^A_{\psi(\varrho),\psi_\varrho(\varsigma)}).
\end{equation}
Combining equations \eqref{weight-D-hatD-DA}, \eqref{rot-D-hatD-DA} and \eqref{HOMFLY-D-hatD-DA}, we get
\begin{eqnarray}
\label{eq-Jaeger-D-hatD-DA-DB-u} && \sum_{\varrho \in \mathcal{O}(D;\setlength{\unitlength}{.75pt}
\begin{picture}(20,20)(-10,7)
\put(0,10){\vector(1,1){10}}
\put(-10,0){\vector(1,1){10}}
\put(0,10){\vector(-1,1){10}}
\put(10,0){\vector(-1,1){10}}
\put(4,8){\tiny{$v$}}
| 3,899 | 69,513 |
% NOTE(review): removed non-LaTeX dataset-extraction artifact lines ("en / train / 0.103.5"); the lines that follow partially duplicate the preceding passage and should be deduplicated against it.
\put(0,10){\vector(-1,1){10}}
\put(10,0){\vector(-1,1){10}}
\put(4,8){\tiny{$v$}}
\end{picture})$. There are obvious bijections
\begin{eqnarray*}
\mathcal{O}(D;\setlength{\unitlength}{.75pt}
\begin{picture}(20,20)(-10,7)
\put(0,10){\vector(1,1){10}}
\put(-10,0){\vector(1,1){10}}
\put(0,10){\vector(-1,1){10}}
\put(10,0){\vector(-1,1){10}}
\put(4,8){\tiny{$v$}}
\end{picture}) & \xrightarrow{\varphi} & \mathcal{O}(\widehat{D};\setlength{\unitlength}{.75pt}
\begin{picture}(20,20)(-10,7)
\put(-10,0){\vector(1,1){20}}
\put(-2,12){\vector(-1,1){8}}
\put(2,8){\line(1,-1){8}}
\end{picture}), \\
\mathcal{O}(D;\setlength{\unitlength}{.75pt}
\begin{picture}(20,20)(-10,7)
\put(0,10){\vector(1,1){10}}
\put(-10,0){\vector(1,1){10}}
\put(0,10){\vector(-1,1){10}}
\put(10,0){\vector(-1,1){10}}
\put(4,8){\tiny{$v$}}
\end{picture}) & \xrightarrow{\psi} & \mathcal{O}(D^A;\setlength{\unitlength}{.75pt}
\begin{picture}(20,20)(-10,7)
\put(-10,20){\vector(-1,1){0}}
\put(10,20){\vector(1,1){0}}
\qbezier(-10,0)(0,10)(-10,20)
\qbezier(10,0)(0,10)(10,20)
\end{picture})
\end{eqnarray*}
that preserve the orientations of corresponding edges. Moreover, for each $\varrho \in \mathcal{O}(D;\setlength{\unitlength}{.75pt}
\begin{picture}(20,20)(-10,7)
\put(0,10){\vector(1,1){10}}
\put(-10,0){\vector(1,1){10}}
\put(0,10){\vector(-1,1){10}}
\put(10,0){\vector(-1,1){10}}
\put(4,8){\tiny{$v$}}
\end{picture})$, there are obvious bijections
\begin{eqnarray*}
\Sigma(D_\varrho) \xrightarrow{\varphi_\varrho} \Sigma(\widehat{D}_{\varphi(\varrho)}), \\
\Sigma(D_\varrho) \xrightarrow{\psi_\varrho} \Sigma(D^A_{\psi(\varrho)})
\end{eqnarray*}
such that, for any $\varsigma \in \Sigma(D_\varrho)$, $\varsigma$, $\varphi_\varrho(\varsigma)$ and $\psi_\varrho(\varsigma)$ are identical outside the parts shown in Figure \ref{D-hatD-DA-DB-fig}. Note that the four edges at $v$ are oriented in a crossing-like way. So $\varsigma$ (resp. $\varphi_\varrho(\varsigma)$ and $\psi_\varrho(\varsigma)$) does not change the part of $D_\varrho$ (resp. $\widehat{D}_{\varphi(\varrho)}$ and $D^A_{\psi(\varrho)}$) shown in Figure \ref{D-hatD-DA-DB-fig}. This implies that
\begin{equation}\label{weight-D-hatD-DA}
[D_\varrho,\varsigma] =[\widehat{D}_{\varphi(\varrho)},\varphi_\varrho(\varsigma)] =[D^A_{\psi(\varrho)},\psi_\varrho(\varsigma)].
\end{equation}
It is also easy to see that
\begin{equation}\label{rot-D-hatD-DA}
\mathrm{rot}(D_{\varrho,\varsigma}) =\mathrm{rot}(\widehat{D}_{\varphi(\varrho),\varphi_\varrho(\varsigma)}) = \mathrm{rot}(D^A_{\psi(\varrho),\psi_\varrho(\varsigma)}).
\end{equation}
By the skein relation \eqref{HOMFLY-skein-vertex}, we know that
\begin{equation}\label{HOMFLY-D-hatD-DA}
R(D_{\varrho,\varsigma}) = -R(\widehat{D}_{\varphi(\varrho),\varphi_\varrho(\varsigma)}) +q R(D^A_{\psi(\varrho),\psi_\varrho(\varsigma)}).
\end{equation}
Combining equations \eqref{weight-D-hatD-DA}, \eqref{rot-D-hatD-DA} and \eqref{HOMFLY-D-hatD-DA}, we get
\begin{eqnarray}
\label{eq-Jaeger-D-hatD-DA-DB-u} && \sum_{\varrho \in \mathcal{O}(D;\setlength{\unitlength}{.75pt}
\begin{picture}(20,20)(-10,7)
\put(0,10){\vector(1,1){10}}
\put(-10,0){\vector(1,1){10}}
\put(0,10){\vector(-1,1){10}}
\put(10,0){\vector(-1,1){10}}
\put(4,8){\tiny{$v$}}
\end{picture})} \sum_{\varsigma \in \Sigma(D_\varrho)} (a^{-1}q)^{\mathrm{rot}(D_{\varrho,\varsigma})} [D_\varrho,\varsigma] R(D_{\varrho,\varsigma})(q,a)\\
& = & -\sum_{\varrho \in \mathcal{O}(\widehat{D};\setlength{\unitlength}{.75pt}
\begin{picture}(20,20)(-10,7)
\put(-10,0){\vector(1,1){20}}
\put(-2,12){\vector(-1,1){8}}
\put(2,8){\line(1,-1){8}}
\end{picture})} \sum_{\varsigma \in \Sigma(\widehat{D}_\varrho)} (a^{-1}q)^{\mathrm{rot}(\widehat{D}_{\varrho,\varsigma})} [\widehat{D}_\varrho,\varsigma] R(\widehat{D}_{\varrho,\varsigma})(q,a) \nonumber \\
&& + q \sum_{\varrho \in \mathcal{O}(D^A;\setlength{\unitlength}{.75pt}
\begin{picture}(20,20)(-10,7)
\put(-10,20){\vector(-1,1){0}}
\put(10,20){\vector(1,1){0}}
\qbezier(-10,0)(0,10)(-10,20)
\qbezier(10,0)(0,10)(10,20)
\end{picture})} \sum_{\varsigma \in \Sigma(D^A_\varrho)} (a^{-1}q)^{\mathrm{rot}(D^A_{\varrho,\varsigma})} [D^A_\varrho,\varsigma] R(D^A_{\varrho,\varsigma})(q,a). \nonumber
\end{eqnarray}
One can similarly deduce that
\begin{eqnarray}
\label{eq-Jaeger-D-hatD-DA-DB-r} && \sum_{\varrho \in \mathcal{O}(D;\setlength{\unitlength}{.75pt}
\begin{picture}(20,20)(-10,7)
\put(0,10){\vector(1,1){10}}
\put(-10,0){\vector(1,1){10}}
\put(-10,20){\vector(1,-1){10}}
\put(0,10){\vector(1,-1){10}}
\put(4,8){\tiny{$v$}}
\end{picture})} \sum_{\varsigma \in \Sigma(D_\varrho)} (a^{-1}q)^{\mathrm{rot}(D_{\varrho,\varsigma})} [D_\varrho,\varsigma] R(D_{\varrho,\varsigma})(q,a)\\
& = & -\sum_{\varrho \in \mathcal{O}(\widehat{D};\setlength{\unitlength}{.75pt}
\begin{picture}(20,20)(-10,7)
\put(-10,0){\vector(1,1){20}}
\put(-10,20){\line(1,-1){8}}
\put(2,8){\vector(1,-1){8}}
\end{picture})} \sum_{\varsigma \in \Sigma(\widehat{D}_\varrho)} (a^{-1}q)^{\mathrm{rot}(\widehat{D}_{\varrho,\varsigma})} [\widehat{D}_\varrho,\varsigma] R(\widehat{D}_{\varrho,\varsigma})(q,a) \nonumber \\
&& + q^{-1} \sum_{\varrho \in \mathcal{O}(D^B;\setlength{\unitlength}{.75pt}
\begin{picture}(20,20)(-10,7)
\put(10,20){\vector(1,1){0}}
\put(10,0){\vector(1,-1){0}}
\qbezier(-10,0)(0,10)(10,0)
\qbezier(-10,20)(0,10)(10,20)
\end{picture})} \sum_{\varsigma \in \Sigma(D^B_\varrho)} (a^{-1}q)^{\mathrm{rot}(D^B_{\varrho,\varsigma})} [D^B_\varrho,\varsigma] R(D^B_{\varrho,\varsigma})(q,a), \nonumber
\end{eqnarray}
\begin{eqnarray}
\label{eq-Jaeger-D-hatD-DA-DB-d} && \sum_{\varrho \in \mathcal{O}(D;\setlength{\unitlength}{.75pt}
\begin{picture}(20,20)(-10,7)
\put(10,20){\vector(-1,-1){10}}
\put(0,10){\vector(-1,-1){10}}
\put(-10,20){\vector(1,-1){10}}
\put(0,10){\vector(1,-1){10}}
\put(4,8){\tiny{$v$}}
\end{picture})} \sum_{\varsigma \in \Sigma(D_\varrho)} (a^{-1}q)^{\mathrm{rot}(D_{\varrho,\varsigma})} [D_\varrho,\varsigma] R(D_{\varrho,\varsigma})(q,a)\\
& = & -\sum_{\varrho \in \mathcal{O}(\widehat{D};\setlength{\unitlength}{.75pt}
\begin{picture}(20,20)(-10,7)
\put(10,20){\vector(-1,-1){20}}
\put(-10,20){\line(1,-1){8}}
\put(2,8){\vector(1,-1){8}}
\end{picture})} \sum_{\varsigma \in \Sigma(\widehat{D}_\varrho)} (a^{-1}q)^{\mathrm{rot}(\widehat{D}_{\varrho,\varsigma})} [\widehat{D}_\varrho,\varsigma] R(\widehat{D}_{\varrho,\varsigma})(q,a) \nonumber \\
&& + q \sum_{\varrho \in \mathcal{O}(D^A;\setlength{\unitlength}{.75pt}
\begin{picture}(20,20)(-10,7)
\put(-10,0){\vector(-1,-1){0}}
\put(10,0){\vector(1,-1){0}}
\qbezier(-10,0)(0,10)(-10,20)
\qbezier(10,0)(0,10)(10,20)
\end{picture})} \sum_{\varsigma \in \Sigma(D^A_\varrho)} (a^{-1}q)^{\mathrm{rot}(D^A_{\varrho,\varsigma})} [D^A_\varrho,\varsigma] R(D^A_{\varrho,\varsigma})(q,a), \nonumber
\end{eqnarray}
\begin{eqnarray}
\label{eq-Jaeger-D-hatD-DA-DB-l} && \sum_{\varrho \in \mathcal{O}(D;\setlength{\unitlength}{.75pt}
\begin{picture}(20,20)(-10,7)
\put(10,20){\vector(-1,-1){10}}
\put(0,10){\vector(-1,-1){10}}
\put(0,10){\vector(-1,1){10}}
\put(10,0){\vector(-1,1){10}}
\put(4,8){\tiny{$v$}}
\end{picture})} \sum_{\varsigma \in \Sigma(D_\varrho)} (a^{-1}q)^{\mathrm{rot}(D_{\varrho,\varsigma})} [D_\varrho,\varsigma] R(D_{\varrho,\varsigma})(q,a)\\
& = & -\sum_{\varrho \in \mathcal{O}(\widehat{D};\setlength{\unitlength}{.75pt}
\begin{picture}(20,20)(-10,7)
\put(10,20){\vector(-1,-1){20}}
\put(-2,12){\vector(-1,1){8}}
\put(2,8){\line(1,-1){8}}
\end{picture})} \sum_{\varsigma \in \Sigma(\widehat{D}_\varrho)} (a^{-1}q)^{\mathrm{rot}(\widehat{D}_{\varrho,\varsigma})} [\widehat{D}_\varrho,\varsigma] R(\widehat{D}_{\varrho,\varsigma})(q,a) \nonumber \\
&& + q^{-1} \sum_{\varrho \in \mathcal{O}(D^B;\setlength{\unitlength}{.75pt}
\begin{picture}(20,20)(-10,7)
\put(-10,20){\vector(-1,1){0}}
\put(-10,0){\vector(-1,-1){0}}
\qbezier(-10,0)(0,10)(10,0)
\qbezier(-10,20)(0,10)(10,20)
\end{picture})} \sum_{\varsigma \in \Sigma(D^B_\varrho)} (a^{-1}q)^{\mathrm{rot}(D^B_{\varrho,\varsigma})} [D^B_\varrho,\varsigma] R(D^B_{\varrho,\varsigma})(q,a). \nonumber
\end{eqnarray}
Now we consider $\mathcal{O}(D;\setlength{\unitlength}{.75pt}
\begin{picture}(20,20)(-10,7)
\put(0,10){\vector(1,1){10}}
\put(0,10){\vector(-1,-1){10}}
\put(-10,20){\vector(1,-1){10}}
\put(10,0){\vector(-1,1){10}}
\put(4,8){\tiny{$v$}}
| 3,980 | 69,513 |
% NOTE(review): removed non-LaTeX dataset-extraction artifact lines ("en / train / 0.103.6"); the lines that follow partially duplicate the preceding passage and should be deduplicated against it.
\qbezier(-10,20)(0,10)(10,20)
\end{picture})} \sum_{\varsigma \in \Sigma(D^B_\varrho)} (a^{-1}q)^{\mathrm{rot}(D^B_{\varrho,\varsigma})} [D^B_\varrho,\varsigma] R(D^B_{\varrho,\varsigma})(q,a), \nonumber
\end{eqnarray}
\begin{eqnarray}
\label{eq-Jaeger-D-hatD-DA-DB-d} && \sum_{\varrho \in \mathcal{O}(D;\setlength{\unitlength}{.75pt}
\begin{picture}(20,20)(-10,7)
\put(10,20){\vector(-1,-1){10}}
\put(0,10){\vector(-1,-1){10}}
\put(-10,20){\vector(1,-1){10}}
\put(0,10){\vector(1,-1){10}}
\put(4,8){\tiny{$v$}}
\end{picture})} \sum_{\varsigma \in \Sigma(D_\varrho)} (a^{-1}q)^{\mathrm{rot}(D_{\varrho,\varsigma})} [D_\varrho,\varsigma] R(D_{\varrho,\varsigma})(q,a)\\
& = & -\sum_{\varrho \in \mathcal{O}(\widehat{D};\setlength{\unitlength}{.75pt}
\begin{picture}(20,20)(-10,7)
\put(10,20){\vector(-1,-1){20}}
\put(-10,20){\line(1,-1){8}}
\put(2,8){\vector(1,-1){8}}
\end{picture})} \sum_{\varsigma \in \Sigma(\widehat{D}_\varrho)} (a^{-1}q)^{\mathrm{rot}(\widehat{D}_{\varrho,\varsigma})} [\widehat{D}_\varrho,\varsigma] R(\widehat{D}_{\varrho,\varsigma})(q,a) \nonumber \\
&& + q \sum_{\varrho \in \mathcal{O}(D^A;\setlength{\unitlength}{.75pt}
\begin{picture}(20,20)(-10,7)
\put(-10,0){\vector(-1,-1){0}}
\put(10,0){\vector(1,-1){0}}
\qbezier(-10,0)(0,10)(-10,20)
\qbezier(10,0)(0,10)(10,20)
\end{picture})} \sum_{\varsigma \in \Sigma(D^A_\varrho)} (a^{-1}q)^{\mathrm{rot}(D^A_{\varrho,\varsigma})} [D^A_\varrho,\varsigma] R(D^A_{\varrho,\varsigma})(q,a), \nonumber
\end{eqnarray}
\begin{eqnarray}
\label{eq-Jaeger-D-hatD-DA-DB-l} && \sum_{\varrho \in \mathcal{O}(D;\setlength{\unitlength}{.75pt}
\begin{picture}(20,20)(-10,7)
\put(10,20){\vector(-1,-1){10}}
\put(0,10){\vector(-1,-1){10}}
\put(0,10){\vector(-1,1){10}}
\put(10,0){\vector(-1,1){10}}
\put(4,8){\tiny{$v$}}
\end{picture})} \sum_{\varsigma \in \Sigma(D_\varrho)} (a^{-1}q)^{\mathrm{rot}(D_{\varrho,\varsigma})} [D_\varrho,\varsigma] R(D_{\varrho,\varsigma})(q,a)\\
& = & -\sum_{\varrho \in \mathcal{O}(\widehat{D};\setlength{\unitlength}{.75pt}
\begin{picture}(20,20)(-10,7)
\put(10,20){\vector(-1,-1){20}}
\put(-2,12){\vector(-1,1){8}}
\put(2,8){\line(1,-1){8}}
\end{picture})} \sum_{\varsigma \in \Sigma(\widehat{D}_\varrho)} (a^{-1}q)^{\mathrm{rot}(\widehat{D}_{\varrho,\varsigma})} [\widehat{D}_\varrho,\varsigma] R(\widehat{D}_{\varrho,\varsigma})(q,a) \nonumber \\
&& + q^{-1} \sum_{\varrho \in \mathcal{O}(D^B;\setlength{\unitlength}{.75pt}
\begin{picture}(20,20)(-10,7)
\put(-10,20){\vector(-1,1){0}}
\put(-10,0){\vector(-1,-1){0}}
\qbezier(-10,0)(0,10)(10,0)
\qbezier(-10,20)(0,10)(10,20)
\end{picture})} \sum_{\varsigma \in \Sigma(D^B_\varrho)} (a^{-1}q)^{\mathrm{rot}(D^B_{\varrho,\varsigma})} [D^B_\varrho,\varsigma] R(D^B_{\varrho,\varsigma})(q,a). \nonumber
\end{eqnarray}
Now we consider $\mathcal{O}(D;\setlength{\unitlength}{.75pt}
\begin{picture}(20,20)(-10,7)
\put(0,10){\vector(1,1){10}}
\put(0,10){\vector(-1,-1){10}}
\put(-10,20){\vector(1,-1){10}}
\put(10,0){\vector(-1,1){10}}
\put(4,8){\tiny{$v$}}
\end{picture})$. There are obvious bijections
\begin{eqnarray*}
\mathcal{O}(D;\setlength{\unitlength}{.75pt}
\begin{picture}(20,20)(-10,7)
\put(0,10){\vector(1,1){10}}
\put(0,10){\vector(-1,-1){10}}
\put(-10,20){\vector(1,-1){10}}
\put(10,0){\vector(-1,1){10}}
\put(4,8){\tiny{$v$}}
\end{picture}) & \xrightarrow{\varphi} & \mathcal{O}(\widehat{D};\setlength{\unitlength}{.75pt}
\begin{picture}(20,20)(-10,7)
\put(0,10){\vector(-1,-1){10}}
\put(0,10){\vector(1,1){10}}
\put(-10,20){\vector(1,-1){8}}
\put(10,0){\vector(-1,1){8}}
\end{picture}), \\
\mathcal{O}(D;\setlength{\unitlength}{.75pt}
\begin{picture}(20,20)(-10,7)
\put(0,10){\vector(1,1){10}}
\put(0,10){\vector(-1,-1){10}}
\put(-10,20){\vector(1,-1){10}}
\put(10,0){\vector(-1,1){10}}
\put(4,8){\tiny{$v$}}
\end{picture}) & \xrightarrow{\psi^A} & \mathcal{O}(D^A;\setlength{\unitlength}{.75pt}
\begin{picture}(20,20)(-10,7)
\put(-10,0){\vector(-1,-1){0}}
\put(10,20){\vector(1,1){0}}
\qbezier(-10,0)(0,10)(-10,20)
\qbezier(10,0)(0,10)(10,20)
\end{picture}), \\
\mathcal{O}(D;\setlength{\unitlength}{.75pt}
\begin{picture}(20,20)(-10,7)
\put(0,10){\vector(1,1){10}}
\put(0,10){\vector(-1,-1){10}}
\put(-10,20){\vector(1,-1){10}}
\put(10,0){\vector(-1,1){10}}
\put(4,8){\tiny{$v$}}
\end{picture}) & \xrightarrow{\psi^B} & \mathcal{O}(D^B;\setlength{\unitlength}{.75pt}
\begin{picture}(20,20)(-10,7)
\put(10,20){\vector(1,1){0}}
\put(-10,0){\vector(-1,-1){0}}
\qbezier(-10,0)(0,10)(10,0)
\qbezier(-10,20)(0,10)(10,20)
\end{picture})
\end{eqnarray*}
that preserve the orientations of corresponding edges.
Given a $\varrho \in \mathcal{O}(D;\setlength{\unitlength}{.75pt}
\begin{picture}(20,20)(-10,7)
\put(0,10){\vector(1,1){10}}
\put(0,10){\vector(-1,-1){10}}
\put(-10,20){\vector(1,-1){10}}
\put(10,0){\vector(-1,1){10}}
\put(4,8){\tiny{$v$}}
\end{picture})$, there are partitions
\begin{eqnarray}
\label{partition-D-out} \Sigma(D_\varrho) & = & \Sigma^R(D_\varrho) \sqcup \Sigma^L(D_\varrho), \\
\label{partition-hatD-out} \Sigma(\widehat{D}_{\varphi(\varrho)}) & = & \Sigma^A(\widehat{D}_{\varphi(\varrho)}) \sqcup \Sigma^B(\widehat{D}_{\varphi(\varrho)})
\end{eqnarray}
according to what local resolution is applied to $v$ and the corresponding crossing in $\widehat{D}$. There are bijections
\begin{eqnarray*}
\Sigma^R(D_\varrho) & \xrightarrow{\varphi_\varrho^A} & \Sigma^A(\widehat{D}_{\varphi(\varrho)}), \\
\Sigma^L(D_\varrho) & \xrightarrow{\varphi_\varrho^B} & \Sigma^B(\widehat{D}_{\varphi(\varrho)}), \\
\Sigma^R(D_\varrho) & \xrightarrow{\psi_\varrho^A} & \Sigma(D_{\psi^A(\varrho)}^A), \\
\Sigma^L(D_\varrho) & \xrightarrow{\psi_\varrho^B} & \Sigma(D_{\psi^B(\varrho)}^B)
\end{eqnarray*}
such that the corresponding resolutions are identical outside the parts shown in Figure \ref{D-hatD-DA-DB-fig}.
For a $\varsigma \in \Sigma^R(D_\varrho)$, it is easy to see that $D_{\varrho,\varsigma} = \widehat{D}_{\varphi(\varrho), \varphi_\varrho^A(\varsigma)} = D_{\psi^A(\varrho), \psi_\varrho^A(\varsigma)}^A$. So
\begin{eqnarray}
\label{eq-rot-D-hatD-DA-out} \mathrm{rot}(D_{\varrho,\varsigma}) & = & \mathrm{rot}(\widehat{D}_{\varphi(\varrho), \varphi_\varrho^A(\varsigma)}) = \mathrm{rot}(D_{\psi^A(\varrho), \psi_\varrho^A(\varsigma)}^A), \\
\label{eq-HOMFLY-D-hatD-DA-out} R(D_{\varrho,\varsigma}) & = & R(\widehat{D}_{\varphi(\varrho), \varphi_\varrho^A(\varsigma)}) = R(D_{\psi^A(\varrho), \psi_\varrho^A(\varsigma)}^A).
\end{eqnarray}
One can also easily check that the weights satisfy
\begin{equation}\label{eq-weight-D-hatD-DA-out}
[D_{\varrho},\varsigma] = \frac{q^{-1}}{q-q^{-1}}[\widehat{D}_{\varphi(\varrho)}, \varphi_\varrho^A(\varsigma)] = q^{-1}[D_{\psi^A(\varrho)}^A, \psi_\varrho^A(\varsigma)].
\end{equation}
So
\begin{equation}\label{eq-weight-D-hatD-DA-out-2}
[D_{\varrho},\varsigma] = -[\widehat{D}_{\varphi(\varrho)}, \varphi_\varrho^A(\varsigma)] +q[D_{\psi^A(\varrho)}^A, \psi_\varrho^A(\varsigma)].
\end{equation}
| 3,320 | 69,513 |
% NOTE(review): removed non-LaTeX dataset-extraction artifact lines ("en / train / 0.103.7"); the lines that follow partially duplicate the preceding passage and should be deduplicated against it.
\put(0,10){\vector(-1,-1){10}}
\put(-10,20){\vector(1,-1){10}}
\put(10,0){\vector(-1,1){10}}
\put(4,8){\tiny{$v$}}
\end{picture}) & \xrightarrow{\psi^A} & \mathcal{O}(D^A;\setlength{\unitlength}{.75pt}
\begin{picture}(20,20)(-10,7)
\put(-10,0){\vector(-1,-1){0}}
\put(10,20){\vector(1,1){0}}
\qbezier(-10,0)(0,10)(-10,20)
\qbezier(10,0)(0,10)(10,20)
\end{picture}), \\
\mathcal{O}(D;\setlength{\unitlength}{.75pt}
\begin{picture}(20,20)(-10,7)
\put(0,10){\vector(1,1){10}}
\put(0,10){\vector(-1,-1){10}}
\put(-10,20){\vector(1,-1){10}}
\put(10,0){\vector(-1,1){10}}
\put(4,8){\tiny{$v$}}
\end{picture}) & \xrightarrow{\psi^B} & \mathcal{O}(D^B;\setlength{\unitlength}{.75pt}
\begin{picture}(20,20)(-10,7)
\put(10,20){\vector(1,1){0}}
\put(-10,0){\vector(-1,-1){0}}
\qbezier(-10,0)(0,10)(10,0)
\qbezier(-10,20)(0,10)(10,20)
\end{picture})
\end{eqnarray*}
that preserve the orientations of corresponding edges.
Given a $\varrho \in \mathcal{O}(D;\setlength{\unitlength}{.75pt}
\begin{picture}(20,20)(-10,7)
\put(0,10){\vector(1,1){10}}
\put(0,10){\vector(-1,-1){10}}
\put(-10,20){\vector(1,-1){10}}
\put(10,0){\vector(-1,1){10}}
\put(4,8){\tiny{$v$}}
\end{picture})$, there are partitions
\begin{eqnarray}
\label{partition-D-out} \Sigma(D_\varrho) & = & \Sigma^R(D_\varrho) \sqcup \Sigma^L(D_\varrho), \\
\label{partition-hatD-out} \Sigma(\widehat{D}_{\varphi(\varrho)}) & = & \Sigma^A(\widehat{D}_{\varphi(\varrho)}) \sqcup \Sigma^B(\widehat{D}_{\varphi(\varrho)})
\end{eqnarray}
according to what local resolution is applied to $v$ and the corresponding crossing in $\widehat{D}$. There are bijections
\begin{eqnarray*}
\Sigma^R(D_\varrho) & \xrightarrow{\varphi_\varrho^A} & \Sigma^A(\widehat{D}_{\varphi(\varrho)}), \\
\Sigma^L(D_\varrho) & \xrightarrow{\varphi_\varrho^B} & \Sigma^B(\widehat{D}_{\varphi(\varrho)}), \\
\Sigma^R(D_\varrho) & \xrightarrow{\psi_\varrho^A} & \Sigma(D_{\psi^A(\varrho)}^A), \\
\Sigma^L(D_\varrho) & \xrightarrow{\psi_\varrho^B} & \Sigma(D_{\psi^B(\varrho)}^B)
\end{eqnarray*}
such that the corresponding resolutions are identical outside the parts shown in Figure \ref{D-hatD-DA-DB-fig}.
For a $\varsigma \in \Sigma^R(D_\varrho)$, it is easy to see that $D_{\varrho,\varsigma} = \widehat{D}_{\varphi(\varrho), \varphi_\varrho^A(\varsigma)} = D_{\psi^A(\varrho), \psi_\varrho^A(\varsigma)}^A$. So
\begin{eqnarray}
\label{eq-rot-D-hatD-DA-out} \mathrm{rot}(D_{\varrho,\varsigma}) & = & \mathrm{rot}(\widehat{D}_{\varphi(\varrho), \varphi_\varrho^A(\varsigma)}) = \mathrm{rot}(D_{\psi^A(\varrho), \psi_\varrho^A(\varsigma)}^A), \\
\label{eq-HOMFLY-D-hatD-DA-out} R(D_{\varrho,\varsigma}) & = & R(\widehat{D}_{\varphi(\varrho), \varphi_\varrho^A(\varsigma)}) = R(D_{\psi^A(\varrho), \psi_\varrho^A(\varsigma)}^A).
\end{eqnarray}
One can also easily check that the weights satisfy
\begin{equation}\label{eq-weight-D-hatD-DA-out}
[D_{\varrho},\varsigma] = \frac{q^{-1}}{q-q^{-1}}[\widehat{D}_{\varphi(\varrho)}, \varphi_\varrho^A(\varsigma)] = q^{-1}[D_{\psi^A(\varrho)}^A, \psi_\varrho^A(\varsigma)].
\end{equation}
So
\begin{equation}\label{eq-weight-D-hatD-DA-out-2}
[D_{\varrho},\varsigma] = -[\widehat{D}_{\varphi(\varrho)}, \varphi_\varrho^A(\varsigma)] +q[D_{\psi^A(\varrho)}^A, \psi_\varrho^A(\varsigma)].
\end{equation}
Combining equations \eqref{eq-rot-D-hatD-DA-out}, \eqref{eq-HOMFLY-D-hatD-DA-out} and \eqref{eq-weight-D-hatD-DA-out-2}, we get
\begin{eqnarray}
\label{eq-sum-D-hatD-DA-out} && \sum_{\varsigma \in \Sigma^R(D_\varrho)} (a^{-1}q)^{\mathrm{rot}(D_{\varrho,\varsigma})} [D_\varrho,\varsigma] R(D_{\varrho,\varsigma})(q,a)\\
& = & -\sum_{\varsigma \in \Sigma^A(\widehat{D}_{\varphi(\varrho)})} (a^{-1}q)^{\mathrm{rot}(\widehat{D}_{{\varphi(\varrho)},\varsigma})} [\widehat{D}_{\varphi(\varrho)},\varsigma] R(\widehat{D}_{\varphi(\varrho),\varsigma})(q,a) \nonumber \\
&& + q \sum_{\varsigma \in \Sigma(D^A_{\psi^A(\varrho)})} (a^{-1}q)^{\mathrm{rot}(D^A_{\psi^A(\varrho),\varsigma})} [D^A_{\psi^A(\varrho)},\varsigma] R(D^A_{\psi^A(\varrho),\varsigma})(q,a). \nonumber
\end{eqnarray}
Similarly, one gets
\begin{eqnarray}
\label{eq-sum-D-hatD-DB-out} && \sum_{\varsigma \in \Sigma^L(D_\varrho)} (a^{-1}q)^{\mathrm{rot}(D_{\varrho,\varsigma})} [D_\varrho,\varsigma] R(D_{\varrho,\varsigma})(q,a)\\
& = & -\sum_{\varsigma \in \Sigma^B(\widehat{D}_{\varphi(\varrho)})} (a^{-1}q)^{\mathrm{rot}(\widehat{D}_{\varphi(\varrho),\varsigma})} [\widehat{D}_{\varphi(\varrho)},\varsigma] R(\widehat{D}_{\varphi(\varrho),\varsigma})(q,a) \nonumber \\
&& + q^{-1} \sum_{\varsigma \in \Sigma(D^B_{\psi^B(\varrho)})} (a^{-1}q)^{\mathrm{rot}(D^B_{\psi^B(\varrho),\varsigma})} [D^B_{\psi^B(\varrho)},\varsigma] R(D^B_{\psi^B(\varrho),\varsigma})(q,a). \nonumber
\end{eqnarray}
Equations \eqref{eq-sum-D-hatD-DA-out} and \eqref{eq-sum-D-hatD-DB-out} imply that
\begin{eqnarray}
\label{eq-Jaeger-D-hatD-DA-DB-out} && \sum_{\varrho \in \mathcal{O}(D;\setlength{\unitlength}{.75pt}
\begin{picture}(20,20)(-10,7)
\put(0,10){\vector(1,1){10}}
\put(0,10){\vector(-1,-1){10}}
\put(-10,20){\vector(1,-1){10}}
\put(10,0){\vector(-1,1){10}}
\put(4,8){\tiny{$v$}}
\end{picture})} \sum_{\varsigma \in \Sigma(D_\varrho)} (a^{-1}q)^{\mathrm{rot}(D_{\varrho,\varsigma})} [D_\varrho,\varsigma] R(D_{\varrho,\varsigma})(q,a)\\
& = & -\sum_{\varrho \in \mathcal{O}(\widehat{D};\setlength{\unitlength}{.75pt}
\begin{picture}(20,20)(-10,7)
\put(0,10){\vector(-1,-1){10}}
\put(0,10){\vector(1,1){10}}
\put(-10,20){\vector(1,-1){8}}
\put(10,0){\vector(-1,1){8}}
\end{picture})} \sum_{\varsigma \in \Sigma(\widehat{D}_\varrho)} (a^{-1}q)^{\mathrm{rot}(\widehat{D}_{\varrho,\varsigma})} [\widehat{D}_\varrho,\varsigma] R(\widehat{D}_{\varrho,\varsigma})(q,a) \nonumber \\
&& + q \sum_{\varrho \in \mathcal{O}(D^A;\setlength{\unitlength}{.75pt}
\begin{picture}(20,20)(-10,7)
\put(-10,0){\vector(-1,-1){0}}
\put(10,20){\vector(1,1){0}}
\qbezier(-10,0)(0,10)(-10,20)
\qbezier(10,0)(0,10)(10,20)
\end{picture})} \sum_{\varsigma \in \Sigma(D^A_\varrho)} (a^{-1}q)^{\mathrm{rot}(D^A_{\varrho,\varsigma})} [D^A_\varrho,\varsigma] R(D^A_{\varrho,\varsigma})(q,a) \nonumber \\
&& +q^{-1} \sum_{\varrho \in \mathcal{O}(D^B;\setlength{\unitlength}{.75pt}
\begin{picture}(20,20)(-10,7)
\put(10,20){\vector(1,1){0}}
\put(-10,0){\vector(-1,-1){0}}
\qbezier(-10,0)(0,10)(10,0)
\qbezier(-10,20)(0,10)(10,20)
\end{picture})} \sum_{\varsigma \in \Sigma(D^B_\varrho)} (a^{-1}q)^{\mathrm{rot}(D^B_{\varrho,\varsigma})} [D^B_\varrho,\varsigma] R(D^B_{\varrho,\varsigma})(q,a)\nonumber.
\end{eqnarray}
A similar argument shows that
\begin{eqnarray}
\label{eq-Jaeger-D-hatD-DA-DB-in} && \sum_{\varrho \in \mathcal{O}(D;\setlength{\unitlength}{.75pt}
\begin{picture}(20,20)(-10,7)
\put(10,20){\vector(-1,-1){10}}
\put(-10,0){\vector(1,1){10}}
\put(0,10){\vector(-1,1){10}}
\put(0,10){\vector(1,-1){10}}
\put(4,8){\tiny{$v$}}
\end{picture})} \sum_{\varsigma \in \Sigma(D_\varrho)} (a^{-1}q)^{\mathrm{rot}(D_{\varrho,\varsigma})} [D_\varrho,\varsigma] R(D_{\varrho,\varsigma})(q,a)\\
& = & q \sum_{\varrho \in \mathcal{O}(D^A;\setlength{\unitlength}{.75pt}
\begin{picture}(20,20)(-10,7)
\put(-10,20){\vector(-1,1){0}}
\put(10,0){\vector(1,-1){0}}
\qbezier(-10,0)(0,10)(-10,20)
\qbezier(10,0)(0,10)(10,20)
\end{picture})} \sum_{\varsigma \in \Sigma(D^A_\varrho)} (a^{-1}q)^{\mathrm{rot}(D^A_{\varrho,\varsigma})} [D^A_\varrho,\varsigma] R(D^A_{\varrho,\varsigma})(q,a) \nonumber \\
&& +q^{-1} \sum_{\varrho \in \mathcal{O}(D^B;\setlength{\unitlength}{.75pt}
\begin{picture}(20,20)(-10,7)
\put(-10,20){\vector(-1,1){0}}
\put(10,0){\vector(1,-1){0}}
\qbezier(-10,0)(0,10)(10,0)
\qbezier(-10,20)(0,10)(10,20)
\end{picture})} \sum_{\varsigma \in \Sigma(D^B_\varrho)} (a^{-1}q)^{\mathrm{rot}(D^B_{\varrho,\varsigma})} [D^B_\varrho,\varsigma] R(D^B_{\varrho,\varsigma})(q,a)\nonumber.
\end{eqnarray}
From partitions \eqref{O-D-Subsets}, \eqref{O-hatD-Subsets}, \eqref{O-DA-Subsets} and \eqref{O-DB-Subsets}, we know that equations \eqref{eq-Jaeger-D-hatD-DA-DB-u}, \eqref{eq-Jaeger-D-hatD-DA-DB-r}, \eqref{eq-Jaeger-D-hatD-DA-DB-d}, \eqref{eq-Jaeger-D-hatD-DA-DB-l}, \eqref{eq-Jaeger-D-hatD-DA-DB-out} and \eqref{eq-Jaeger-D-hatD-DA-DB-in} imply that \eqref{eq-Jaeger-D-hatD-DA-DB} is true. This proves that \eqref{eq-Jaeger-formula-graph} is true for $D$. So we have completed the induction and proved \eqref{eq-Jaeger-formula-graph}. Plugging $a=q^N$ into equation \eqref{eq-Jaeger-formula-graph}, we get \eqref{eq-Jaeger-formula-N-graph}.
\end{proof}
| 3,946 | 69,513 |
en
|
train
|
0.103.8
|
\section{Color and Orientation in the $\mathfrak{sl}(N)$ MOY Polynomial} \label{sec-MOY}
In Section \ref{sec-Jaeger}, we only discussed a very special case of the MOY graph polynomial. In this section, we review the $\mathfrak{sl}(N)$ MOY polynomial in its full generality and prove that it is invariant under certain changes of color and orientation. In fact, such invariance holds for the colored $\mathfrak{sl}(N)$ homology too.
| 117 | 69,513 |
en
|
train
|
0.103.9
|
\subsection{The $\mathfrak{sl}(N)$ MOY graph polynomial} In this subsection, we review the $\mathfrak{sl}(N)$ MOY graph polynomial defined in \cite{MOY}. Our notations and normalizations are slightly different from those used in \cite{MOY}.
\begin{figure}\label{fig-MOY-vertex}
\end{figure}
\begin{definition}\label{def-MOY}
A MOY coloring of an oriented trivalent graph is a function from the set of edges of this graph to the set of non-negative integers such that every vertex of the colored graph is of one of the two types in Figure \ref{fig-MOY-vertex}.
A MOY graph is an oriented trivalent graph equipped with a MOY coloring \textbf{embedded} in the plane.
A knotted MOY graph is an oriented trivalent graph equipped with a MOY coloring \textbf{immersed} in the plane such that
\begin{itemize}
\item the set of singularities consists of finitely many transversal double points away from vertices,
\item at each of these transversal double points, we specify the upper- and the lower- branches (which makes it a crossing.)
\end{itemize}
\end{definition}
Fix a positive integer $N$. Define $\mathcal{N}= \{2k-N+1|k=0,1,\dots, N-1\}$ and denote by $\mathcal{P}(\mathcal{N})$ the power set of $\mathcal{N}$.
Let $\Gamma$ be a MOY graph. Denote by $E(\Gamma)$ the set of edges of $\Gamma$, by $V(\Gamma)$ the set of vertices of $\Gamma$ and by $\mathsf{c}:E(\Gamma) \rightarrow \mathbb{Z}_{\geq 0}$ the color function of $\Gamma$. That is, for every edge $e$ of $\Gamma$, $\mathsf{c}(e) \in \mathbb{Z}_{\geq 0}$ is the color of $e$.
\begin{definition}\label{MOY-state-def}
A state of $\Gamma$ is a function $\varphi: E(\Gamma) \rightarrow \mathcal{P}(\mathcal{N})$ such that
\begin{enumerate}[(i)]
\item for every edge $e$ of $\Gamma$, $\#\varphi(e) = \mathsf{c}(e)$,
\item for every vertex $v$ of $\Gamma$, as depicted in Figure \ref{fig-MOY-vertex}, we have $\varphi(e)=\varphi(e_1) \cup \varphi(e_2)$.
\end{enumerate}
Note that (i) and (ii) imply that $\varphi(e_1) \cap \varphi(e_2)=\emptyset$.
Denote by $\mathcal{S}_N(\Gamma)$ the set of states of $\Gamma$.
\end{definition}
Define a function $\pi:\mathcal{P}(\mathcal{N}) \times \mathcal{P}(\mathcal{N}) \rightarrow \mathbb{Z}_{\geq 0}$ by
\begin{equation}\label{eq-def-pi}
\pi (A_1, A_2) = \# \{(a_1,a_2) \in A_1 \times A_2 ~|~ a_1>a_2\} \text{ for } A_1,~A_2 \in \mathcal{P}(\mathcal{N}).
\end{equation}
Let $\varphi$ be a state of $\Gamma$. For a vertex $v$ of $\Gamma$ (as depicted in Figure \ref{fig-MOY-vertex}), the weight of $v$ with respect to $\varphi$ is defined to be
\begin{equation}\label{eq-weight-vertex}
\mathrm{wt}(v;\varphi) = \frac{\mathsf{c}(e_1)\mathsf{c}(e_2)}{2} - \pi(\varphi(e_1),\varphi(e_2)).
\end{equation}
Next, replace each edge $e$ of $\Gamma$ by $\mathsf{c}(e)$ parallel edges, assign to each of these new edges a different element of $\varphi(e)$ and, at every vertex, connect each pair of new edges assigned the same element of $\mathcal{N}$. This changes $\Gamma$ into a collection $\mathcal{C}_\varphi$ of embedded oriented circles, each of which is assigned an element of $\mathcal{N}$. By abusing notation, we denote by $\varphi(C)$ the element of $\mathcal{N}$ assigned to $C\in \mathcal{C}_\varphi$. Note that:
\begin{itemize}
\item There may be intersections between different circles in $\mathcal{C}_\varphi$. But, each circle in $\mathcal{C}_\varphi$ is embedded, that is, it has no self-intersection or self-tangency.
\item There may be more than one way to do this. But if we view $\mathcal{C}_\varphi$ as a virtual link and the intersection points between different elements of $\mathcal{C}_\varphi$ are virtual crossings, then the above construction is unique up to purely virtual regular Reidemeister moves.
\end{itemize}
The rotation number $\mathrm{rot}(\varphi)$ of $\varphi$ is then defined to be
\begin{equation}\label{eq-rot-state}
\mathrm{rot}(\varphi) = \sum_{C\in \mathcal{C}_\varphi} \varphi(C) \mathrm{rot}(C).
\end{equation}
Note that the sum $\sum_{C\in \mathcal{C}_\varphi} \mathrm{rot}(C)$ is independent of the choice of $\varphi \in \mathcal{S}_N(\Gamma)$. We call this sum the rotation number of $\Gamma$. That is,
\begin{equation}\label{eq-rot-gamma}
\mathrm{rot}(\Gamma) := \sum_{C\in \mathcal{C}_\varphi} \mathrm{rot}(C).
\end{equation}
\begin{definition}\label{def-MOY-graph-poly}\cite{MOY}
The $\mathfrak{sl}(N)$ MOY graph polynomial of $\Gamma$ is defined to be
\begin{equation}\label{MOY-bracket-def}
\left\langle \Gamma \right\rangle_N := \begin{cases}
\sum_{\varphi \in \mathcal{S}_N(\Gamma)} \left(\prod_{v \in V(\Gamma)} q^{\mathrm{wt}(v;\varphi)}\right) q^{\mathrm{rot}(\varphi)} & \text{if } 0\leq \mathsf{c}(e) \leq N ~\forall ~e \in E(\Gamma), \\
0 & \text{otherwise}.
\end{cases}
\end{equation}
For a knotted MOY graph $D$, define the $\mathfrak{sl}(N)$ MOY polynomial $\left\langle D \right\rangle_N$ of $D$ by applying the following skein sum at every crossing of $D$.
\begin{equation}\label{MOY-skein-general-+}
\left\langle \setlength{\unitlength}{1pt}
\begin{picture}(40,40)(-20,0)
\put(-20,-20){\vector(1,1){40}}
\put(20,-20){\line(-1,1){15}}
\put(-5,5){\vector(-1,1){15}}
\put(-11,15){\tiny{$_m$}}
\put(9,15){\tiny{$_n$}}
\end{picture} \right\rangle_N = \sum_{k=\max\{0,m-n\}}^{m} (-1)^{m-k} q^{k-m}\left\langle \setlength{\unitlength}{1pt}
\begin{picture}(70,60)(-35,30)
\put(-15,0){\vector(0,1){20}}
\put(-15,20){\vector(0,1){20}}
\put(-15,40){\vector(0,1){20}}
\put(15,0){\vector(0,1){20}}
\put(15,20){\vector(0,1){20}}
\put(15,40){\vector(0,1){20}}
\put(15,20){\vector(-1,0){30}}
\put(-15,40){\vector(1,0){30}}
\put(-25,5){\tiny{$_{n}$}}
\put(-25,55){\tiny{$_{m}$}}
\put(-30,30){\tiny{$_{n+k}$}}
\put(-2,15){\tiny{$_{k}$}}
\put(-12,43){\tiny{$_{n+k-m}$}}
\put(18,5){\tiny{$_{m}$}}
\put(18,55){\tiny{$_{n}$}}
\put(18,30){\tiny{$_{m-k}$}}
\end{picture}\right\rangle_N,
\end{equation}
\begin{equation}\label{MOY-skein-general--}
\left\langle \setlength{\unitlength}{1pt}
\begin{picture}(40,40)(-20,0)
\put(20,-20){\vector(-1,1){40}}
\put(-20,-20){\line(1,1){15}}
\put(5,5){\vector(1,1){15}}
\put(-11,15){\tiny{$_m$}}
\put(9,15){\tiny{$_n$}}
\end{picture} \right\rangle_N = \sum_{k=\max\{0,m-n\}}^{m} (-1)^{k-m} q^{m-k}\left\langle \setlength{\unitlength}{1pt}
\begin{picture}(70,60)(-35,30)
\put(-15,0){\vector(0,1){20}}
\put(-15,20){\vector(0,1){20}}
\put(-15,40){\vector(0,1){20}}
\put(15,0){\vector(0,1){20}}
\put(15,20){\vector(0,1){20}}
\put(15,40){\vector(0,1){20}}
\put(15,20){\vector(-1,0){30}}
\put(-15,40){\vector(1,0){30}}
\put(-25,5){\tiny{$_{n}$}}
\put(-25,55){\tiny{$_{m}$}}
\put(-30,30){\tiny{$_{n+k}$}}
\put(-2,15){\tiny{$_{k}$}}
\put(-12,43){\tiny{$_{n+k-m}$}}
\put(18,5){\tiny{$_{m}$}}
\put(18,55){\tiny{$_{n}$}}
\put(18,30){\tiny{$_{m-k}$}}
\end{picture}\right\rangle_N.
\end{equation}
\end{definition}
\begin{theorem}\cite{MOY}
$\left\langle D \right\rangle_N$ is invariant under Reidemeister (II), (III) moves and changes under Reidemeister (I) moves only by a factor of $\pm q^k$, which depends on the color of the edge involved in the Reidemeister (I) move.
\end{theorem}
As pointed out in \cite{MOY}, if $D$ is a link diagram colored by positive integers, then $\left\langle D \right\rangle_N$ is the Reshetikhin-Turaev $\mathfrak{sl}(N)$ polynomial of the link colored by corresponding wedge powers of the defining representation of $\mathfrak{sl}(N;\mathbb{C})$.
\begin{figure}\label{4-valent-to-MOY-fig}
\end{figure}
Let $D$ be an oriented knotted $4$-valent graph as defined in Subsection \ref{subsec-Jaeger-graph}. We color all edges of $D$ by $1$ and modify its vertices as in Figure \ref{4-valent-to-MOY-fig}. This gives us a MOY graph, which we identify with $D$. Thus, $\left\langle D\right\rangle_N$ is now defined for any oriented knotted $4$-valent graph $D$. Moreover, it was established in \cite{MOY} that
\begin{equation}\label{MOY-skein-special}
\begin{cases}
\left\langle \setlength{\unitlength}{.75pt}
\begin{picture}(20,20)(-10,7)
\put(0,10){\circle{15}}
\end{picture}\right\rangle_N = \frac{q^N-q^{-N}}{q-q^{-1}} \\
\left\langle \setlength{\unitlength}{.75pt}
\begin{picture}(20,20)(-10,7)
\put(-10,0){\vector(1,1){20}}
\put(-2,12){\vector(-1,1){8}}
\put(2,8){\line(1,-1){8}}
\end{picture}\right\rangle_N - \left\langle \setlength{\unitlength}{.75pt}
\begin{picture}(20,20)(-10,7)
\put(10,0){\vector(-1,1){20}}
\put(2,12){\vector(1,1){8}}
\put(-2,8){\line(-1,-1){8}}
\end{picture}\right\rangle_N = (q-q^{-1})\left\langle \setlength{\unitlength}{.75pt}
\begin{picture}(20,20)(-10,7)
\put(-10,20){\vector(-1,1){0}}
\put(10,20){\vector(1,1){0}}
\qbezier(-10,0)(0,10)(-10,20)
\qbezier(10,0)(0,10)(10,20)
\end{picture}\right\rangle_N \\
\left\langle \setlength{\unitlength}{.75pt}
\begin{picture}(20,20)(-10,7)
\put(-10,0){\line(1,1){12}}
\put(-2,12){\line(-1,1){8}}
\qbezier(2,12)(10,20)(10,10)
\qbezier(2,8)(10,0)(10,10)
\end{picture}\right\rangle_N = -q^{-N} \left\langle \setlength{\unitlength}{.75pt}
\begin{picture}(15,20)(-10,7)
\qbezier(-10,0)(10,10)(-10,20)
\end{picture}\right\rangle_N \\
\left\langle \setlength{\unitlength}{.75pt}
\begin{picture}(20,20)(-10,7)
\put(0,10){\vector(1,1){10}}
\put(-10,0){\vector(1,1){10}}
\put(0,10){\vector(-1,1){10}}
\put(10,0){\vector(-1,1){10}}
\end{picture}\right\rangle_N = \left\langle \setlength{\unitlength}{.75pt}
\begin{picture}(20,20)(-10,7)
\put(-10,0){\vector(1,1){20}}
\put(-2,12){\vector(-1,1){8}}
\put(2,8){\line(1,-1){8}}
\end{picture}\right\rangle_N + q^{-1} \left\langle \setlength{\unitlength}{.75pt}
\begin{picture}(20,20)(-10,7)
\put(-10,20){\vector(-1,1){0}}
\put(10,20){\vector(1,1){0}}
\qbezier(-10,0)(0,10)(-10,20)
| 3,987 | 69,513 |
en
|
train
|
0.103.10
|
\put(-25,5){\tiny{$_{n}$}}
\put(-25,55){\tiny{$_{m}$}}
\put(-30,30){\tiny{$_{n+k}$}}
\put(-2,15){\tiny{$_{k}$}}
\put(-12,43){\tiny{$_{n+k-m}$}}
\put(18,5){\tiny{$_{m}$}}
\put(18,55){\tiny{$_{n}$}}
\put(18,30){\tiny{$_{m-k}$}}
\end{picture}\right\rangle_N.
\end{equation}
\end{definition}
\begin{theorem}\cite{MOY}
$\left\langle D \right\rangle_N$ is invariant under Reidemeister (II), (III) moves and changes under Reidemeister (I) moves only by a factor of $\pm q^k$, which depends on the color of the edge involved in the Reidemeister (I) move.
\end{theorem}
As pointed out in \cite{MOY}, if $D$ is a link diagram colored by positive integers, then $\left\langle D \right\rangle_N$ is the Reshetikhin-Turaev $\mathfrak{sl}(N)$ polynomial of the link colored by corresponding wedge powers of the defining representation of $\mathfrak{sl}(N;\mathbb{C})$.
\begin{figure}\label{4-valent-to-MOY-fig}
\end{figure}
Let $D$ be an oriented knotted $4$-valent graph as defined in Subsection \ref{subsec-Jaeger-graph}. We color all edges of $D$ by $1$ and modify its vertices as in Figure \ref{4-valent-to-MOY-fig}. This gives us a MOY graph, which we identify with $D$. Thus, $\left\langle D\right\rangle_N$ is now defined for any oriented knotted $4$-valent graph $D$. Moreover, it was established in \cite{MOY} that
\begin{equation}\label{MOY-skein-special}
\begin{cases}
\left\langle \setlength{\unitlength}{.75pt}
\begin{picture}(20,20)(-10,7)
\put(0,10){\circle{15}}
\end{picture}\right\rangle_N = \frac{q^N-q^{-N}}{q-q^{-1}} \\
\left\langle \setlength{\unitlength}{.75pt}
\begin{picture}(20,20)(-10,7)
\put(-10,0){\vector(1,1){20}}
\put(-2,12){\vector(-1,1){8}}
\put(2,8){\line(1,-1){8}}
\end{picture}\right\rangle_N - \left\langle \setlength{\unitlength}{.75pt}
\begin{picture}(20,20)(-10,7)
\put(10,0){\vector(-1,1){20}}
\put(2,12){\vector(1,1){8}}
\put(-2,8){\line(-1,-1){8}}
\end{picture}\right\rangle_N = (q-q^{-1})\left\langle \setlength{\unitlength}{.75pt}
\begin{picture}(20,20)(-10,7)
\put(-10,20){\vector(-1,1){0}}
\put(10,20){\vector(1,1){0}}
\qbezier(-10,0)(0,10)(-10,20)
\qbezier(10,0)(0,10)(10,20)
\end{picture}\right\rangle_N \\
\left\langle \setlength{\unitlength}{.75pt}
\begin{picture}(20,20)(-10,7)
\put(-10,0){\line(1,1){12}}
\put(-2,12){\line(-1,1){8}}
\qbezier(2,12)(10,20)(10,10)
\qbezier(2,8)(10,0)(10,10)
\end{picture}\right\rangle_N = -q^{-N} \left\langle \setlength{\unitlength}{.75pt}
\begin{picture}(15,20)(-10,7)
\qbezier(-10,0)(10,10)(-10,20)
\end{picture}\right\rangle_N \\
\left\langle \setlength{\unitlength}{.75pt}
\begin{picture}(20,20)(-10,7)
\put(0,10){\vector(1,1){10}}
\put(-10,0){\vector(1,1){10}}
\put(0,10){\vector(-1,1){10}}
\put(10,0){\vector(-1,1){10}}
\end{picture}\right\rangle_N = \left\langle \setlength{\unitlength}{.75pt}
\begin{picture}(20,20)(-10,7)
\put(-10,0){\vector(1,1){20}}
\put(-2,12){\vector(-1,1){8}}
\put(2,8){\line(1,-1){8}}
\end{picture}\right\rangle_N + q^{-1} \left\langle \setlength{\unitlength}{.75pt}
\begin{picture}(20,20)(-10,7)
\put(-10,20){\vector(-1,1){0}}
\put(10,20){\vector(1,1){0}}
\qbezier(-10,0)(0,10)(-10,20)
\qbezier(10,0)(0,10)(10,20)
\end{picture}\right\rangle_N = \left\langle \setlength{\unitlength}{.75pt}
\begin{picture}(20,20)(-10,7)
\put(10,0){\vector(-1,1){20}}
\put(2,12){\vector(1,1){8}}
\put(-2,8){\line(-1,-1){8}}
\end{picture} \right\rangle_N + q \left\langle \setlength{\unitlength}{.75pt}
\begin{picture}(20,20)(-10,7)
\put(-10,20){\vector(-1,1){0}}
\put(10,20){\vector(1,1){0}}
\qbezier(-10,0)(0,10)(-10,20)
\qbezier(10,0)(0,10)(10,20)
\end{picture}\right\rangle_N
\end{cases}
\end{equation}
(Note that our normalization of $\left\langle D\right\rangle_N$ is different from that in \cite{MOY}. Please refer to \cite[Theorem 14.2]{Wu-color} to see how the skein relations in \cite{MOY} translate to our normalization.)
Comparing skein relation \eqref{MOY-skein-special} to skein relations \eqref{HOMFLY-skein} and \eqref{HOMFLY-skein-vertex}, one can see that
\begin{equation}\label{eq-MOY-HOMFLY}
\left\langle D \right\rangle_N = (-1)^m R_N(\overline{D}),
\end{equation}
where $D$ is an oriented knotted $4$-valent graph, $m$ is the number of crossings in $D$, and $\overline{D}$ is the oriented knotted $4$-valent graph obtained from $D$ by switching the upper- and the lower-branches at every crossing of $D$.
| 1,992 | 69,513 |
en
|
train
|
0.103.11
|
\subsection{Reversing the orientation and the color along a simple circuit} In the remainder of this section, we fix a positive integer $N$.
Let $\Gamma$ be a MOY graph and $\Delta$ a simple circuit of $\Gamma$. That is, $\Delta$ is a subgraph of $\Gamma$ such that
\begin{enumerate}[(i)]
\item $\Delta$ is a (piecewise smoothly) embedded circle in $\mathbb{R}^2$;
\item the orientations of all edges of $\Delta$ coincide with the same orientation of this embedded circle.
\end{enumerate}
We call the color change $k \leadsto N-k$ a reversal of color. It is easy to see that, if we reverse both the orientation and the color of the edges along $\Delta$, then we get another MOY graph $\Gamma'$. We have the following theorem.
\begin{theorem}\label{thm-oc-reverse}
\begin{equation}\label{eq-oc-reverse}
\left\langle \Gamma \right\rangle_N = \left\langle \Gamma' \right\rangle_N
\end{equation}
\end{theorem}
\begin{proof}
We prove equation \eqref{eq-oc-reverse} using a localized formulation of the state sum \eqref{MOY-bracket-def}.
Cut each edge of $\Gamma$ at one point in its interior. This divides $\Gamma$ into a collection of neighborhoods of its vertices, each of which is a vertex with three adjacent half-edges. (See Figure \ref{fig-MOY-vertex-angles}, where $e$, $e_1$ and $e_2$ are the three half-edges.)
\begin{figure}\label{fig-MOY-vertex-angles}
\end{figure}
Let $\varphi \in \mathcal{S}_N(\Gamma)$. For a vertex of $\Gamma$, if it is of the form $v$ in Figure \ref{fig-MOY-vertex-angles}, we denote by $\alpha$ the directed angle from $e_1$ to $e$ and by $\beta$ the directed angle from $e_2$ to $e$. We define
\begin{eqnarray}
\label{rot-def-local-v} \mathrm{rot}(v;\varphi)
& = & \frac{1}{2\pi} \int_{e}\kappa ds \cdot \sum \varphi(e) +\frac{1}{2\pi}\left(\alpha + \int_{e_1}\kappa ds\right) \cdot \sum \varphi(e_1) \\
&& + \frac{1}{2\pi}\left(\beta + \int_{e_2}\kappa ds\right) \cdot \sum \varphi(e_2), \nonumber
\end{eqnarray}
where $\kappa$ is the signed curvature of a plane curve and $\sum A:=\sum_{a\in A} a$ for a subset $A$ of $\mathcal{N}= \{2k-N+1|k=0,1,\dots, N-1\}$.
If the vertex is of the form $\hat{v}$ in Figure \ref{fig-MOY-vertex-angles}, we denote by $\hat{\alpha}$ the directed angle from $e$ to $e_1$ and by $\hat{\beta}$ the directed angle from $e$ to $e_2$. We define
\begin{eqnarray}
\label{rot-def-local-v-prime} \mathrm{rot}(\hat{v};\varphi)
& = & \frac{1}{2\pi}\int_{e}\kappa ds \cdot \sum \varphi(e) + \frac{1}{2\pi} \left(\hat{\alpha}+\int_{e_1}\kappa ds\right) \cdot \sum \varphi(e_1) \\
&& + \frac{1}{2\pi}\left(\hat{\beta}+\int_{e_2}\kappa ds\right) \cdot \sum \varphi(e_2) . \nonumber
\end{eqnarray}
Using the Gauss-Bonnet Theorem, one can easily check that
\begin{equation} \label{eq-rot-sum}
\mathrm{rot}(\varphi) = \sum_{v \in V(\Gamma)} \mathrm{rot}(v;\varphi).
\end{equation}
So, by Definition \ref{def-MOY-graph-poly}, we have
\begin{equation} \label{eq-MOY-local}
\left\langle \Gamma \right\rangle_N = \sum_{\varphi \in \mathcal{S}_N(\Gamma)}\prod_{v \in V(\Gamma)} q^{\mathrm{wt}(v;\varphi)+\mathrm{rot}(v;\varphi)}.
\end{equation}
Since $\Gamma'$ is obtained from $\Gamma$ by reversing the orientation and the color of the edges along $\Delta$, there are natural bijections between $V(\Gamma)$ and $V(\Gamma')$ and between $E(\Gamma)$ and $E(\Gamma')$. Basically, every vertex corresponds to itself and every edge corresponds to itself (with reversed color and orientation if the edge belongs to $\Delta$.) For a vertex $v$ of $\Gamma$, we denote by $v'$ the vertex of $\Gamma'$ corresponding to $v$. For an edge $e$ of $\Gamma$, we denote by $e'$ the edge of $\Gamma'$ corresponding to $e$. Given a $\varphi \in \mathcal{S}_N(\Gamma)$, we define $\varphi': E(\Gamma') \rightarrow \mathcal{P}(\mathcal{N})$ by
\begin{equation}\label{eq-varphi-prime-def}
\varphi'(e') = \begin{cases}
\varphi(e) & \text{if } e \notin E(\Delta); \\
\mathcal{N} \setminus \varphi(e) & \text{if } e \in E(\Delta).
\end{cases}
\end{equation}
It is easy to see that $\varphi' \in \mathcal{S}_N(\Gamma')$ and that $\varphi \mapsto \varphi'$ is a bijection from $\mathcal{S}_N(\Gamma)$ to $\mathcal{S}_N(\Gamma')$.
We claim that, for all $v \in V(\Gamma)$ and $\varphi \in \mathcal{S}_N(\Gamma)$,
\begin{equation}\label{eq-local-state-match}
\mathrm{wt}(v;\varphi)+ \mathrm{rot}(v;\varphi) = \mathrm{wt}(v';\varphi') + \mathrm{rot}(v';\varphi').
\end{equation}
From equation \eqref{eq-MOY-local}, one can see that equation \eqref{eq-local-state-match} implies Theorem \ref{thm-oc-reverse}.
To prove equation \eqref{eq-local-state-match}, we need to consider how the change $\Gamma \leadsto \Gamma'$ affects the vertex $v$. If $v$ is not a vertex of $\Delta$, then none of the three edges incident at $v$ is changed. So equation \eqref{eq-local-state-match} is trivially true. If $v$ is a vertex of $\Delta$, then exactly two edges incident at $v$ are changed, and one of these changed edges must be the edge $e$ in Figure \ref{fig-MOY-vertex-angles} (for $v$ in either form.) So, counting the choices of the form of $v$ and the choices of the other changed edge, there are four possible ways to change $v$ if $v$ is a vertex of $\Delta$. (See Figure \ref{rotation-numbers-oc-reverse-index-fig} below.) The proofs of \eqref{eq-local-state-match} in these four cases are very similar. So we only give the details for the case in Figure \ref{fig-MOY-vertex-change} and leave the other cases to the reader.
\begin{figure}\label{fig-MOY-vertex-change}
\end{figure}
First, let us consider $\mathrm{wt}(v;\varphi)$ and $\mathrm{wt}(v';\varphi')$.
\begin{eqnarray*}
\mathrm{wt}(v';\varphi') & = & \frac{n(N-m-n)}{2} -\pi(\varphi'(e_2'),\varphi'(e')) \\
& = & \frac{n(N-m-n)}{2} - (n(N-m-n) - \pi(\varphi'(e'),\varphi'(e_2'))) \\
& = & \pi(\mathcal{N}\setminus\varphi(e),\varphi(e_2)) - \frac{n(N-m-n)}{2}.
\end{eqnarray*}
Note that $\pi(\varphi(e_1),\varphi(e_2)) + \pi(\mathcal{N}\setminus\varphi(e),\varphi(e_2)) = \pi(\mathcal{N}\setminus\varphi(e_2),\varphi(e_2))$. The above implies
\begin{eqnarray*}
&& \mathrm{wt}(v';\varphi') - \mathrm{wt}(v;\varphi) \\
& = & \pi(\mathcal{N}\setminus\varphi(e),\varphi(e_2)) - \frac{n(N-m-n)}{2} - \frac{mn}{2} + \pi(\varphi(e_1),\varphi(e_2)) \\
& = & \pi(\mathcal{N}\setminus\varphi(e_2),\varphi(e_2)) - \frac{n(N-n)}{2}
\end{eqnarray*}
Write $\varphi(e_2) =\{j_1,\dots,j_n\} \subset \mathcal{N}$, where $j_1<j_2<\cdots <j_n$. Then
\[
\pi(\mathcal{N}\setminus\varphi(e_2),\varphi(e_2)) = \sum_{l=1}^n [\frac{1}{2}(N-1-j_l)-(n-l)] = \frac{n(N-n)}{2} - \frac{1}{2}\sum \varphi(e_2).
\]
Altogether, we get
\begin{equation}\label{eq-weight-change}
\mathrm{wt}(v';\varphi') = \mathrm{wt}(v;\varphi) - \frac{1}{2}\sum \varphi(e_2).
\end{equation}
Now we compare $\mathrm{rot}(v;\varphi)$ to $\mathrm{rot}(v';\varphi')$. As before, denote by $\alpha$ the directed angle from $e_1$ to $e$ and by $\beta$ the directed angle from $e_2$ to $e$. Also denote by $\gamma$ the directed angle from $e_2'$ to $e_1'$. Note that the directed angle from $e'$ to $e_1'$ is $-\alpha$. Since $\sum\mathcal{N}=0$, we have $\sum (\mathcal{N} \setminus A) = - \sum A$ for any $A\subset \mathcal{N}$. Moreover, note that reversing the orientation of a plane curve changes the sign of its curvature $\kappa$.
\begin{eqnarray*}
\mathrm{rot}(v';\varphi')
& = & \frac{1}{2\pi} \int_{e_1'}\kappa ds \cdot \sum \varphi'(e_1') +\frac{1}{2\pi}\left(-\alpha + \int_{e'}\kappa ds\right) \cdot \sum \varphi'(e') \\
&& + \frac{1}{2\pi}\left(\gamma + \int_{e_2'}\kappa ds\right) \cdot \sum \varphi'(e_2') \\
& = & \frac{1}{2\pi} \int_{e_1}\kappa ds \cdot \sum \varphi(e_1) +\frac{1}{2\pi}\left(\alpha + \int_{e}\kappa ds\right) \cdot \sum \varphi(e) \\
&& + \frac{1}{2\pi}\left(\gamma + \int_{e_2}\kappa ds\right) \cdot \sum \varphi(e_2) \\
& = & \frac{1}{2\pi} \left( \alpha+ \int_{e_1}\kappa ds\right) \cdot \sum \varphi(e_1) +\frac{1}{2\pi}\int_{e}\kappa ds \cdot \sum \varphi(e) \\
&& + \frac{1}{2\pi}\left(\alpha+\gamma + \int_{e_2}\kappa ds\right) \cdot \sum \varphi(e_2) \\
& = & \mathrm{rot}(v;\varphi) + \frac{\alpha-\beta+\gamma}{2\pi} \cdot \sum \varphi(e_2).
\end{eqnarray*}
Note that $\alpha-\beta+\gamma =\pi$. The above shows that
\begin{equation}\label{rotation-local-change}
\mathrm{rot}(v';\varphi') = \mathrm{rot}(v;\varphi) + \frac{1}{2} \sum \varphi(e_2).
\end{equation}
Equation \eqref{eq-local-state-match} follows easily from equations \eqref{eq-weight-change} and \eqref{rotation-local-change}.
\end{proof}
| 3,091 | 69,513 |
en
|
train
|
0.103.12
|
\subsection{The colored $\mathfrak{sl}(N)$ homology} Theorem \ref{thm-oc-reverse} is also true for the colored $\mathfrak{sl}(N)$ homology for MOY graphs defined in \cite{Wu-color}. More precisely, reversing the orientation and the color along a simple circuit in a MOY graph does not change the homotopy type of the matrix factorization associated to this MOY graph. To prove this, we first recall some basic properties of the matrix factorizations associated to MOY graphs.
We denote by $C_N(\Gamma)$ the $\mathbb{Z}_2\oplus\mathbb{Z}$-graded matrix factorization associated to a MOY graph $\Gamma$ defined in \cite[Definition 5.5]{Wu-color} and by $\hat{C}_N(D)$ the $\mathbb{Z}_2\oplus\mathbb{Z}\oplus\mathbb{Z}$-graded unnormalized chain complex associated to a knotted MOY graph $D$ defined in \cite[Definitions 11.4 and 11.16]{Wu-color}. Recall that:
\begin{itemize}
\item The $\mathbb{Z}_2$-grading of $C_N(\Gamma)$ and $\hat{C}_N(D)$ comes from the definition of matrix factorizations and is trivial on the homology $H_N(\Gamma)$ and $\hat{H}_N(D)$ of $C_N(\Gamma)$ and $\hat{C}_N(D)$. (See \cite[Theorems 1.3 and 14.7]{Wu-color}.)
\item The $\mathbb{Z}$-grading of $C_N(\Gamma)$ comes from the polynomial grading of the base ring and is called the quantum grading. The homology $H_N(\Gamma)$ inherits this quantum grading.
\item One $\mathbb{Z}$-grading of $\hat{C}_N(D)$ is the quantum grading induced by the quantum grading of matrix factorizations of MOY graphs. The other $\mathbb{Z}$-grading of $\hat{C}_N(D)$ is the homological grading. $\hat{H}_N(D)$ inherits both of these gradings.
\end{itemize}
Also note that, for a MOY graph $\Gamma$, $C_N(\Gamma)= \hat{C}_N(\Gamma)$.
\begin{theorem}\cite{Wu-color}\label{thm-MOY-calculus}
\begin{enumerate}
\item $\hat{C}_N( \bigcirc_m ) \simeq \mathbb{C}\{\qb{N}{m}\}$, where $\bigcirc_m$ is a circle colored by $m$.
\item $\hat{C}_N( \setlength{\unitlength}{1pt}
\begin{picture}(50,50)(-80,20)
\put(-60,10){\vector(0,1){10}}
\put(-60,20){\vector(-1,1){20}}
\put(-60,20){\vector(1,1){10}}
\put(-50,30){\vector(-1,1){10}}
\put(-50,30){\vector(1,1){10}}
\put(-75,3){\tiny{$i+j+k$}}
\put(-55,21){\tiny{$j+k$}}
\put(-80,42){\tiny{$i$}}
\put(-60,42){\tiny{$j$}}
\put(-40,42){\tiny{$k$}}
\end{picture}) \simeq \hat{C}_N( \setlength{\unitlength}{1pt}
\begin{picture}(50,50)(40,20)
\put(60,10){\vector(0,1){10}}
\put(60,20){\vector(1,1){20}}
\put(60,20){\vector(-1,1){10}}
\put(50,30){\vector(1,1){10}}
\put(50,30){\vector(-1,1){10}}
\put(45,3){\tiny{$i+j+k$}}
\put(38,21){\tiny{$i+j$}}
\put(80,42){\tiny{$k$}}
\put(60,42){\tiny{$j$}}
\put(40,42){\tiny{$i$}}
\end{picture})$.
\item $\hat{C}_N( \setlength{\unitlength}{0.75pt}
\begin{picture}(65,80)(-30,35)
\put(0,-5){\vector(0,1){15}}
\put(0,60){\vector(0,1){15}}
\qbezier(0,10)(-10,10)(-10,15)
\qbezier(0,60)(-10,60)(-10,55)
\put(-10,15){\vector(0,1){40}}
\qbezier(0,10)(15,10)(15,20)
\qbezier(0,60)(15,60)(15,50)
\put(15,20){\vector(0,1){30}}
\put(5,65){\tiny{$_{m+n}$}}
\put(5,3){\tiny{$_{m+n}$}}
\put(17,35){\tiny{$_{n}$}}
\put(-22,35){\tiny{$_{m}$}}
\end{picture}) \simeq \hat{C}_N(\setlength{\unitlength}{.75pt}
\begin{picture}(55,80)(-20,40)
\put(0,0){\vector(0,1){80}}
\put(5,75){\tiny{$_{m+n}$}}
\end{picture})\{\qb{m+n}{n}\} $.
\item $\hat{C}_N( \setlength{\unitlength}{.75pt}
\begin{picture}(60,80)(-30,40)
\put(0,0){\vector(0,1){30}}
\put(0,30){\vector(0,1){20}}
\put(0,50){\vector(0,1){30}}
\put(-1,40){\line(1,0){2}}
\qbezier(0,30)(25,20)(25,30)
\qbezier(0,50)(25,60)(25,50)
\put(25,50){\vector(0,-1){20}}
\put(5,75){\tiny{$_{m}$}}
\put(5,5){\tiny{$_{m}$}}
\put(-30,38){\tiny{$_{m+n}$}}
\put(14,60){\tiny{$_{n}$}}
\end{picture}) \simeq \hat{C}_N( \setlength{\unitlength}{.75pt}
\begin{picture}(40,80)(-20,40)
\put(0,0){\vector(0,1){80}}
\put(5,75){\tiny{$_{m}$}}
\end{picture})\{\qb{N-m}{n}\}$.
\item $\hat{C}_N( \setlength{\unitlength}{.75pt}
\begin{picture}(80,60)(-180,30)
\put(-170,0){\vector(1,1){20}}
\put(-150,20){\vector(1,0){20}}
\put(-130,20){\vector(0,1){20}}
\put(-130,20){\vector(1,-1){20}}
\put(-130,40){\vector(-1,0){20}}
\put(-150,40){\vector(0,-1){20}}
\put(-150,40){\vector(-1,1){20}}
\put(-110,60){\vector(-1,-1){20}}
\put(-175,0){\tiny{$_1$}}
\put(-175,55){\tiny{$_1$}}
\put(-127,30){\tiny{$_1$}}
\put(-108,0){\tiny{$_m$}}
\put(-108,55){\tiny{$_m$}}
\put(-160,30){\tiny{$_m$}}
\put(-150,45){\tiny{$_{m+1}$}}
\put(-150,13){\tiny{$_{m+1}$}}
\end{picture}) \simeq \hat{C}_N( \setlength{\unitlength}{.75pt}
\begin{picture}(60,60)(-30,30)
\put(-20,0){\vector(0,1){60}}
\put(20,60){\vector(0,-1){60}}
\put(-25,30){\tiny{$_1$}}
\put(22,30){\tiny{$_m$}}
\end{picture}) \oplus \hat{C}_N( \setlength{\unitlength}{.75pt}
\begin{picture}(60,60)(100,30)
\put(110,0){\vector(1,1){20}}
\put(130,20){\vector(1,-1){20}}
\put(130,40){\vector(0,-1){20}}
\put(130,40){\vector(-1,1){20}}
\put(150,60){\vector(-1,-1){20}}
\put(105,0){\tiny{$_1$}}
\put(105,55){\tiny{$_1$}}
\put(152,0){\tiny{$_m$}}
\put(152,55){\tiny{$_m$}}
\put(132,30){\tiny{$_{m-1}$}}
\end{picture})\{[N-m-1]\}.$
\item $\hat{C}_N( \setlength{\unitlength}{.75pt}
\begin{picture}(95,90)(-180,45)
\put(-160,0){\vector(0,1){25}}
\put(-160,65){\vector(0,1){25}}
\put(-160,25){\vector(0,1){40}}
\put(-160,65){\vector(1,0){40}}
\put(-120,25){\vector(0,1){40}}
\put(-120,25){\vector(-1,0){40}}
\put(-120,0){\vector(0,1){25}}
\put(-120,65){\vector(0,1){25}}
\put(-167,13){\tiny{$_1$}}
\put(-167,72){\tiny{$_l$}}
\put(-180,48){\tiny{$_{l+n}$}}
\put(-117,13){\tiny{$_{m+l-1}$}}
\put(-117,72){\tiny{$_{m}$}}
\put(-117,48){\tiny{$_{m-n}$}}
\put(-155,32){\tiny{$_{l+n-1}$}}
\put(-142,58){\tiny{$_n$}}
\end{picture}) \simeq \hat{C}_N( \setlength{\unitlength}{.75pt}
\begin{picture}(85,90)(-30,45)
\put(-20,0){\vector(0,1){45}}
\put(-20,45){\vector(0,1){45}}
\put(20,0){\vector(0,1){45}}
\put(20,45){\vector(0,1){45}}
\put(20,45){\vector(-1,0){40}}
\put(-27,20){\tiny{$_1$}}
\put(23,20){\tiny{$_{m+l-1}$}}
\put(-27,65){\tiny{$_l$}}
\put(23,65){\tiny{$_m$}}
\put(-5,38){\tiny{$_{l-1}$}}
\end{picture}) \{\qb{m-1}{n}\} \oplus \hat{C}_N( \setlength{\unitlength}{.75pt}
\begin{picture}(60,90)(110,45)
\put(110,0){\vector(2,3){20}}
\put(150,0){\vector(-2,3){20}}
\put(130,30){\vector(0,1){30}}
\put(130,60){\vector(-2,3){20}}
\put(130,60){\vector(2,3){20}}
\put(117,20){\tiny{$_1$}}
\put(140,20){\tiny{$_{m+l-1}$}}
\put(117,65){\tiny{$_l$}}
\put(140,65){\tiny{$_m$}}
\put(133,42){\tiny{$_{m+l}$}}
\end{picture}) \{\qb{m-1}{n-1}\}.$
\item $\hat{C}_N(\setlength{\unitlength}{1pt}
\begin{picture}(80,30)(-35,30)
\put(-15,0){\varepsilonctor(0,1){20}}
\put(-15,20){\varepsilonctor(0,1){20}}
\put(-15,40){\varepsilonctor(0,1){20}}
\put(15,0){\varepsilonctor(0,1){20}}
\put(15,20){\varepsilonctor(0,1){20}}
\put(15,40){\varepsilonctor(0,1){20}}
\put(15,20){\varepsilonctor(-1,0){30}}
\put(-15,40){\varepsilonctor(1,0){30}}
\put(-25,5){\tiny{$_{n}$}}
\put(-25,55){\tiny{$_{m}$}}
\put(-30,30){\tiny{$_{n+k}$}}
\put(-2,15){\tiny{$_{k}$}}
\put(-13,44){\tiny{$_{n+k-m}$}}
\put(18,5){\tiny{$_{m+l}$}}
\put(18,55){\tiny{$_{n+l}$}}
\put(18,30){\tiny{$_{m+l-k}$}}
\end{picture}) \simeq \bigoplus_{j=\max\{m-n,0\}}^m \hat{C}_N( \setlength{\unitlength}{1pt}
\begin{picture}(80,40)(-40,30)
% (removed dataset-extraction artifact lines)
\put(130,40){\varepsilonctor(0,-1){20}}
\put(130,40){\varepsilonctor(-1,1){20}}
\put(150,60){\varepsilonctor(-1,-1){20}}
\put(105,0){\tiny{$_1$}}
\put(105,55){\tiny{$_1$}}
\put(152,0){\tiny{$_m$}}
\put(152,55){\tiny{$_m$}}
\put(132,30){\tiny{$_{m-1}$}}
\end{picture})\{[N-m-1]\}.$
\item $\hat{C}_N( \setlength{\unitlength}{.75pt}
\begin{picture}(95,90)(-180,45)
\put(-160,0){\varepsilonctor(0,1){25}}
\put(-160,65){\varepsilonctor(0,1){25}}
\put(-160,25){\varepsilonctor(0,1){40}}
\put(-160,65){\varepsilonctor(1,0){40}}
\put(-120,25){\varepsilonctor(0,1){40}}
\put(-120,25){\varepsilonctor(-1,0){40}}
\put(-120,0){\varepsilonctor(0,1){25}}
\put(-120,65){\varepsilonctor(0,1){25}}
\put(-167,13){\tiny{$_1$}}
\put(-167,72){\tiny{$_l$}}
\put(-180,48){\tiny{$_{l+n}$}}
\put(-117,13){\tiny{$_{m+l-1}$}}
\put(-117,72){\tiny{$_{m}$}}
\put(-117,48){\tiny{$_{m-n}$}}
\put(-155,32){\tiny{$_{l+n-1}$}}
\put(-142,58){\tiny{$_n$}}
\end{picture}) \simeq \hat{C}_N( \setlength{\unitlength}{.75pt}
\begin{picture}(85,90)(-30,45)
\put(-20,0){\varepsilonctor(0,1){45}}
\put(-20,45){\varepsilonctor(0,1){45}}
\put(20,0){\varepsilonctor(0,1){45}}
\put(20,45){\varepsilonctor(0,1){45}}
\put(20,45){\varepsilonctor(-1,0){40}}
\put(-27,20){\tiny{$_1$}}
\put(23,20){\tiny{$_{m+l-1}$}}
\put(-27,65){\tiny{$_l$}}
\put(23,65){\tiny{$_m$}}
\put(-5,38){\tiny{$_{l-1}$}}
\end{picture}) \{\qb{m-1}{n}\} \oplus \hat{C}_N( \setlength{\unitlength}{.75pt}
\begin{picture}(60,90)(110,45)
\put(110,0){\varepsilonctor(2,3){20}}
\put(150,0){\varepsilonctor(-2,3){20}}
\put(130,30){\varepsilonctor(0,1){30}}
\put(130,60){\varepsilonctor(-2,3){20}}
\put(130,60){\varepsilonctor(2,3){20}}
\put(117,20){\tiny{$_1$}}
\put(140,20){\tiny{$_{m+l-1}$}}
\put(117,65){\tiny{$_l$}}
\put(140,65){\tiny{$_m$}}
\put(133,42){\tiny{$_{m+l}$}}
\end{picture}) \{\qb{m-1}{n-1}\}.$
\item $\hat{C}_N(\setlength{\unitlength}{1pt}
\begin{picture}(80,30)(-35,30)
\put(-15,0){\vector(0,1){20}}
\put(-15,20){\vector(0,1){20}}
\put(-15,40){\vector(0,1){20}}
\put(15,0){\vector(0,1){20}}
\put(15,20){\vector(0,1){20}}
\put(15,40){\vector(0,1){20}}
\put(15,20){\vector(-1,0){30}}
\put(-15,40){\vector(1,0){30}}
\put(-25,5){\tiny{$_{n}$}}
\put(-25,55){\tiny{$_{m}$}}
\put(-30,30){\tiny{$_{n+k}$}}
\put(-2,15){\tiny{$_{k}$}}
\put(-13,44){\tiny{$_{n+k-m}$}}
\put(18,5){\tiny{$_{m+l}$}}
\put(18,55){\tiny{$_{n+l}$}}
\put(18,30){\tiny{$_{m+l-k}$}}
\end{picture}) \simeq \bigoplus_{j=\max\{m-n,0\}}^m \hat{C}_N( \setlength{\unitlength}{1pt}
\begin{picture}(80,40)(-40,30)
\put(-15,0){\vector(0,1){20}}
\put(-15,20){\vector(0,1){20}}
\put(-15,40){\vector(0,1){20}}
\put(15,0){\vector(0,1){20}}
\put(15,20){\vector(0,1){20}}
\put(15,40){\vector(0,1){20}}
\put(15,40){\vector(-1,0){30}}
\put(-15,20){\vector(1,0){30}}
\put(-25,5){\tiny{$_{n}$}}
\put(-25,55){\tiny{$_{m}$}}
\put(-35,30){\tiny{$_{m-j}$}}
\put(-2,45){\tiny{$_{j}$}}
\put(-12,15){\tiny{$_{n+j-m}$}}
\put(18,5){\tiny{$_{m+l}$}}
\put(18,55){\tiny{$_{n+l}$}}
\put(18,30){\tiny{$_{n+l+j}$}}
\end{picture})\{\qb{l}{k-j}\}$.
\end{enumerate}
Here, ``$\simeq$" means that there is a homogeneous homotopy equivalence of chain complexes of graded matrix factorizations between the two sides that preserves the $\mathbb{Z}_2$-grading, the quantum grading and the homological grading.
The above relations remain true if we reverse the orientation of the MOY graphs on both sides or reverse the orientation of $\mathbb{R}^2$.
For a MOY graph $\Gamma$, denote by $H_N^j(\Gamma)$ the homogeneous part of $H_N(\Gamma)$ of quantum degree $j$. Then
\[
\sum_j q^j \dim_\mathbb{C} H_N^j(\Gamma) = \left\langle \Gamma \right\rangle_N.
\]
In other words, the graded dimension of $H_N(\Gamma)$ is equal to $\left\langle \Gamma \right\rangle_N$.
\end{theorem}
\begin{theorem}\cite{Wu-color}\label{thm-MOY-knotted-invariance}
\begin{enumerate}
\item $\hat{C}_N(\setlength{\unitlength}{1pt}
\begin{picture}(40,20)(-20,20)
\put(0,0){\line(0,1){8}}
\put(0,12){\varepsilonctor(0,1){8}}
\put(0,20){\varepsilonctor(1,1){20}}
\put(0,20){\varepsilonctor(-1,1){20}}
\put(-20,10){\varepsilonctor(1,0){40}}
\put(-13,35){\tiny{$_m$}}
\put(10,35){\tiny{$_l$}}
\put(3,2){\tiny{$_{m+l}$}}
\put(10,13){\tiny{$_n$}}
\end{picture}) \simeq \hat{C}_N(\setlength{\unitlength}{1pt}
\begin{picture}(40,20)(-20,20)
\put(0,0){\varepsilonctor(0,1){20}}
\put(0,20){\line(1,1){8}}
\put(0,20){\line(-1,1){8}}
\put(12,32){\varepsilonctor(1,1){8}}
\put(-12,32){\varepsilonctor(-1,1){8}}
\put(-20,30){\varepsilonctor(1,0){40}}
\put(-13,35){\tiny{$_m$}}
\put(10,35){\tiny{$_l$}}
\put(3,2){\tiny{$_{m+l}$}}
\put(12,26){\tiny{$_n$}}
\end{picture})
$. We call the difference between the two knotted MOY graphs here a fork sliding. The homotopy equivalence remains true if we reverse the orientation of one or both strands involved, or if the horizontal strand is under the vertex instead of above it. See the full statement in \cite[Theorem 12.1]{Wu-color}.
\item For a knotted MOY graph $D$, the homotopy type of $\hat{C}_N(D)$, with its three gradings, is invariant under regular Reidemeister moves.
\item $\hat{C}_N(\setlength{\unitlength}{.75pt}
\begin{picture}(30,20)(-10,7)
\put(-10,0){\line(1,1){12}}
\put(-2,12){\line(-1,1){8}}
\qbezier(2,12)(10,20)(10,10)
\qbezier(2,8)(10,0)(10,10)
\put(12,12){\tiny{$m$}}
\end{picture}) = \hat{C}_N(\setlength{\unitlength}{.75pt}
\begin{picture}(20,20)(-10,7)
\qbezier(-10,0)(10,10)(-10,20)
\put(3,12){\tiny{$m$}}
\end{picture})\left\langle m\right\rangle \| m \| \{q^{-m(N+1-m)}\}$
\item For a knotted MOY graph $D$, define the homology $\hat{H}_N(D)$ of $\hat{C}_N(D)$ as in \cite[Subsection 1.2]{Wu-color}. Then the graded Euler characteristic of $\hat{H}_N(D)$ is equal to $\left\langle D \right\rangle_N$.
\end{enumerate}
\end{theorem}
\begin{remark}\label{homology-grading-conventions}
In the above theorems,
\begin{enumerate}[1.]
\item ``$\left\langle \ast \right\rangle$" means shifting the $\mathbb{Z}_2$-grading by $\ast$. (See for example \cite[Subsection 2.3]{Wu-color}.)
\item ``$\|\ast\|$" means shifting the homological grading up by $\ast$. (See for example \cite[Definition 2.33]{Wu-color}.)
\item ``$\{F(q)\}$" means shifting the quantum grading up by $F(q)$. (See for example \cite[Subsection 2.1]{Wu-color}.)
\item Our normalization of the quantum integers is
\[
[j] := \frac{q^j-q^{-j}}{q-q^{-1}},
\]
\[
[j]! := [1] \cdot [2] \cdots [j],
\]
\[
\qb{j}{k} := \frac{[j]!}{[k]!\cdot [j-k]!}.
\]
\end{enumerate}
\end{remark}
From the definition of $\hat{C}_N(D)$ in \cite[Definitions 11.4 and 11.16]{Wu-color}, we have the following simple lemma.
\begin{lemma}\cite[Lemma 7.3]{Wu-color-ras}\label{lemma-l-N-crossings}
\begin{equation}\label{eq-l-N-+}
\hat{C}_N (\setlength{\unitlength}{1pt}
\begin{picture}(40,40)(-20,0)
\put(-20,-20){\varepsilonctor(1,1){40}}
\put(20,-20){\line(-1,1){15}}
\put(-5,5){\varepsilonctor(-1,1){15}}
\put(-11,15){\tiny{$_l$}}
\put(8,15){\tiny{$_N$}}
\end{picture}) \cong \hat{C}_N (\setlength{\unitlength}{.5pt}
\begin{picture}(85,45)(-40,45)
\put(-20,0){\varepsilonctor(0,1){45}}
\put(-20,45){\varepsilonctor(0,1){45}}
\put(20,0){\varepsilonctor(0,1){45}}
\put(20,45){\varepsilonctor(0,1){45}}
\put(-20,45){\varepsilonctor(1,0){40}}
\put(-35,20){\tiny{$_N$}}
\put(25,20){\tiny{$_{l}$}}
\put(-32,65){\tiny{$_l$}}
\put(25,65){\tiny{$_N$}}
\put(-13,38){\tiny{$_{N-l}$}}
\end{picture})\|l\|\{q^{-l}\},
\end{equation}
\begin{equation}\label{eq-l-N--}
\hat{C}_N (\setlength{\unitlength}{1pt}
\begin{picture}(40,40)(-20,0)
\put(20,-20){\varepsilonctor(-1,1){40}}
\put(-20,-20){\line(1,1){15}}
\put(5,5){\varepsilonctor(1,1){15}}
\put(-11,15){\tiny{$_l$}}
\put(8,15){\tiny{$_N$}}
\end{picture}) \cong \hat{C}_N (\setlength{\unitlength}{.5pt}
\begin{picture}(85,45)(-40,45)
\put(-20,0){\varepsilonctor(0,1){45}}
\put(-20,45){\varepsilonctor(0,1){45}}
% (removed dataset-extraction artifact lines)
\put(12,26){\tiny{$_n$}}
\end{picture})
$. We call the difference between the two knotted MOY graphs here a fork sliding. The homotopy equivalence remains true if we reverse the orientation of one or both strands involved, or if the horizontal strand is under the vertex instead of above it. See the full statement in \cite[Theorem 12.1]{Wu-color}.
\item For a knotted MOY graph $D$, the homotopy type of $\hat{C}_N(D)$, with its three gradings, is invariant under regular Reidemeister moves.
\item $\hat{C}_N(\setlength{\unitlength}{.75pt}
\begin{picture}(30,20)(-10,7)
\put(-10,0){\line(1,1){12}}
\put(-2,12){\line(-1,1){8}}
\qbezier(2,12)(10,20)(10,10)
\qbezier(2,8)(10,0)(10,10)
\put(12,12){\tiny{$m$}}
\end{picture}) = \hat{C}_N(\setlength{\unitlength}{.75pt}
\begin{picture}(20,20)(-10,7)
\qbezier(-10,0)(10,10)(-10,20)
\put(3,12){\tiny{$m$}}
\end{picture})\left\langle m\right\rangle \| m \| \{q^{-m(N+1-m)}\}$
\item For a knotted MOY graph $D$, define the homology $\hat{H}_N(D)$ of $\hat{C}_N(D)$ as in \cite[Subsection 1.2]{Wu-color}. Then the graded Euler characteristic of $\hat{H}_N(D)$ is equal to $\left\langle D \right\rangle_N$.
\end{enumerate}
\end{theorem}
\begin{remark}\label{homology-grading-conventions}
In the above theorems,
\begin{enumerate}[1.]
\item ``$\left\langle \ast \right\rangle$" means shifting the $\mathbb{Z}_2$-grading by $\ast$. (See for example \cite[Subsection 2.3]{Wu-color}.)
\item ``$\|\ast\|$" means shifting the homological grading up by $\ast$. (See for example \cite[Definition 2.33]{Wu-color}.)
\item ``$\{F(q)\}$" means shifting the quantum grading up by $F(q)$. (See for example \cite[Subsection 2.1]{Wu-color}.)
\item Our normalization of the quantum integers is
\[
[j] := \frac{q^j-q^{-j}}{q-q^{-1}},
\]
\[
[j]! := [1] \cdot [2] \cdots [j],
\]
\[
\qb{j}{k} := \frac{[j]!}{[k]!\cdot [j-k]!}.
\]
\end{enumerate}
\end{remark}
From the definition of $\hat{C}_N(D)$ in \cite[Definitions 11.4 and 11.16]{Wu-color}, we have the following simple lemma.
\begin{lemma}\cite[Lemma 7.3]{Wu-color-ras}\label{lemma-l-N-crossings}
\begin{equation}\label{eq-l-N-+}
\hat{C}_N (\setlength{\unitlength}{1pt}
\begin{picture}(40,40)(-20,0)
\put(-20,-20){\vector(1,1){40}}
\put(20,-20){\line(-1,1){15}}
\put(-5,5){\vector(-1,1){15}}
\put(-11,15){\tiny{$_l$}}
\put(8,15){\tiny{$_N$}}
\end{picture}) \cong \hat{C}_N (\setlength{\unitlength}{.5pt}
\begin{picture}(85,45)(-40,45)
\put(-20,0){\vector(0,1){45}}
\put(-20,45){\vector(0,1){45}}
\put(20,0){\vector(0,1){45}}
\put(20,45){\vector(0,1){45}}
\put(-20,45){\vector(1,0){40}}
\put(-35,20){\tiny{$_N$}}
\put(25,20){\tiny{$_{l}$}}
\put(-32,65){\tiny{$_l$}}
\put(25,65){\tiny{$_N$}}
\put(-13,38){\tiny{$_{N-l}$}}
\end{picture})\|l\|\{q^{-l}\},
\end{equation}
\begin{equation}\label{eq-l-N--}
\hat{C}_N (\setlength{\unitlength}{1pt}
\begin{picture}(40,40)(-20,0)
\put(20,-20){\vector(-1,1){40}}
\put(-20,-20){\line(1,1){15}}
\put(5,5){\vector(1,1){15}}
\put(-11,15){\tiny{$_l$}}
\put(8,15){\tiny{$_N$}}
\end{picture}) \cong \hat{C}_N (\setlength{\unitlength}{.5pt}
\begin{picture}(85,45)(-40,45)
\put(-20,0){\vector(0,1){45}}
\put(-20,45){\vector(0,1){45}}
\put(20,0){\vector(0,1){45}}
\put(20,45){\vector(0,1){45}}
\put(-20,45){\vector(1,0){40}}
\put(-35,20){\tiny{$_N$}}
\put(25,20){\tiny{$_{l}$}}
\put(-32,65){\tiny{$_l$}}
\put(25,65){\tiny{$_N$}}
\put(-13,38){\tiny{$_{N-l}$}}
\end{picture})\|-l\|\{q^{l}\}.
\end{equation}
Consequently,
\begin{equation}\label{eq-l-N--to+}
\hat{C}_N (\setlength{\unitlength}{1pt}
\begin{picture}(40,40)(-20,0)
\put(-20,-20){\vector(1,1){40}}
\put(20,-20){\line(-1,1){15}}
\put(-5,5){\vector(-1,1){15}}
\put(-11,15){\tiny{$_l$}}
\put(8,15){\tiny{$_N$}}
\end{picture}) \cong \hat{C}_N (\setlength{\unitlength}{1pt}
\begin{picture}(40,40)(-20,0)
\put(20,-20){\vector(-1,1){40}}
\put(-20,-20){\line(1,1){15}}
\put(5,5){\vector(1,1){15}}
\put(-11,15){\tiny{$_l$}}
\put(8,15){\tiny{$_N$}}
\end{picture})\|2l\|\{q^{-2l}\},
\end{equation}
\begin{equation}\label{eq-l-N-+to-}
\hat{C}_N (\setlength{\unitlength}{1pt}
\begin{picture}(40,40)(-20,0)
\put(20,-20){\vector(-1,1){40}}
\put(-20,-20){\line(1,1){15}}
\put(5,5){\vector(1,1){15}}
\put(-11,15){\tiny{$_l$}}
\put(8,15){\tiny{$_N$}}
\end{picture}) \cong \hat{C}_N (\setlength{\unitlength}{1pt}
\begin{picture}(40,40)(-20,0)
\put(-20,-20){\vector(1,1){40}}
\put(20,-20){\line(-1,1){15}}
\put(-5,5){\vector(-1,1){15}}
\put(-11,15){\tiny{$_l$}}
\put(8,15){\tiny{$_N$}}
\end{picture})\|-2l\|\{q^{2l}\}.
\end{equation}
\end{lemma}
We will also need the following lemma.
\begin{lemma}\label{lemma-twisted-forks}
$~$
\begin{equation}\label{eq-twisted-forks-+}
\hat{C}_N(\setlength{\unitlength}{1pt}
\begin{picture}(40,40)(-20,20)
\put(0,0){\vector(0,1){10}}
\put(5,3){\tiny{$m+n$}}
\qbezier(0,10)(-20,20)(0,30)
\put(0,30){\vector(2,1){20}}
\qbezier(0,10)(20,20)(4,28)
\put(15,32){\tiny{$n$}}
\put(-4,32){\vector(-2,1){16}}
\put(-18,32){\tiny{$m$}}
\end{picture}) \simeq \hat{C}_N(\setlength{\unitlength}{1pt}
\begin{picture}(40,40)(-20,20)
\put(0,0){\vector(0,1){20}}
\put(5,7){\tiny{$m+n$}}
\put(0,20){\vector(1,1){20}}
\put(12,25){\tiny{$n$}}
\put(0,20){\vector(-1,1){20}}
\put(-15,25){\tiny{$m$}}
\end{picture})\{q^{mn}\},
\end{equation}
\begin{equation}\label{eq-twisted-forks--}
\hat{C}_N(\setlength{\unitlength}{1pt}
\begin{picture}(40,40)(-20,20)
\put(0,0){\vector(0,1){10}}
\put(5,3){\tiny{$m+n$}}
\qbezier(0,10)(20,20)(0,30)
\put(0,30){\vector(-2,1){20}}
\qbezier(0,10)(-20,20)(-4,28)
\put(15,32){\tiny{$n$}}
\put(4,32){\vector(2,1){16}}
\put(-18,32){\tiny{$m$}}
\end{picture}) \simeq \hat{C}_N(\setlength{\unitlength}{1pt}
\begin{picture}(40,40)(-20,20)
\put(0,0){\vector(0,1){20}}
\put(5,7){\tiny{$m+n$}}
\put(0,20){\vector(1,1){20}}
\put(12,25){\tiny{$n$}}
\put(0,20){\vector(-1,1){20}}
\put(-15,25){\tiny{$m$}}
\end{picture})\{q^{-mn}\}.
\end{equation}
The above relations remain true if we reverse the orientation of the knotted MOY graphs on both sides.
\end{lemma}
\begin{proof}
We prove \eqref{eq-twisted-forks-+} only. The proof of \eqref{eq-twisted-forks--} is similar and left to the reader.
To prove \eqref{eq-twisted-forks-+}, we induce on $n$. When $n=1$, \eqref{eq-twisted-forks-+} is proved in \cite[Proposition 6.1]{Yonezawa3}. Assume \eqref{eq-twisted-forks-+} is true for $n$. By Theorems \ref{thm-MOY-calculus}, \ref{thm-MOY-knotted-invariance} and the induction hypothesis, we have
\begin{eqnarray*}
&& \hat{C}_N(\setlength{\unitlength}{1pt}
\begin{picture}(40,40)(-20,20)
\put(0,0){\varepsilonctor(0,1){10}}
\put(5,3){\tiny{$m+n+1$}}
\qbezier(0,10)(-20,20)(0,30)
\put(0,30){\varepsilonctor(2,1){20}}
\qbezier(0,10)(20,20)(4,28)
\put(15,32){\tiny{$n+1$}}
\put(-4,32){\varepsilonctor(-2,1){16}}
\put(-18,32){\tiny{$m$}}
\end{picture})\{[n+1]\} \simeq \hat{C}_N(\setlength{\unitlength}{1pt}
\begin{picture}(40,40)(-20,20)
\put(0,0){\varepsilonctor(0,1){10}}
\put(5,3){\tiny{$m+n+1$}}
\put(0,10){\varepsilonctor(-2,1){10}}
\put(-10,25){\line(2,1){10}}
\put(-10,25){\varepsilonctor(2,1){0}}
\put(-10,25){\varepsilonctor(-2,1){0}}
\put(-20,18){\tiny{$n$}}
\put(-3,18){\tiny{$1$}}
\qbezier(-10,15)(0,20)(-10,25)
\qbezier(-10,15)(-20,20)(-10,25)
\put(0,30){\varepsilonctor(2,1){20}}
\qbezier(0,10)(20,20)(4,28)
\put(15,32){\tiny{$n+1$}}
\put(-4,32){\varepsilonctor(-2,1){16}}
\put(-18,32){\tiny{$m$}}
\end{picture}) \simeq \hat{C}_N(\setlength{\unitlength}{1pt}
\begin{picture}(40,40)(-20,20)
\put(0,0){\varepsilonctor(0,1){10}}
\put(5,3){\tiny{$m+n+1$}}
\put(0,10){\varepsilonctor(-2,1){10}}
% (removed dataset-extraction artifact lines)
\put(15,32){\tiny{$n$}}
\put(-4,32){\varepsilonctor(-2,1){16}}
\put(-18,32){\tiny{$m$}}
\end{picture}) \simeq \hat{C}_N(\setlength{\unitlength}{1pt}
\begin{picture}(40,40)(-20,20)
\put(0,0){\varepsilonctor(0,1){20}}
\put(5,7){\tiny{$m+n$}}
\put(0,20){\varepsilonctor(1,1){20}}
\put(12,25){\tiny{$n$}}
\put(0,20){\varepsilonctor(-1,1){20}}
\put(-15,25){\tiny{$m$}}
\end{picture})\{q^{mn}\},
\end{equation}
\begin{equation}\label{eq-twisted-forks--}
\hat{C}_N(\setlength{\unitlength}{1pt}
\begin{picture}(40,40)(-20,20)
\put(0,0){\varepsilonctor(0,1){10}}
\put(5,3){\tiny{$m+n$}}
\qbezier(0,10)(20,20)(0,30)
\put(0,30){\varepsilonctor(-2,1){20}}
\qbezier(0,10)(-20,20)(-4,28)
\put(15,32){\tiny{$n$}}
\put(4,32){\varepsilonctor(2,1){16}}
\put(-18,32){\tiny{$m$}}
\end{picture}) \simeq \hat{C}_N(\setlength{\unitlength}{1pt}
\begin{picture}(40,40)(-20,20)
\put(0,0){\varepsilonctor(0,1){20}}
\put(5,7){\tiny{$m+n$}}
\put(0,20){\varepsilonctor(1,1){20}}
\put(12,25){\tiny{$n$}}
\put(0,20){\varepsilonctor(-1,1){20}}
\put(-15,25){\tiny{$m$}}
\end{picture})\{q^{-mn}\}.
\end{equation}
The above relations remain true if we reverse the orientation of the knotted MOY graphs on both sides.
\end{lemma}
\begin{proof}
We prove \eqref{eq-twisted-forks-+} only. The proof of \eqref{eq-twisted-forks--} is similar and left to the reader.
To prove \eqref{eq-twisted-forks-+}, we induce on $n$. When $n=1$, \eqref{eq-twisted-forks-+} is proved in \cite[Proposition 6.1]{Yonezawa3}. Assume \eqref{eq-twisted-forks-+} is true for $n$. By Theorems \ref{thm-MOY-calculus}, \ref{thm-MOY-knotted-invariance} and the induction hypothesis, we have
\begin{eqnarray*}
&& \hat{C}_N(\setlength{\unitlength}{1pt}
\begin{picture}(40,40)(-20,20)
\put(0,0){\varepsilonctor(0,1){10}}
\put(5,3){\tiny{$m+n+1$}}
\qbezier(0,10)(-20,20)(0,30)
\put(0,30){\varepsilonctor(2,1){20}}
\qbezier(0,10)(20,20)(4,28)
\put(15,32){\tiny{$n+1$}}
\put(-4,32){\varepsilonctor(-2,1){16}}
\put(-18,32){\tiny{$m$}}
\end{picture})\{[n+1]\} \simeq \hat{C}_N(\setlength{\unitlength}{1pt}
\begin{picture}(40,40)(-20,20)
\put(0,0){\varepsilonctor(0,1){10}}
\put(5,3){\tiny{$m+n+1$}}
\put(0,10){\varepsilonctor(-2,1){10}}
\put(-10,25){\line(2,1){10}}
\put(-10,25){\varepsilonctor(2,1){0}}
\put(-10,25){\varepsilonctor(-2,1){0}}
\put(-20,18){\tiny{$n$}}
\put(-3,18){\tiny{$1$}}
\qbezier(-10,15)(0,20)(-10,25)
\qbezier(-10,15)(-20,20)(-10,25)
\put(0,30){\varepsilonctor(2,1){20}}
\qbezier(0,10)(20,20)(4,28)
\put(15,32){\tiny{$n+1$}}
\put(-4,32){\varepsilonctor(-2,1){16}}
\put(-18,32){\tiny{$m$}}
\end{picture}) \simeq \hat{C}_N(\setlength{\unitlength}{1pt}
\begin{picture}(40,40)(-20,20)
\put(0,0){\varepsilonctor(0,1){10}}
\put(5,3){\tiny{$m+n+1$}}
\put(0,10){\varepsilonctor(-2,1){10}}
\put(-10,25){\line(2,1){10}}
\put(-10,25){\varepsilonctor(2,1){0}}
\put(-20,18){\tiny{$n$}}
\put(-3,20){\tiny{$1$}}
\put(-10,15){\varepsilonctor(2,1){25}}
\qbezier(15,27.5)(20,30)(15,37.5)
\qbezier(-10,15)(-20,20)(-10,25)
\put(0,30){\varepsilonctor(2,1){20}}
\qbezier(0,10)(20,20)(14,23)
\put(8,26){\line(-2,1){6}}
\put(21,40){\tiny{$n+1$}}
\put(-4,32){\varepsilonctor(-2,1){16}}
\put(-18,32){\tiny{$m$}}
\end{picture}) \\
&& \\
& \simeq & \hat{C}_N(\setlength{\unitlength}{1pt}
\begin{picture}(40,40)(-20,20)
\put(0,0){\varepsilonctor(0,1){10}}
\put(5,3){\tiny{$m+n+1$}}
\put(0,10){\varepsilonctor(2,1){10}}
\put(0,10){\line(-2,1){10}}
\put(-10,25){\line(2,1){10}}
\put(-10,25){\varepsilonctor(2,1){0}}
\put(-20,18){\tiny{$n$}}
\put(7,20){\tiny{$1$}}
\qbezier(-10,15)(-20,20)(-10,25)
\put(0,30){\varepsilonctor(2,1){20}}
\qbezier(10,15)(20,20)(12,24)
\put(10,15){\varepsilonctor(0,1){20}}
\put(8,26){\line(-2,1){6}}
\put(21,40){\tiny{$n+1$}}
\put(-4,32){\varepsilonctor(-2,1){16}}
\put(-18,32){\tiny{$m$}}
\end{picture}) \simeq \hat{C}_N(\setlength{\unitlength}{1pt}
\begin{picture}(40,40)(-20,20)
\put(0,0){\varepsilonctor(0,1){10}}
\put(5,3){\tiny{$m+n+1$}}
\put(0,10){\varepsilonctor(2,1){10}}
\put(0,10){\line(-2,1){10}}
\put(-10,25){\line(2,1){10}}
\put(-10,25){\varepsilonctor(2,1){0}}
\put(-20,18){\tiny{$n$}}
\put(12,20){\tiny{$1$}}
\qbezier(-10,15)(-20,20)(-10,25)
\put(0,30){\varepsilonctor(2,1){20}}
\qbezier(10,15)(8,26)(2,29)
\put(10,15){\varepsilonctor(0,1){20}}
\put(21,40){\tiny{$n+1$}}
\put(-4,32){\varepsilonctor(-2,1){16}}
\put(-18,32){\tiny{$m$}}
\end{picture})\{q^m\} \simeq \hat{C}_N(\setlength{\unitlength}{1pt}
\begin{picture}(40,40)(-20,20)
\put(0,0){\varepsilonctor(0,1){10}}
\put(5,3){\tiny{$m+n+1$}}
\qbezier(0,10)(10,10)(10,15)
\put(0,10){\varepsilonctor(-2,1){10}}
\put(-10,25){\line(2,1){10}}
\put(10,35){\varepsilonctor(2,1){0}}
\put(-20,18){\tiny{$n$}}
\put(12,20){\tiny{$1$}}
\qbezier(-10,15)(-20,20)(-10,25)
\put(0,30){\varepsilonctor(2,1){20}}
\qbezier(-10,15)(8,26)(2,29)
\put(10,15){\varepsilonctor(0,1){20}}
\put(21,40){\tiny{$n+1$}}
\put(-4,32){\varepsilonctor(-2,1){16}}
\put(-18,32){\tiny{$m$}}
\end{picture})\{q^m\} \\
&& \\
& \simeq & \hat{C}_N(\setlength{\unitlength}{1pt}
\begin{picture}(40,40)(-20,20)
\put(0,0){\varepsilonctor(0,1){10}}
\put(5,3){\tiny{$m+n+1$}}
\qbezier(0,10)(10,10)(10,15)
\put(0,10){\varepsilonctor(-2,1){10}}
\put(10,35){\varepsilonctor(2,1){0}}
\put(0,25){\tiny{$n$}}
\put(12,20){\tiny{$1$}}
\qbezier(-10,15)(-10,25)(0,30)
\qbezier(-10,15)(-10,35)(-15,37.5)
\put(0,30){\varepsilonctor(2,1){20}}
\put(10,15){\varepsilonctor(0,1){20}}
\put(21,40){\tiny{$n+1$}}
\put(-15,37.5){\varepsilonctor(-2,1){5}}
\put(-18,32){\tiny{$m$}}
\end{picture})\{q^{m(n+1)}\} \simeq \hat{C}_N(\setlength{\unitlength}{1pt}
\begin{picture}(40,40)(-20,20)
\put(0,0){\varepsilonctor(0,1){10}}
\put(5,3){\tiny{$m+n+1$}}
\put(0,10){\varepsilonctor(-2,3){20}}
\put(10,35){\varepsilonctor(2,1){0}}
\put(0,25){\tiny{$n$}}
\put(12,20){\tiny{$1$}}
\qbezier(10,15)(-10,25)(0,30)
\put(0,30){\varepsilonctor(2,1){20}}
\put(10,15){\varepsilonctor(0,1){20}}
\put(0,10){\varepsilonctor(2,1){10}}
\put(21,40){\tiny{$n+1$}}
\put(-13,32){\tiny{$m$}}
\end{picture})\{q^{m(n+1)}\} \\
& \simeq & \hat{C}_N(\setlength{\unitlength}{1pt}
\begin{picture}(40,40)(-20,20)
\put(0,0){\varepsilonctor(0,1){20}}
\put(5,7){\tiny{$m+n+1$}}
\put(0,20){\varepsilonctor(1,1){20}}
\put(14,30){\tiny{$n+1$}}
\put(0,20){\varepsilonctor(-1,1){20}}
\put(-15,25){\tiny{$m$}}
\end{picture})\{q^{m(n+1)}[n+1]\}.
\end{eqnarray*}
\noindent By \cite[Proposition 3.20]{Wu-color}, the above implies that
\[
\hat{C}_N(\setlength{\unitlength}{1pt}
\begin{picture}(40,40)(-20,20)
\put(0,0){\varepsilonctor(0,1){10}}
\put(5,3){\tiny{$m+n+1$}}
\qbezier(0,10)(-20,20)(0,30)
\put(0,30){\varepsilonctor(2,1){20}}
\qbezier(0,10)(20,20)(4,28)
\put(15,32){\tiny{$n+1$}}
\put(-4,32){\varepsilonctor(-2,1){16}}
\put(-18,32){\tiny{$m$}}
\end{picture}) \simeq \hat{C}_N(\setlength{\unitlength}{1pt}
\begin{picture}(40,40)(-20,20)
\put(0,0){\varepsilonctor(0,1){20}}
\put(5,7){\tiny{$m+n+1$}}
\put(0,20){\varepsilonctor(1,1){20}}
| 3,912 | 69,513 |
en
|
train
|
0.103.16
|
\put(0,0){\varepsilonctor(0,1){10}}
\put(5,3){\tiny{$m+n+1$}}
\qbezier(0,10)(10,10)(10,15)
\put(0,10){\varepsilonctor(-2,1){10}}
\put(-10,25){\line(2,1){10}}
\put(10,35){\varepsilonctor(2,1){0}}
\put(-20,18){\tiny{$n$}}
\put(12,20){\tiny{$1$}}
\qbezier(-10,15)(-20,20)(-10,25)
\put(0,30){\varepsilonctor(2,1){20}}
\qbezier(-10,15)(8,26)(2,29)
\put(10,15){\varepsilonctor(0,1){20}}
\put(21,40){\tiny{$n+1$}}
\put(-4,32){\varepsilonctor(-2,1){16}}
\put(-18,32){\tiny{$m$}}
\end{picture})\{q^m\} \\
&& \\
& \simeq & \hat{C}_N(\setlength{\unitlength}{1pt}
\begin{picture}(40,40)(-20,20)
\put(0,0){\varepsilonctor(0,1){10}}
\put(5,3){\tiny{$m+n+1$}}
\qbezier(0,10)(10,10)(10,15)
\put(0,10){\varepsilonctor(-2,1){10}}
\put(10,35){\varepsilonctor(2,1){0}}
\put(0,25){\tiny{$n$}}
\put(12,20){\tiny{$1$}}
\qbezier(-10,15)(-10,25)(0,30)
\qbezier(-10,15)(-10,35)(-15,37.5)
\put(0,30){\varepsilonctor(2,1){20}}
\put(10,15){\varepsilonctor(0,1){20}}
\put(21,40){\tiny{$n+1$}}
\put(-15,37.5){\varepsilonctor(-2,1){5}}
\put(-18,32){\tiny{$m$}}
\end{picture})\{q^{m(n+1)}\} \simeq \hat{C}_N(\setlength{\unitlength}{1pt}
\begin{picture}(40,40)(-20,20)
\put(0,0){\varepsilonctor(0,1){10}}
\put(5,3){\tiny{$m+n+1$}}
\put(0,10){\varepsilonctor(-2,3){20}}
\put(10,35){\varepsilonctor(2,1){0}}
\put(0,25){\tiny{$n$}}
\put(12,20){\tiny{$1$}}
\qbezier(10,15)(-10,25)(0,30)
\put(0,30){\varepsilonctor(2,1){20}}
\put(10,15){\varepsilonctor(0,1){20}}
\put(0,10){\varepsilonctor(2,1){10}}
\put(21,40){\tiny{$n+1$}}
\put(-13,32){\tiny{$m$}}
\end{picture})\{q^{m(n+1)}\} \\
& \simeq & \hat{C}_N(\setlength{\unitlength}{1pt}
\begin{picture}(40,40)(-20,20)
\put(0,0){\varepsilonctor(0,1){20}}
\put(5,7){\tiny{$m+n+1$}}
\put(0,20){\varepsilonctor(1,1){20}}
\put(14,30){\tiny{$n+1$}}
\put(0,20){\varepsilonctor(-1,1){20}}
\put(-15,25){\tiny{$m$}}
\end{picture})\{q^{m(n+1)}[n+1]\}.
\end{eqnarray*}
\noindent By \cite[Proposition 3.20]{Wu-color}, the above implies that
\[
\hat{C}_N(\setlength{\unitlength}{1pt}
\begin{picture}(40,40)(-20,20)
\put(0,0){\varepsilonctor(0,1){10}}
\put(5,3){\tiny{$m+n+1$}}
\qbezier(0,10)(-20,20)(0,30)
\put(0,30){\varepsilonctor(2,1){20}}
\qbezier(0,10)(20,20)(4,28)
\put(15,32){\tiny{$n+1$}}
\put(-4,32){\varepsilonctor(-2,1){16}}
\put(-18,32){\tiny{$m$}}
\end{picture}) \simeq \hat{C}_N(\setlength{\unitlength}{1pt}
\begin{picture}(40,40)(-20,20)
\put(0,0){\varepsilonctor(0,1){20}}
\put(5,7){\tiny{$m+n+1$}}
\put(0,20){\varepsilonctor(1,1){20}}
\put(14,30){\tiny{$n+1$}}
\put(0,20){\varepsilonctor(-1,1){20}}
\put(-15,25){\tiny{$m$}}
\end{picture}) \{q^{m(n+1)}\}.
\]
This completes the induction and proves \eqref{eq-twisted-forks-+}.
\end{proof}
% (removed dataset-extraction artifact lines)
From \cite[Theorems 1.3 and 14.7]{Wu-color}, we know that the $\mathbb{Z}_2$-grading of $C_N$ and $\hat{C}_N$ is always pure and does not carry any significant information. So we do not keep track of this $\mathbb{Z}_2$-grading in the remainder of this paper. The following is the main theorem of this subsection.
\begin{theorem}\label{thm-oc-reverse-homology}
Let $\Gamma$ be a MOY graph and $\Delta$ a simple circuit of $\Gamma$. Denote by $\Gamma'$ the MOY graph obtained from $\Gamma$ by reversing the orientation and the color of edges along $\Delta$. Then, up to an overall shift of the $\mathbb{Z}_2$-grading, we have
\begin{equation}\label{eq-oc-reverse-homology}
C_N(\Gamma) \simeq C_N(\Gamma').
\end{equation}
\end{theorem}
\begin{proof}
We construct the homotopy equivalence in \eqref{eq-oc-reverse-homology} in three steps.
\emph{Step One: Modifying vertices.} Let $v$ be a vertex in $\Delta$. We demonstrate in Figure \ref{oc-reverse-vertex-fig} how to modify $v$ into its corresponding vertex $v'$ in $\Gamma'$ using homogeneous homotopy equivalence. Here, we assume that the half edges $e$ and $e_1$ belong to $\Delta$.\footnote{As mentioned in the proof of Theorem \ref{thm-oc-reverse}, depending on the type of $v$ (splitting or merging) and the choice of the two edges belonging to $\Delta$, there are four possible local configurations of $\Delta$ near $v$. (See Figure \ref{rotation-numbers-oc-reverse-index-fig} below.) Figure \ref{oc-reverse-vertex-fig} covers only one of these four possible local configurations of $\Delta$ near $v$. But the other three possibilities are obtained from this one type by a reversal of orientation or horizontal flipping or both. We leave the construction for the other three cases to the reader.}
\begin{figure}\label{oc-reverse-vertex-fig}
\end{figure}
Note that:
\begin{enumerate}
\item By Theorems \ref{thm-MOY-calculus}, \ref{thm-MOY-knotted-invariance} and Lemmas \ref{lemma-l-N-crossings}, \ref{lemma-twisted-forks}, each change in Figure \ref{oc-reverse-vertex-fig} induces a homogeneous homotopy equivalence.
\item The upper right vertex in the last step in Figure \ref{oc-reverse-vertex-fig} is identical to the vertex $v'$ in $\Gamma$ corresponding to $v$.
\end{enumerate}
\emph{Step Two: Modifying edges.} After applying Step One to every vertex along $\Delta$, each edge $e$ along $\Delta$ becomes one of the two configurations in the second row in Figure \ref{oc-reverse-edge-fig}. We further modify these two configurations as in Figure \ref{oc-reverse-edge-fig}.
\begin{figure}\label{oc-reverse-edge-fig}
\end{figure}
Note that:
\begin{enumerate}
\item By Theorem \ref{thm-MOY-calculus} and Lemmas \ref{lemma-l-N-crossings}, each change made to these two configurations in Figure \ref{oc-reverse-edge-fig} induces a homogeneous homotopy equivalence.
\item At every crossing, the branch colored by $N$ is on top.
\end{enumerate}
\emph{Step Three: Removing the unknot.} After applying Step Two to every edge along $\Delta$, we get a knotted MOY graph $D$ consisting of $\Gamma'$ and an unknot colored by $N$ that is everywhere above $\Gamma'$. We can move this unknot away from $\Gamma'$ using regular Reidemeister moves and fork sliding (given in Part (1) of Theorem \ref{thm-MOY-knotted-invariance}) and obtain a MOY graph $\widetilde{\Gamma}$. By Theorem \ref{thm-MOY-knotted-invariance}, these moves induce a homogeneous homotopy equivalence. By Part (1) of Theorem \ref{thm-MOY-calculus}, we know that removing this unknot from $\widetilde{\Gamma}$ induces a homogeneous homotopy equivalence.
Putting \textit{Steps One--Three} together, we get a homogeneous homotopy equivalence\footnote{Strictly speaking, there are two notions of homotopy equivalence involved here. That is, homotopy equivalence of matrix factorizations and homotopy equivalence of complexes of matrix factorizations. But it is easy to see that, for $C_N(\Gamma)$ and $C_N(\Gamma')$, these two notions are equivalent.} from $C_N(\Gamma)$ to $C_N(\Gamma')$. It remains to check that this homotopy equivalence preserves the quantum grading. But, by Theorem \ref{thm-MOY-calculus}, the graded dimensions of the homology of $C_N(\Gamma)$ and $C_N(\Gamma')$ are equal to $\left\langle \Gamma\right\rangle_N$ and $\left\langle \Gamma'\right\rangle_N$, which are equal by Theorem \ref{thm-oc-reverse}. So any homotopy equivalence from $C_N(\Gamma)$ to $C_N(\Gamma')$ must preserve the quantum grading. This completes the proof.
\end{proof}
Using similar techniques, we can prove that reversing the orientation and the color of a component of a link colored by elements of $\{0,1,\dots,N\}$ only changes the $\mathfrak{sl}(N)$ link homology by a grading shift.
Let $L$ be an oriented framed link in $S^3$ colored by elements of $\{0,1,\dots,N\}$. Denote by $\mathcal{K}$ the set of components of $L$ and by $\mathsf{c}: \mathcal{K} \rightarrow \{0,1,\dots,N\}$ the color function of $L$. That is, for any component $K$ of $L$, the color of $K$ is $\mathsf{c}(K) \in \{0,1,\dots,N\}$. Furthermore, for any component $K$ of $L$, denote by $w(K)$ the writhe of $K$ and, for any two components $K, ~K'$ of $L$, denote by $l(K,K')$ the linking number of $K, ~K'$.
\begin{theorem}\label{thm-oc-reverse-homology-link}
Suppose $K$ is a component of $L$ and the colored framed oriented link $L'$ is obtained from $L$ by reversing the orientation and the color of $K$. Then, up to an overall shift of the $\mathbb{Z}_2$-grading,
\begin{equation}\label{eq-oc-reverse-homology-link}
\hat{C}_N(L') \simeq \hat{C}_N(L) ~\| s \|~ \{ q^{-s}\},
\end{equation}
where
\[
s = (N-2\mathsf{c}(K))w(K) - 2\sum_{K' \in \mathcal{K}\setminus \{K\}} \mathsf{c}(K') l(K,K').
\]
In particular,
\begin{equation}\label{eq-oc-reverse-poly-link}
\left\langle L' \right\rangle_N = (-1)^{N w(K)} \cdot q^{-s} \cdot \left\langle L \right\rangle_N.
\end{equation}
\end{theorem}
\begin{figure}\label{oc-reverse-homology-link-fig1}
\end{figure}
\begin{proof}
Suppose the color of $K$ is $m$. In a small segment of $K$, create a ``bubble'' as in the first step in Figure \ref{oc-reverse-homology-link-fig1}. Then, using fork sliding (Part (1) of Theorem \ref{thm-MOY-knotted-invariance}) and Reidemeister moves of type (II), we can push the left vertex of this bubble along $K$ until it is back in the same small segment of $K$. This is shown in step two in Figure \ref{oc-reverse-homology-link-fig1}. The last two steps in Figure \ref{oc-reverse-homology-link-fig1} are local and self-explanatory. The end result of all these changes is a link $L_1$ consisting of $L'$ and an extra component $\widetilde{K}$ colored by $N$ that is obtained by slightly pushing $K$ in the direction of its framing. (So $\widetilde{K}$ is isotopic to $K$.) By Theorems \ref{thm-MOY-calculus}, \ref{thm-MOY-knotted-invariance} and Lemma \ref{lemma-l-N-crossings}, each step in Figure \ref{oc-reverse-homology-link-fig1} induces a homogeneous homotopy equivalence that preserves both the quantum grading and the homological grading. So
\[
\hat{C}_N(L_1) \simeq \hat{C}_N(L).
\]
By switching the upper- and lower-branches at crossings, we can unlink $\widetilde{K}$ from every component of $L'$. From relations \eqref{eq-l-N--to+} and \eqref{eq-l-N-+to-} in Lemma \ref{lemma-l-N-crossings}, we know that unlinking $\widetilde{K}$ from a component $K'$ of $L'$ shifts the homological grading by $- 2 \mathsf{c}(K') l(K,K')$ and the quantum grading by $2 \mathsf{c}(K') l(K,K')$. Note that:
\begin{itemize}
\item If $K'$ is the component of $L'$ obtained by reversing the orientation and the color of $K$, then $\mathsf{c}(K') = N- \mathsf{c}(K)$ and $l(K,K') = -w(K)$.
\item If $K'$ is any other component of $L'$, then $K'$ is also a component of $L$. More precisely, $K' \in \mathcal{K}\setminus \{K\}$.
\end{itemize}
Thus, unlinking $\widetilde{K}$ from $L'$ shifts the homological grading by $$2(N-\mathsf{c}(K))w(K) - 2\sum_{K' \in \mathcal{K}\setminus \{K\}} \mathsf{c}(K') l(K,K') = Nw(K) + s$$ and the quantum grading by $$-2(N-\mathsf{c}(K))w(K) + 2\sum_{K' \in \mathcal{K}\setminus \{K\}} \mathsf{c}(K') l(K,K') = -Nw(K) - s.$$
In other words, we have
\[
\hat{C}_N(L' \sqcup \widetilde{K}) \simeq \hat{C}_N(L_1) ~\| Nw(K) + s \|~ \{ q^{-Nw(K) - s} \},
\]
where $L' \sqcup \widetilde{K}$ is $L'$ plus a copy of $\widetilde{K}$ that is unlinked to $L'$.
Next, using \eqref{eq-l-N-+} and \eqref{eq-l-N--} in Lemma \ref{lemma-l-N-crossings}, we can change $\widetilde{K}$ (which is now not linked to $L'$) into an unlink $U$ with Seifert framing (which is not linked to $L'$) and get
\[
\hat{C}_N ( U ) \simeq \hat{C}_N (\widetilde{K}) \| -Nw(K) \| \{ q^{Nw(K)}\}.
\]
Putting the above together, we get
\begin{equation}\label{eq-oc-reverse-homology-link-proof-1}
\hat{C}_N(L' \sqcup U) \simeq \hat{C}_N(L) ~\| s \|~ \{ q^{-s}\}.
\end{equation}
Finally, by Part (1) of Theorem \ref{thm-MOY-calculus}, we have
\begin{equation}\label{eq-oc-reverse-homology-link-proof-2}
\hat{C}_N(L') \simeq \hat{C}_N(L' \sqcup U).
\end{equation}
Homotopy equivalence \eqref{eq-oc-reverse-homology-link} follows from \eqref{eq-oc-reverse-homology-link-proof-1} and \eqref{eq-oc-reverse-homology-link-proof-2}.
\end{proof}
For a knotted MOY graph, we also have a notion of simple circuits. For a knotted MOY graph $D$, a subgraph $\Delta$ of $D$ is called a simple circuit if
\begin{enumerate}[(i)]
\item $\Delta$ is a diagram of a knot;
\item the orientations of all edges of $\Delta$ coincide with the same orientation of this knot.
\end{enumerate}
Combining the tricks used in the proofs of Theorems \ref{thm-oc-reverse-homology} and \ref{thm-oc-reverse-homology-link}, one can prove the following corollary. We leave the proof to the reader.
\begin{corollary}\label{cor-oc-reverse-homology-general}
Let $D$ be a knotted MOY graph and $\Delta$ a simple circuit of $D$. Denote by $D'$ the knotted MOY graph obtained from $D$ by reversing the orientation and the color of edges along $\Delta$. Then, up to an overall shift of the $\mathbb{Z}_2$-, quantum and homological gradings, $\hat{C}_N(D)$ is homotopic to $\hat{C}_N(D')$.
In particular, $\left\langle D \right\rangle_N$ and $\left\langle D' \right\rangle_N$ differ from each other only by a factor of the form $\pm q^k$.
\end{corollary}
| 3,432 | 69,513 |
en
|
train
|
0.103.18
|
\section{An Explicit $\mathfrak{so}(6)$ Kauffman Homology}
Theorem \ref{thm-oc-reverse-homology-link} implies that the $N$-colored $\mathfrak{sl}(2N)$ link homology is essentially an invariant of unoriented links. If $N=1$, this homology is the well known Khovanov homology \cite{K1}. In this section, we shed some light on the $2$-colored $\mathfrak{sl}(4)$ link homology. Specifically, we use results from the first two sections to verify that, up to normalization, the $2$-colored $\mathfrak{sl}(4)$ Reshetikhin-Turaev link polynomial is equal to the $\mathfrak{so}(6)$ Kauffman polynomial and, therefore, up to normalization, the $2$-colored $\mathfrak{sl}(4)$ link homology categorifies the $\mathfrak{so}(6)$ Kauffman polynomial. We do so by comparing the Jaeger Formula for the $\mathfrak{so}(6)$ KV polynomial (equation \eqref{eq-Jaeger-formula-N-graph} with $N=3$) to the composition product of the MOY polynomial associated to the embedding $\mathfrak{sl}(1)\times\mathfrak{sl}(3)\hookrightarrow \mathfrak{sl}(4)$.
\begin{remark}\label{remark-approaches}
There is an alternative approach to the above result. Basically, one can apply Corollary \ref{cor-oc-reverse-homology-general} to the $\mathfrak{sl}(4)$ MOY polynomial of MOY resolutions of $2$-colored link diagrams and keep track of the shifting of the quantum grading while doing so. This would allow one to show that the $2$-colored $\mathfrak{sl}(4)$ Reshetikhin-Turaev link polynomial satisfies the skein relation \eqref{Kauffman-skein} for the $\mathfrak{so}(6)$ Kauffman polynomial. We leave it to the reader to figure out the details of this approach.
Since the coincidence of the $\mathfrak{so}(6)$ Jaeger Formula and the $\mathfrak{sl}(1)\times\mathfrak{sl}(3)\hookrightarrow \mathfrak{sl}(4)$ composition product is itself interesting, we choose to use this coincidence in our proof.
\end{remark}
\subsection{Renormalizing the $N$-colored $\mathfrak{sl}(2N)$ link homology} We start by renormalizing the $N$-colored $\mathfrak{sl}(2N)$ link homology to make it independent of the orientation.
\begin{definition}\label{def-2N-homology-renormalized}
Let $L$ be an oriented framed link that is colored entirely by $N$. Assume the writhe of $L$ is $w(L)$. We define
\begin{eqnarray}
\label{eq-renormal-homology} \widetilde{C}_{2N}(L) & = & \hat{C}_{2N}(L) \|-\frac{N}{2}w(L)\| \{q^{\frac{N}{2}w(L)}\}, \\
\label{eq-renormal-polynomial} \widetilde{R}_{2N}(L) & = & (-q)^{\frac{N}{2}w(L)} \left\langle L \right\rangle_{2N}.
\end{eqnarray}
Denote by $\widetilde{H}_{2N}(L)$ the homology of $\widetilde{C}_{2N}(L)$.
Note that, if $N$ is odd, then $\frac{N}{2}w(L) \in \frac{1}{2}\mathbb{Z}$. In this case, $\widetilde{C}_{2N}(L)$ and $\widetilde{H}_{2N}(L)$ are $(\frac{1}{2}\mathbb{Z})\oplus(\frac{1}{2}\mathbb{Z})$-graded.
\end{definition}
\begin{lemma}\label{lemma-independence-orientation}
The homotopy type of $\widetilde{C}_{2N}(L)$, with its quantum and homological gradings, is independent of the orientation of $L$.
Consequently, $\widetilde{R}_{2N}(L)$ does not depend on the orientation of $L$.
\end{lemma}
\begin{proof}
This follows easily from Theorem \ref{thm-oc-reverse-homology-link}.
\end{proof}
\subsection{The composition product} Now we review the composition product established in \cite{Wu-color-MFW}.
\begin{definition}\label{def-MOY-label}
Let $\Gamma$ be a MOY graph. Denote by $\mathsf{c}$ its color function. That is, for every edge $e$ of $\Gamma$, the color of $e$ is $\mathsf{c}(e)$. A labeling $\mathsf{f}$ of $\Gamma$ is a MOY coloring of the underlying oriented trivalent graph of $\Gamma$ such that $\mathsf{f}(e)\leq \mathsf{c}(e)$ for every edge $e$ of $\Gamma$.
Denote by $\mathcal{L}(\Gamma)$ the set of all labelings of $\Gamma$. For every $\mathsf{f} \in \mathcal{L}(\Gamma)$, denote by $\Gamma_{\mathsf{f}}$ the MOY graph obtained by re-coloring the underlying oriented trivalent graph of $\Gamma$ using $\mathsf{f}$.
For every $\mathsf{f} \in \mathcal{L}(\Gamma)$, define a function $\bar{\mathsf{f}}$ on $E(\Gamma)$ by $\bar{\mathsf{f}}(e)= \mathsf{c}(e)- \mathsf{f}(e)$ for every edge $e$ of $\Gamma$. It is easy to see that $\bar{\mathsf{f}}\in \mathcal{L}(\Gamma)$.
Let $v$ be a vertex of $\Gamma$ of either type in Figure \ref{fig-MOY-vertex}. (Note that, in either case, $e_1$ is to the left of $e_2$ when one looks in the direction of $e$.) For every $\mathsf{f} \in \mathcal{L}(\Gamma)$, define
\[
[v|\Gamma|\mathsf{f}] = \frac{1}{2} (\mathsf{f}(e_1)\bar{\mathsf{f}}(e_2) - \bar{\mathsf{f}}(e_1)\mathsf{f}(e_2)).
\]
\end{definition}
The following is the composition product established in \cite{Wu-color-MFW}.
\begin{theorem}\cite[Theorem 1.7]{Wu-color-MFW}\label{THM-composition-product}
Let $\Gamma$ be a MOY graph. For positive integers $M$, $N$ and $\mathsf{f} \in \mathcal{L}(\Gamma)$, define
\[
\sigma_{M,N}(\Gamma,\mathsf{f}) = M \cdot \mathrm{rot}(\Gamma_{\bar{\mathsf{f}}}) - N \cdot \mathrm{rot}(\Gamma_{\mathsf{f}}) + \sum_{v\in V(\Gamma)} [v|\Gamma|\mathsf{f}],
\]
where the rotation number $\mathrm{rot}$ is defined in \eqref{eq-rot-gamma}. Then
\begin{equation}\label{eq-composition-product}
\left\langle \Gamma \right\rangle_{M+N} = \sum_{\mathsf{f} \in \mathcal{L}(\Gamma)} q^{\sigma_{M,N}(\Gamma,\mathsf{f})} \cdot \left\langle \Gamma_{\mathsf{f}} \right\rangle_M \cdot \left\langle \Gamma_{\bar{\mathsf{f}}} \right\rangle_N.
\end{equation}
\end{theorem}
\begin{remark}
The composition product \eqref{eq-composition-product} can be viewed as induced by the embedding $\mathfrak{su}(M)\times\mathfrak{su}(N)\hookrightarrow \mathfrak{su}(M+N)$. The Jaeger Formula \eqref{eq-Jaeger-formula-N} and \eqref{eq-Jaeger-formula-N-graph} can be viewed as induced by the embedding $\mathfrak{su}(N)\hookrightarrow \mathfrak{so}(2N)$. In \cite{Chen-Reshetikhin}, Chen and Reshetikhin presented an extensive study of formulas of the (uncolored) HOMFLY-PT and Kauffman polynomials induced by these and other embeddings.
\end{remark}
| 1,950 | 69,513 |
en
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.