set
stringclasses
1 value
id
stringlengths
5
9
chunk_text
stringlengths
1
115k
chunk_num_tokens
int64
1
106k
document_num_tokens
int64
58
521k
document_language
stringclasses
2 values
train
0.90.2
where $C(s,a)$ is the optimal value function for a task with reward \begin{equation}\label{eq:std_convex_C_def} r_C(s,a) = f(r(s,a)) + \gamma \E_{s' \sim p} V_f(s') - f(Q(s,a)); \end{equation} that is, $C$ satisfies the following recursive equation: \begin{equation} C(s,a) = r_C(s,a) + \gamma \E_{s' \sim p} \max_{a'} C(s',a'). \end{equation} \end{lemma} With this result, we have a double-sided bound on the values of the optimal $Q$-function for the composite task. In particular, the lower bound ($f(Q)$) provides a zero-shot approximation for the optimal $Q$-function. It is thus of interest to analyze how well a policy $\pi_f$ extracted from such an estimate ($f(Q)$) might perform. To this end, we provide the following result which bounds the suboptimality of $\pi_f$ as compared to the optimal policy. \begin{lemma} Consider the value of the policy $\pi_f(s) = \text{argmax}_{a} f(Q(s,a))$ on the transformed task of interest, denoted by $\widetilde{Q}^{\pi_f}(s,a)$. The sub-optimality of $\pi_f$ is then upper bounded by: \begin{equation} \widetilde{Q}(s,a) - \widetilde{Q}^{\pi_f}(s,a) \leq D(s,a) \end{equation} where $D$ is the value of the policy $\pi_f$ in a task with reward \begin{align*} r_D(s,a) = \gamma \E_{s'\sim p}\E_{a' \sim \pi_f} &\biggl[ \max_{b} \big\{ f(Q(s',b)) + C(s',b) \big\} \\ &- f(Q(s',a')) \biggr] \end{align*} that is, $D$ satisfies the following recursive equation: \begin{equation} D(s,a) = r_D(s,a) + \gamma \E_{s'\sim p} \E_{a' \sim \pi_f} D(s',a'). \end{equation} \end{lemma} Interestingly, the previous result shows that for functions $f$ admitting a tight double-sided bound (that is, a relatively small value of $C$), the associated zero-shot policy $\pi_f$ can be expected to perform near-optimally in the composite domain. Another class of functions for which general bounds can be derived arises when $f$ satisfies the following ``reverse'' conditions. 
\begin{lemma}[Concave Conditions]\label{thm:concave_cond_std} Given a primitive task with discount factor $\gamma$ and a bounded, continuous transformation function $f~:~X~\to~\mathbb{R}$ which satisfies: \begin{enumerate} \item $f$ is concave on its domain $X$\textsuperscript{\ref{dynamics condition}}; \item $f$ is superlinear: \begin{enumerate}[label=(\roman*)] \item $f(x+y) \geq f(x) + f(y)$ for all $x,y \in X$ \item $f(\gamma x) \geq \gamma f(x)$ for all $x \in X$ \end{enumerate} \item $f\left( \max_{a} \mathcal{Q}(s,a) \right) \geq \max_{a}~f\left( \mathcal{Q}(s,a) \right)$ for all functions $\mathcal{Q}:~\mathcal{S}~\times~\mathcal{A} \to X.$ \end{enumerate} then the optimal action-value functions are now related in the following way: \begin{equation}\label{eqn:concave_std} f(Q(s,a)) - \hat{C}(s,a) \leq \widetilde{Q}(s,a) \leq f(Q(s,a)) \end{equation} where $\hat{C}$ is the optimal value function for a task with reward \begin{equation} \hat{r}_C(s,a) = f(Q(s,a)) - f(r(s,a)) - \gamma \E_{s' \sim p} V_f(s'). \end{equation} \end{lemma} One obvious way to satisfy the final condition in the preceding lemma is to consider functions $f(x)$ which are monotonically increasing. Note that the definitions of $C$ and $\hat{C}$ guarantee them to be positive, as is required for the bounds to be meaningful (this statement is shown explicitly in the Supplementary Material). Furthermore, by again considering the derived policy $\pi_f(a|s)$, we next provide a similar result for concave conditions, noting the difference in definitions between $D$ and $\hat{D}$. \begin{lemma} Consider the value of the policy $\pi_f(s) = \text{argmax}_{a} f(Q(s,a))$ on the transformed task of interest, denoted by $\widetilde{Q}^{\pi_f}(s,a)$. 
The sub-optimality of $\pi_f$ is then upper bounded by: \begin{equation} \widetilde{Q}(s,a) - \widetilde{Q}^{\pi_f}(s,a) \leq \hat{D}(s,a) \end{equation} where $\hat{D}$ is the value of the policy $\pi_f$ in a task with reward \begin{equation*} \hat{r}_D = \gamma \underset{s'\sim p}{\mathbb{E}} \underset{a' \sim \pi_f}{\mathbb{E}} \biggl[ V_f(s') - f(Q(s',a')) + \hat{C}(s',a') \biggr] \end{equation*} \end{lemma} \renewcommand{\arraystretch}{1.5} \begin{table}[ht] \centering \begin{tabular}{ll} \toprule \multicolumn{2}{c}{Standard RL Results} \\ \cmidrule(r){1-2} Transformation & Result \\ \midrule Linear Map: & $\widetilde{Q}(s,a) = k Q(s,a)$ \\ Convex conditions: & $\widetilde{Q}(s,a) \geq f(Q(s,a))$ \\ Concave conditions: & $\widetilde{Q}(s,a) \leq f(Q(s,a))$ \\ OR Composition: & $\widetilde{Q}(s,a) \geq \max_k \{Q^{(k)}(s,a)\}$ \\ AND Composition: & $\widetilde{Q}(s,a) \leq \min_{k} \{ Q^{(k)}(s,a)\}$ \\ NOT Gate: & $ \widetilde{Q}(s,a) \geq - Q(s,a)$ \\ Conical combination: & $\widetilde{Q}(s,a) \leq \sum_k \alpha_k Q^{(k)}(s,a)$ \\ \bottomrule \end{tabular} \caption{\textbf{Standard Transfer Library.} Lemmas \ref{thm:convex_cond_std}, \ref{thm:concave_cond_std} stated in Section \ref{sec:std_lemmas} lead to a broad class of applicable transfer functions in standard RL. In this table we list several common examples which are demonstrated throughout the paper and in the Supplementary Materials. We show only one side of the bounds from Eq. \eqref{eqn:convex_std}, \eqref{eqn:concave_std} which requires no additional training.}\label{tab:std_rl} \end{table} We remark that the conditions imposed on the function $f$ are not very restrictive. For example, the Boolean functions and linear combinations considered in previous work are all included in our framework, while we also include novel transformations not considered in previous work (see Table~\ref{tab:std_rl}). 
Furthermore, the conditions for $f$ can be further relaxed if specific conditions are met. For the case of deterministic dynamics, the first condition is not required ($f$ need not be convex nor concave). We have shown that in the standard RL case, quite general conditions (convexity and sublinearity) lead to a wide class of applicable functions defining the Transfer Library. The conditions given in Lemmas \ref{thm:convex_cond_std} and \ref{thm:concave_cond_std} are straightforward to check for general functions. When given a primitive task defined by a reward function $r$, one can therefore bound the optimal $Q$ function for a general transformation of the rewards, $f(r)$, when $f$ obeys the conditions above. This new set of transformed tasks defines the Transfer Library from a given set of primitive tasks. The previous (and following) results are presented for the case in which the primitive task $Q$-values are known \textit{exactly}. In practice, however, this is not typically the case, even in tabular settings. In continuous environments where the use of function approximators is necessary, the error that is present in learned $Q$-values is further increased. To address this issue, we provide an extension of all double-sided bounds for the case where an $\varepsilon$-optimal estimate of the primitive task's $Q$-values is known, such that $|Q(s,a)~-~\bar{Q}(s,a)|~\leq~\varepsilon$ for all $s,a$. To derive such an extension, we further require that the composition function $f$ is $L$-Lipschitz continuous (essentially a bounded first derivative), i.e. $|f(x_1)-f(x_2)|\leq L|x_1-x_2|$ for all $x_1,x_2 \in X$, the domain of $f$ (in the present case, the $x_i$ are the primitive task's $Q$-values). To maintain the focus of the main text, we provide these results and the corresponding proofs in the Supplementary Material. We note that all functions listed in Table \ref{tab:std_rl} and \ref{tab:entropy-regularized} are indeed $1$-Lipschitz continuous. 
\subsection{Generalization to Composition of Primitive Tasks}\label{sec:comp_std} The previous lemmas can be extended to the case of multivariable transformations (see Supplementary Material for details), where $X \to \bigotimes X^{(k)}$ (the Cartesian product of primitive codomains). That is, with a function $F: \bigotimes X^{(k)} \to \mathbb{R}$ and a collection of $M$ subtasks, $\{r^{(k)}(s,a)\}_{k=1}^{M}$, one can synthesize a new, \textbf{composition of subtasks}, with reward defined by $r^{(c)}(s,a) = F(r^{(1)}(s,a), \dotsc, r^{(M)}(s,a))$. In this vectorized format, $F$ must obey the above conditions in each argument: \begin{itemize} \item $F$ is convex (concave) in each argument, \item $F$ is sublinear (superlinear) in each argument. \end{itemize} For the final conditions, we also require a similar vectorized inequality, which we spell out in detail in the Supplementary Material. As an example of composition in standard RL, we consider the possible sums of reward functions, with each task having a positive weight associated to it. In such a setup, the agent has first learned to solve a set of primitive tasks; it must then solve a task with a new compositely-defined reward function, say $f\left(r^{(1)}, \dotsc, r^{(M)}\right)~\doteq~\sum_{k=1}^{M} \alpha_k r^{(k)}$ for (possibly many) target tasks defined by the weights $\{\alpha_k\}$. To determine which bound is satisfied for such a composition function, we look to the vectorized conditions above. This function is linear in all arguments, so we must only check the final condition. Since the inequality \begin{equation} \sum_k \alpha_k \max_a Q^{(k)}(s,a) \geq \max_a \sum_k \alpha_k Q^{(k)}(s,a) \end{equation} holds for any set of $\alpha_k > 0$, this function conforms to the concave vectorized conditions, implying that $\widetilde{Q}(s,a)~\leq~f(Q^{(k)}(s,a))=\sum_k~\alpha_k Q^{(k)}(s,a)$. 
We can then use the right-hand side of this bound to calculate the associated state-value function ($V_f(s) = \max_a f(Q^{(k)}(s,a))$) and the associated greedy policy ($\pi_f(s) = \text{argmax}_a f(Q^{(k)}(s,a))$). This result agrees with an independent result by \cite{nemecek} (the upper bound in Theorem 1 therein) without accounting for approximation errors. \section{Entropy-Regularized RL}\label{sec:entropy-regularized_lemmas} \subsection{Transformation of Primitive Task} We will now extend the results obtained in the previous section to the case of entropy-regularized RL. Again we first consider the single-reward transformation $f(r)$ for some function $f$. Here we state the conditions that must be met by functions $f$, which define the Transfer Library for entropy-regularized RL. We now use the following definitions in the subsequent (entropy-regularized RL) results. In the following results, we set $\beta=1$ for brevity, and the expectation in the final condition is understood to be over actions, sampled from the prior policy. Full details can be found in the proofs provided in the Supplementary Material. 
\begin{lemma}[Convex Conditions] \label{thm:forward_cond_entropy-regularized} Given a primitive task with discount factor $\gamma$ and a bounded, continuous transformation function $f~:~X~\to~\mathbb{R}$ which satisfies: \begin{enumerate} \item $f$ is convex on its domain $X$\textsuperscript{\ref{dynamics condition}}; \item $f$ is sublinear: \begin{enumerate}[label=(\roman*)] \item $f(x+y) \leq f(x) + f(y)$ for all $x,y \in X$ \item $f(\gamma x) \leq \gamma f(x)$ for all $x \in X$ \end{enumerate} \item $f\left( \log \E \exp \mathcal{Q}(s,a) \right) \leq \log \E \exp f\left( \mathcal{Q}(s,a) \right)$ for all functions $\mathcal{Q}:~\mathcal{S}~\times~\mathcal{A} \to X.$ \end{enumerate} then the optimal action-value function for the transformed rewards, $\widetilde{Q}$, is now related to the optimal action-value function with respect to the original rewards by: \begin{equation}\label{eq:convex_entropy-regularized} f \left( Q(s,a) \right) \leq \widetilde{Q}(s,a) \leq f \left( Q(s,a) \right) + C(s,a) \end{equation} \end{lemma}
3,846
49,178
en
train
0.90.3
\subsection{Generalization to Composition of Primitive Tasks}\label{sec:comp_std} The previous lemmas can be extended to the case of multivariable transformations (see Supplementary Material for details), where $X \to \bigotimes X^{(k)}$ (the Cartesian product of primitive codomains). That is, with a function $F: \bigotimes X^{(k)} \to \mathbb{R}$ and a collection of $M$ subtasks, $\{r^{(k)}(s,a)\}_{k=1}^{M}$, one can synthesize a new, \textbf{composition of subtasks}, with reward defined by $r^{(c)}(s,a) = F(r^{(1)}(s,a), \dotsc, r^{(M)}(s,a))$. In this vectorized format, $F$ must obey the above conditions in each argument: \begin{itemize} \item $F$ is convex (concave) in each argument, \item $F$ is sublinear (superlinear) in each argument. \end{itemize} For the final conditions, we also require a similar vectorized inequality, which we spell out in detail in the Supplementary Material. As an example of composition in standard RL, we consider the possible sums of reward functions, with each task having a positive weight associated to it. In such a setup, the agent has first learned to solve a set of primitive tasks; it must then solve a task with a new compositely-defined reward function, say $f\left(r^{(1)}, \dotsc, r^{(M)}\right)~\doteq~\sum_{k=1}^{M} \alpha_k r^{(k)}$ for (possibly many) target tasks defined by the weights $\{\alpha_k\}$. To determine which bound is satisfied for such a composition function, we look to the vectorized conditions above. This function is linear in all arguments, so we must only check the final condition. Since the inequality \begin{equation} \sum_k \alpha_k \max_a Q^{(k)}(s,a) \geq \max_a \sum_k \alpha_k Q^{(k)}(s,a) \end{equation} holds for any set of $\alpha_k > 0$, this function conforms to the concave vectorized conditions, implying that $\widetilde{Q}(s,a)~\leq~f(Q^{(k)}(s,a))=\sum_k~\alpha_k Q^{(k)}(s,a)$. 
We can then use the right-hand side of this bound to calculate the associated state-value function ($V_f(s) = \max_a f(Q^{(k)}(s,a))$) and the associated greedy policy ($\pi_f(s) = \text{argmax}_a f(Q^{(k)}(s,a))$). This result agrees with an independent result by \cite{nemecek} (the upper bound in Theorem 1 therein) without accounting for approximation errors. \section{Entropy-Regularized RL}\label{sec:entropy-regularized_lemmas} \subsection{Transformation of Primitive Task} We will now extend the results obtained in the previous section to the case of entropy-regularized RL. Again we first consider the single-reward transformation $f(r)$ for some function $f$. Here we state the conditions that must be met by functions $f$, which define the Transfer Library for entropy-regularized RL. We now use the following definitions in the subsequent (entropy-regularized RL) results. In the following results, we set $\beta=1$ for brevity, and the expectation in the final condition is understood to be over actions, sampled from the prior policy. Full details can be found in the proofs provided in the Supplementary Material. 
\begin{lemma}[Convex Conditions] \label{thm:forward_cond_entropy-regularized} Given a primitive task with discount factor $\gamma$ and a bounded, continuous transformation function $f~:~X~\to~\mathbb{R}$ which satisfies: \begin{enumerate} \item $f$ is convex on its domain $X$\textsuperscript{\ref{dynamics condition}}; \item $f$ is sublinear: \begin{enumerate}[label=(\roman*)] \item $f(x+y) \leq f(x) + f(y)$ for all $x,y \in X$ \item $f(\gamma x) \leq \gamma f(x)$ for all $x \in X$ \end{enumerate} \item $f\left( \log \E \exp \mathcal{Q}(s,a) \right) \leq \log \E \exp f\left( \mathcal{Q}(s,a) \right)$ for all functions $\mathcal{Q}:~\mathcal{S}~\times~\mathcal{A} \to X.$ \end{enumerate} then the optimal action-value function for the transformed rewards, $\widetilde{Q}$, is now related to the optimal action-value function with respect to the original rewards by: \begin{equation}\label{eq:convex_entropy-regularized} f \left( Q(s,a) \right) \leq \widetilde{Q}(s,a) \leq f \left( Q(s,a) \right) + C(s,a) \end{equation} \end{lemma} \begin{table}[ht] \centering \begin{tabular}{ll} \toprule \multicolumn{2}{c}{Entropy-Regularized RL Results} \\ \cmidrule(r){1-2} Transformation& Result\\ \midrule Linear Map, $k \in (0,1)$\tablefootnote{Note that linear reward scaling can also be viewed as a linear scaling in the temperature parameter.}:& $\widetilde{Q}(s,a) \geq k Q(s,a) $ \\ Linear Map, $k > 1$:& $\widetilde{Q}(s,a) \leq k Q(s,a)$ \\ Convex conditions: & $\widetilde{Q} \geq f(Q(s,a))$ \\ Concave conditions: & $\widetilde{Q} \leq f(Q(s,a))$ \\ OR Composition: & $\widetilde{Q}(s,a) \geq \max_k \{Q^{(k)}(s,a)\}$ \\ AND Composition: & $ \widetilde{Q}(s,a) \leq \min_k \{ Q^{(k)}(s,a)\}$ \\ NOT Gate: & $\widetilde{Q}(s,a) \geq -Q(s,a)$ \\ Convex combination\tablefootnote{This extends to the case $\sum_k \alpha_k \geq 1$ by composing with a linear scaling, which respects the same inequality.}: & $\widetilde{Q}(s,a) \leq \sum_k \alpha_k Q^{(k)}(s,a)$ \\ \bottomrule 
\end{tabular} \caption{\textbf{Entropy-Regularized Transfer Library.} Lemmas \ref{thm:forward_cond_entropy-regularized}, \ref{thm:reverse_cond_entropy-regularized} lead to a broad class of applicable transfer functions in entropy-regularized RL. In this table we list several common examples which are demonstrated throughout the paper and in the Supplementary Materials. We show only one side of the bounds from Eq. \eqref{eq:convex_entropy-regularized}, \eqref{eq:concave_entropy-regularized} which requires no additional training.}\label{tab:entropy-regularized} \end{table} We note that $C$ has the same definition as before, but with $V_f$ replaced by its entropy-regularized analog: $V_f(s)~\doteq~\log \E_{a \sim \pi_0} \exp f\left(Q(s,a)\right)$. \begin{lemma} Consider the soft value of the policy $\pi_f(a|s)~=~\pi_0(a|s)\exp\left( f(Q(s,a)) - V_f(s) \right)$ on the transformed task of interest, denoted by $\widetilde{Q}^{\pi_f}(s,a)$. The sub-optimality of $\pi_f$ is then upper bounded by: \begin{equation} \widetilde{Q}(s,a) - \widetilde{Q}^{\pi_f}(s,a) \leq D(s,a) \end{equation} where $D$ is the soft value of the policy $\pi_f$ with reward \begin{equation*} r_D(s,a) = \gamma \E_{s'} \left[ \max_{b} \left\{ f\left(Q(s',b)\right) + C(s',b) \right\} -V_f(s') \right]. 
\end{equation*} \end{lemma} Conversely, for concave conditions we have \begin{lemma}[Concave Conditions] \label{thm:reverse_cond_entropy-regularized} Given a primitive task with discount factor $\gamma$ and a bounded, continuous transformation function $f~:~X~\to~\mathbb{R}$ which satisfies: \begin{enumerate} \item $f$ is concave on its domain $X$\textsuperscript{\ref{dynamics condition}}; \item $f$ is superlinear: \begin{enumerate}[label=(\roman*)] \item $f(x+y) \geq f(x) + f(y)$ for all $x,y \in X$ \item $f(\gamma x) \geq \gamma f(x)$ for all $x \in X$ \end{enumerate} \item $f\left( \log \E \exp \mathcal{Q}(s,a) \right) \geq \log \E \exp f\left( \mathcal{Q}(s,a) \right)$ for all functions $\mathcal{Q}:~\mathcal{S}~\times~\mathcal{A} \to X$. \end{enumerate} then the optimal action-value function for the transformed rewards obeys the following inequality: \begin{equation}\label{eq:concave_entropy-regularized} f\left( Q(s,a) \right) - \hat{C}(s,a) \leq \widetilde{Q}(s,a) \leq f \left( Q(s,a) \right) \end{equation} \end{lemma} As in the preceding section, we provide a similar result for the derived policy $\pi_{f}$, given the concave conditions provided. \begin{lemma} Consider the soft value of the policy $\pi_f(a|s)$ on the transformed task of interest, denoted by $\widetilde{Q}^{\pi_f}(s,a)$. The sub-optimality of $\pi_f$ is then upper bounded by: \begin{equation} \widetilde{Q}(s,a) - \widetilde{Q}^{\pi_f}(s,a) \leq \hat{D}(s,a) \end{equation} where $\hat{D}$ satisfies the following recursive equation \begin{equation} \hat{D}(s,a) = \gamma \E_{s' \sim p}\E_{a' \sim \pi_f} \left( \hat{C}(s',a') + \hat{D}(s',a') \right). \end{equation} \label{lem:concave_regret_maxent} \end{lemma} Now, by taking $V_f(s)$ as the previously defined \textit{soft} value function, the fixed points $C$ and $\hat{C}$ have the same definitions as presented in Lemmas \ref{thm:convex_cond_std} and \ref{thm:concave_cond_std}, respectively, with this new definition of $V_f$. 
This final constraint (in Lemmas \ref{thm:forward_cond_entropy-regularized} and \ref{thm:reverse_cond_entropy-regularized}) on $f$ arises out of the requirements for extending the previous results to entropy-regularized RL. Although the final condition (similar to a log-convexity) appears somewhat cumbersome, we show that it is nevertheless possible to satisfy it for several non-trivial functions (Table~\ref{tab:entropy-regularized}). For instance, we can now handle functions defining Boolean composition over subtasks ($\max(\cdot), \ \min(\cdot)$), which have not been considered in previous entropy-regularized results \citep{Haarnoja2018, vanNiekerk}, as well as new functional transformations such as the NOT gate (Table~\ref{tab:entropy-regularized}). \begin{figure*} \caption{In the first panel, we show learning curves for each of the clipping methods proposed, averaged over $50$ trials, with a 95\% confidence interval shown in the shaded region. In the next two panels, we depict the primitive tasks with rewarding states (orange diamonds) on the left side and bottom side of the maze, respectively. In the rightmost panel, we show the composite task of interest, with the multivariable ``OR'' composition function $\max_k\{\cdot\}$.} \label{fig:tasks} \end{figure*} \subsection{Generalization to Composition of Primitive Tasks} As we have done in the standard RL setting (Section \ref{sec:comp_std}), we can also extend the previous results to include compositionality: functions operating over multiple primitive tasks. In this case, \cite{Haarnoja2018} have demonstrated a special case of Lemma \ref{thm:reverse_cond_entropy-regularized} for the composition function $f(\{r^{(k)}\}) = \sum_k \alpha_k r^{(k)}$ for convex weights $\alpha_k$. This can also be shown in our framework by proving the final condition of Lemma \ref{thm:reverse_cond_entropy-regularized} (since the others are automatic given that $f$ is linear). This vectorized condition can be proven via H\"older's inequality. 
Besides this previously studied composition function, we can now readily derive value function bounds for other transformations and compositions, for example Boolean compositions as defined previously. The corresponding results for entropy-regularized RL are summarized in Table \ref{tab:entropy-regularized}. \section{Experiments} To test our theoretical results using function approximators (FAs), we consider a deterministic ``gridworld'' MDP amenable to task composition\footnote{Source code available at \url{https://github.com/JacobHA/Q-Bounding-in-Compositional-RL}}. Figure~\ref{fig:tasks} shows the environments of the trained primitive tasks ``$6\times6\ \text{L}$'' and ``$6\times6\ \text{D}$'', whose reward functions are then combined to produce a composite task, ``$6\times6\ \text{L OR D}$''. The agent has $4$ possible actions (in each of the cardinal directions) and begins at the green circle in all cases. The agent's goal is to navigate to the orange states which provide a reward. We note that these states are \textit{not absorbing}, unlike the cases considered in prior work. The red ``X'' indicates a penalizing state where the agent's episode is immediately terminated. Finally, a wall (black square) is added for the agent to navigate around. The primitive tasks are assumed to be solved with high accuracy (i.e. we assume the $Q$-values for primitive tasks to be exactly known). Although the domain is rather simple, we use such an experiment as a means of validating our theoretical results while gaining insight on the experimental effects of \textit{clipping} (discussed below) during training. \begin{figure} \caption{Mean bound violation, shown with shaded 95\% confidence intervals. The bound violation measures the difference between the Q-network's estimate and the allowed bound $\widetilde{Q}$.} \label{fig:bound_viol} \end{figure} With the primitive tasks solved, we now consider training on a target composite (``OR'') task. 
We learn from scratch (with no prior information or bounds being applied) as our baseline (blue line, denoted ``none'', in Fig.~\ref{fig:tasks} and Fig.~\ref{fig:bound_viol}).
3,925
49,178
en
train
0.90.4
\end{enumerate} then the optimal action-value function for the transformed rewards obeys the following inequality: \begin{equation}\label{eq:concave_entropy-regularized} f\left( Q(s,a) \right) - \hat{C}(s,a) \leq \widetilde{Q}(s,a) \leq f \left( Q(s,a) \right) \end{equation} \end{lemma} As in the preceding section, we provide a similar result for the derived policy $\pi_{f}$, given the concave conditions provided. \begin{lemma} Consider the soft value of the policy $\pi_f(a|s)$ on the transformed task of interest, denoted by $\widetilde{Q}^{\pi_f}(s,a)$. The sub-optimality of $\pi_f$ is then upper bounded by: \begin{equation} \widetilde{Q}(s,a) - \widetilde{Q}^{\pi_f}(s,a) \leq \hat{D}(s,a) \end{equation} where $\hat{D}$ satisfies the following recursive equation \begin{equation} \hat{D}(s,a) = \gamma \E_{s' \sim p}\E_{a' \sim \pi_f} \left( \hat{C}(s',a') + \hat{D}(s',a') \right). \end{equation} \label{lem:concave_regret_maxent} \end{lemma} Now, by taking $V_f(s)$ as the previously defined \textit{soft} value function, the fixed points $C$ and $\hat{C}$ have the same definitions as presented in Lemmas \ref{thm:convex_cond_std} and \ref{thm:concave_cond_std}, respectively, with this new definition of $V_f$. This final constraint (in Lemmas \ref{thm:forward_cond_entropy-regularized} and \ref{thm:reverse_cond_entropy-regularized}) on $f$ arises out of the requirements for extending the previous results to entropy-regularized RL. Although the final condition (similar to a log-convexity) appears somewhat cumbersome, we show that it is nevertheless possible to satisfy it for several non-trivial functions (Table~\ref{tab:entropy-regularized}). For instance, we can now handle functions defining Boolean composition over subtasks ($\max(\cdot), \ \min(\cdot)$), which have not been considered in previous entropy-regularized results \citep{Haarnoja2018, vanNiekerk}, as well as new functional transformations such as the NOT gate (Table~\ref{tab:entropy-regularized}). 
\begin{figure*} \caption{In the first panel, we show learning curves for each of the clipping methods proposed, averaged over $50$ trials, with a 95\% confidence interval shown in the shaded region. In the next two panels, we depict the primitive tasks with rewarding states (orange diamonds) on the left side and bottom side of the maze, respectively. In the rightmost panel, we show the composite task of interest, with the multivariable ``OR'' composition function $\max_k\{\cdot\}$.} \label{fig:tasks} \end{figure*} \subsection{Generalization to Composition of Primitive Tasks} As we have done in the standard RL setting (Section \ref{sec:comp_std}), we can also extend the previous results to include compositionality: functions operating over multiple primitive tasks. In this case, \cite{Haarnoja2018} have demonstrated a special case of Lemma \ref{thm:reverse_cond_entropy-regularized} for the composition function $f(\{r^{(k)}\}) = \sum_k \alpha_k r^{(k)}$ for convex weights $\alpha_k$. This can also be shown in our framework by proving the final condition of Lemma \ref{thm:reverse_cond_entropy-regularized} (since the others are automatic given that $f$ is linear). This vectorized condition can be proven via H\"older's inequality. Besides this previously studied composition function, we can now readily derive value function bounds for other transformations and compositions, for example Boolean compositions as defined previously. The corresponding results for entropy-regularized RL are summarized in Table \ref{tab:entropy-regularized}. \section{Experiments} To test our theoretical results using function approximators (FAs), we consider a deterministic ``gridworld'' MDP amenable to task composition\footnote{Source code available at \url{https://github.com/JacobHA/Q-Bounding-in-Compositional-RL}}. 
Figure~\ref{fig:tasks} shows the environments of the trained primitive tasks ``$6\times6\ \text{L}$'' and ``$6\times6\ \text{D}$'', whose reward functions are then combined to produce a composite task, ``$6\times6\ \text{L OR D}$''. The agent has $4$ possible actions (in each of the cardinal directions) and begins at the green circle in all cases. The agent's goal is to navigate to the orange states which provide a reward. We note that these states are \textit{not absorbing}, unlike the cases considered in prior work. The red ``X'' indicates a penalizing state where the agent's episode is immediately terminated. Finally, a wall (black square) is added for the agent to navigate around. The primitive tasks are assumed to be solved with high accuracy (i.e. we assume the $Q$-values for primitive tasks to be exactly known). Although the domain is rather simple, we use such an experiment as a means of validating our theoretical results while gaining insight on the experimental effects of \textit{clipping} (discussed below) during training. \begin{figure} \caption{Mean bound violation, shown with shaded 95\% confidence intervals. The bound violation measures the difference between the Q-network's estimate and the allowed bound $\widetilde{Q}$.} \label{fig:bound_viol} \end{figure} With the primitive tasks solved, we now consider training on a target composite (``OR'') task. We learn from scratch (with no prior information or bounds being applied) as our baseline (blue line, denoted ``none'', in Fig.~\ref{fig:tasks} and Fig.~\ref{fig:bound_viol}). To implement the derived bounds, we consider the one-sided bound, thereby not requiring further training. In this case (standard RL, ``OR'' composition) we have the following lower bound (see Table~\ref{tab:std_rl}): $Q^{(\text{OR})} \ge \max\{Q^{(\text{L})}, Q^{(\text{D})}\}$. There are many ways to implement such a bound in practice. 
One na\"ive method is to simply clip the target network's new (proposed target) value to be within the allowed region for each of the $\widetilde{Q}(s,a)$ that are currently being updated. We term this method ``hard clipping''. Inspired by Section 3.2 of \citet{kimconstrained}, we can also use an additional penalty by adding to the loss function the absolute value of bound violations that occur (the quantity ``$\textrm{BV}$'' defined in Eq.~\eqref{eq:bound_violations}). We term this method ``soft clipping''. As mentioned by \citet{kimconstrained}, this method of clipping could produce a new hyperparameter (the relative weight for this term relative to the Bellman residual). We keep this coefficient fixed (to unity) for simplicity, and we intend on exploring the possibility of a variable weight in future work. \begin{equation} \textrm{BV} \doteq || \widetilde{Q}(s,a) - f(\{Q^{(k)}(s,a)\}_{k=1}^M) || \label{eq:bound_violations} \end{equation} Similar to Eq. (21) of \citet{kimconstrained}, we also considered a clipping at test-time only, with some differences in how the bounds are applied. This discrepancy is due to the difference in frameworks: \citet{kimconstrained} leverages the GPI framework, and in our setting we are learning a new policy from scratch while imposing said bounds. Our method is as follows: Whenever the agent acts greedily and samples from the policy network, it first applies (hard) clipping to the network's value; then the agent extracts an action via greedy argmax. We term this method of clipping ``test clipping''. The results for each method (as well as a combination of both hard and soft clipping) are shown in Fig.~\ref{fig:tasks} and \ref{fig:bound_viol}. Interestingly, we find that by directly incorporating the bound violations into the loss function (via the ``soft'' clipping mechanism), the bound violations most quickly become (and remain) zero (Fig.~\ref{fig:bound_viol}) as opposed to the other methods considered. 
We find that reduction in bound violation also generally correlates with a high evaluation reward during training. One exception to this observation (for the particular environment shown) is the case of ``test'' clipping. For this particular composition, either primitive task will solve the composite task, thus yielding high evaluation rewards (Fig.~\ref{fig:tasks}). However, the $Q$-values are not accurate, which leads to a high frequency of clipping, comparable to the baseline without clipping (Fig.~\ref{fig:bound_viol}). In order to ensure the agent has learned accurate $Q$-values, it is therefore important to monitor the bound violations rather than only the evaluation performance which may not be representative of convergence of $Q$-values. \mathcal{S}ection{Discussion} In summary, we have established a general theoretical treatment for functional transformation and composition of primitive tasks. This extends the scope of previous work, which has primarily focused on isolated instances of reward transformations and compositions without general structure. Additionally, we have theoretically addressed the broader setting of stochastic dynamics, with rewards varying on both terminal and non-terminal (i.e. boundary and interior) states. In this work, we have shown that it is possible to derive a general class of functions which obey transfer bounds in standard and entropy-regularized RL beyond those cases discussed in previous work. In particular, we show that by using the same functional form on the optimal $Q$ functions as used on the reward, we can bound the transformed optimal $Q$ function. The derived bound can then be used to calculate a zero-shot solution. We have used these functions to define a Transfer Library: a set of tasks which can immediately be addressed by our bounds. Since our approach via the optimal backup equation is general, we apply it to both standard RL and entropy-regularized RL. 
The newly-defined fixed point $C$ ($\hat{C}$) has an interesting interpretation. Rather than simply being an arbitrary function, for both the standard RL and entropy-regularized RL bounds, $C$ represents an optimal value function for a standard RL task with reward function given by $r_C$ ($\hat{r}_C$ for $\hat{C}$). The function $C$ bounds the total gap between $f(Q(s,a))$ and $\widetilde{Q}(s,a)$ at the level of state-actions. We also note the simple relationship between reward functions $r_C = -\hat{r}_C$. The fixed point $D$ ($\hat{D}$) is not an optimal value function, but the value of the zero-shot policy $\pi_f$ in some other auxiliary task. The auxiliary task takes various ``rewards'', e.g. the function $\gamma \hat{C}$ in Lemma \ref{lem:concave_regret_maxent}. Although for general functions $f$, the rewards do not have a simple interpretation (i.e. R\'enyi divergence between two policies as in \citep{Haarnoja2018}), we see that $r_C$ essentially measures the non-linearities of the composition function $f$ with respect to the given dynamics, and hence accounts for the errors made in using the bounding conditions of $f$. Furthermore, we can bound $C$ (and thus the difference between the optimal value and the suggested zero-shot approximation $f(Q)$) in a simple way: by bounding the rewards corresponding to $C$. By simply calculating the maximum of $r_C$ for example, one easily finds $C(s,a) \le \frac{1}{1-\gamma} \max_{s,a} r_C(s,a)$ (and similarly for $\hat{C}$). Interestingly, \cite{TL_bound} have shown the provable usefulness of using an upper bound when used for ``warmstarting'' the training in new domains. In particular, it appears that $f(Q)$ (for the concave conditions) is related to their proposed ``$\alpha$-weak admissible heuristic'' for $\widetilde{\mathcal{T}}$. In future work, we hope to precisely connect to such theoretical results in order to obtain provable benefits to our derived bounds. 
Experimentally we have observed that this warmstarting procedure does indeed improve convergence times, however a detailed study of this effect is beyond the scope of the present work and will be explored in future work. The derived results have also been used to devise protocols for clipping which improve performance and reduce variance during training based on the experiments presented. In the future, we hope that the class of functions discussed in this work will be broadened further, allowing for a larger class of non-trivial zero-shot bounds for the Transfer Library. By adding these known transformations and compositions to the Transfer Library, the RL agent will be able to approach significantly more novel tasks without the need for further training. The current research has also emphasized questions for transfer learning in this context, such as: \textit{Which primitives should be prioritized for learning?} (Discussed in \cite{boolean_stoch, nemecek, alver}.) \textit{What other functions can be used for transfer?} \textit{How tight are these bounds?} \textit{How does the Transfer Library depend on the parameters $\gamma$ and $\beta$?} In this work, we provide general bounds for the discrete MDP setting and an extension of the theory to continuous state-action spaces is deferred to future work. It would be of interest to explore if it is possible to prove general bounds for this extension, given sufficient smoothness conditions on the dynamics and the function of transformation. Other extensions can be considered as well, for instance: the applicability to other value-based or actor-critic methods, the warmstarting of function approximators, learning the $C$ and $D$ functions, and adjusting the ``soft'' clipping weight parameter. In future work, we also aim to discover other functions satisfying the derived conditions; and will attempt to find necessary (rather than sufficient) conditions that classify the functions $f \in \mathcal{T}L$. 
It would be of interest to explore if extensions of the current approach can further enable agents to expand and generalize their knowledge base to solve complex dynamic tasks in Deep RL. \nocite{haarnoja2019_learn} \nocite{hardy1952inequalities} \nocite{lee2021sunrise} \nocite{openAI} \title{Bounding the Optimal Value Function \\in Compositional Reinforcement Learning\\(Supplementary Material)} \onecolumn \maketitle \mathcal{S}ection{Introduction} In the following, we discuss the results of additional experiments in the four room domain. In these experiments, we want to answer the following questions: \begin{itemize} \item How do the optimal policies and value functions compare to those calculated from the zero-shot approximations using the derived bounds? \item What are other examples of compositions and functional transformations that can be analyzed using our approach? \item Does warmstarting (using the derived bounds for initialization) in the tabular case improve the convergence? \end{itemize} To address these issues, we modify OpenAI's frozen lake environment \cite{openAI} to allow for stochastic dynamics. In the tabular experiments, numerical solutions for the optimal $Q$ functions were obtained by solving the Bellman backup equations iteratively. Iterations are considered converged once the maximum difference between successive iterates is less than $10^{-10}$. Beyond the motivating example shown in the main text, we have included video files demonstrating a full range of zero-shot compositions with convex weights between the Bottom Left (BL) room and Bottom Right (BR) room subtasks, in both entropy-regularized ($\beta=5$) and standard RL with deterministic dynamics. These videos, along with all code for the above experiments are made publicly available at a repository on \url{https://github.com/JacobHA/Q-Bounding-in-Compositional-RL}. \mathcal{S}ection{Experiments} \mathcal{S}ubsection{Function Approximators}
3,949
49,178
en
train
0.90.5
The newly-defined fixed point $C$ ($\hat{C}$) has an interesting interpretation. Rather than simply being an arbitrary function, for both the standard RL and entropy-regularized RL bounds, $C$ represents an optimal value function for a standard RL task with reward function given by $r_C$ ($\hat{r}_C$ for $\hat{C}$). The function $C$ bounds the total gap between $f(Q(s,a))$ and $\widetilde{Q}(s,a)$ at the level of state-actions. We also note the simple relationship between reward functions $r_C = -\hat{r}_C$. The fixed point $D$ ($\hat{D}$) is not an optimal value function, but the value of the zero-shot policy $\pi_f$ in some other auxiliary task. The auxiliary task takes various ``rewards'', e.g. the function $\gamma \hat{C}$ in Lemma \ref{lem:concave_regret_maxent}. Although for general functions $f$, the rewards do not have a simple interpretation (i.e. R\'enyi divergence between two policies as in \citep{Haarnoja2018}), we see that $r_C$ essentially measures the non-linearities of the composition function $f$ with respect to the given dynamics, and hence accounts for the errors made in using the bounding conditions of $f$. Furthermore, we can bound $C$ (and thus the difference between the optimal value and the suggested zero-shot approximation $f(Q)$) in a simple way: by bounding the rewards corresponding to $C$. By simply calculating the maximum of $r_C$ for example, one easily finds $C(s,a) \le \frac{1}{1-\gamma} \max_{s,a} r_C(s,a)$ (and similarly for $\hat{C}$). Interestingly, \cite{TL_bound} have shown the provable usefulness of using an upper bound when used for ``warmstarting'' the training in new domains. In particular, it appears that $f(Q)$ (for the concave conditions) is related to their proposed ``$\alpha$-weak admissible heuristic'' for $\widetilde{\mathcal{T}}$. In future work, we hope to precisely connect to such theoretical results in order to obtain provable benefits to our derived bounds. 
Experimentally we have observed that this warmstarting procedure does indeed improve convergence times, however a detailed study of this effect is beyond the scope of the present work and will be explored in future work. The derived results have also been used to devise protocols for clipping which improve performance and reduce variance during training based on the experiments presented. In the future, we hope that the class of functions discussed in this work will be broadened further, allowing for a larger class of non-trivial zero-shot bounds for the Transfer Library. By adding these known transformations and compositions to the Transfer Library, the RL agent will be able to approach significantly more novel tasks without the need for further training. The current research has also emphasized questions for transfer learning in this context, such as: \textit{Which primitives should be prioritized for learning?} (Discussed in \cite{boolean_stoch, nemecek, alver}.) \textit{What other functions can be used for transfer?} \textit{How tight are these bounds?} \textit{How does the Transfer Library depend on the parameters $\gamma$ and $\beta$?} In this work, we provide general bounds for the discrete MDP setting and an extension of the theory to continuous state-action spaces is deferred to future work. It would be of interest to explore if it is possible to prove general bounds for this extension, given sufficient smoothness conditions on the dynamics and the function of transformation. Other extensions can be considered as well, for instance: the applicability to other value-based or actor-critic methods, the warmstarting of function approximators, learning the $C$ and $D$ functions, and adjusting the ``soft'' clipping weight parameter. In future work, we also aim to discover other functions satisfying the derived conditions; and will attempt to find necessary (rather than sufficient) conditions that classify the functions $f \in \mathcal{T}L$. 
It would be of interest to explore if extensions of the current approach can further enable agents to expand and generalize their knowledge base to solve complex dynamic tasks in Deep RL. \nocite{haarnoja2019_learn} \nocite{hardy1952inequalities} \nocite{lee2021sunrise} \nocite{openAI} \title{Bounding the Optimal Value Function \\in Compositional Reinforcement Learning\\(Supplementary Material)} \onecolumn \maketitle \mathcal{S}ection{Introduction} In the following, we discuss the results of additional experiments in the four room domain. In these experiments, we want to answer the following questions: \begin{itemize} \item How do the optimal policies and value functions compare to those calculated from the zero-shot approximations using the derived bounds? \item What are other examples of compositions and functional transformations that can be analyzed using our approach? \item Does warmstarting (using the derived bounds for initialization) in the tabular case improve the convergence? \end{itemize} To address these issues, we modify OpenAI's frozen lake environment \cite{openAI} to allow for stochastic dynamics. In the tabular experiments, numerical solutions for the optimal $Q$ functions were obtained by solving the Bellman backup equations iteratively. Iterations are considered converged once the maximum difference between successive iterates is less than $10^{-10}$. Beyond the motivating example shown in the main text, we have included video files demonstrating a full range of zero-shot compositions with convex weights between the Bottom Left (BL) room and Bottom Right (BR) room subtasks, in both entropy-regularized ($\beta=5$) and standard RL with deterministic dynamics. These videos, along with all code for the above experiments are made publicly available at a repository on \url{https://github.com/JacobHA/Q-Bounding-in-Compositional-RL}. 
\mathcal{S}ection{Experiments} \mathcal{S}ubsection{Function Approximators} For function approximator experiments (as shown in the main text), we use the DQN implementation from Stable-Baselines3 \cite{stable-baselines3}. We first fully train the subtasks (seen in Fig. 1 of the main text). Then, we perform hyperparameter sweeps for each possible clipping option. Several hyperparameters are kept fixed (Table~\ref{tab:shared}), and we sweep with the range and distribution shown below in Table~\ref{tab:sweep}. Finally, we use the optimal hyperparameters (as measured by those which maximize the accumulated reward throughout training). These values are shown in Table~\ref{tab:optimal}. \begin{center} \captionof{table}{Hyperparameters shared by all Deep Q Networks} \begin{tabular}{||c c||} \hline Hyperparameter & Value \\ [0.5ex] \hline\hline Buffer Size & 1,000,000 \\ \hline Discount factor, $\gamma$ & 0.99 \\ \hline $\epsilon_{\text{initial}}$ & 1.0 \\ \hline $\epsilon_{\text{final}}$ & 0.05 \\ \hline ``learning starts'' & 5,000 \\ \hline Target Update Interval & 10000 \\ [1ex] \hline \end{tabular} \label{tab:shared} \end{center} \begin{center} \captionof{table}{Hyperparameter Ranges Used for Finetuning} \begin{tabular}{||c c c c||} \hline Hyperparameter & Sampling Distribution & Min. Value & Max. 
Value\\ [0.5ex] \hline Learning Rate & Log Uniform & $10^{-4}$ & $10^{-1}$ \\ \hline Batch Size & Uniform & $32$ & $256$ \\ \hline Exploration Fraction & Uniform & $0.1$ & $0.3$ \\ \hline Polyak Update, $\tau$ & Uniform & $0.5$ & $1.0$ \\ [1ex] \hline \end{tabular} \label{tab:sweep} \end{center} \begin{center} \captionof{table}{Hyperparameters used for different clipping methods} \begin{tabular}{||c c c c c||} \hline Hyperparameter & None & Soft & Hard & Soft-Hard \\ [0.5ex] \hline Learning Rate & $7.825\times10^{-4}$ & $3.732\times10^{-3}$ & $1.457\times10^{-3}$ & $3.184\times10^{-3}$ \\ \hline Batch Size & 245 & 247 & 146 & 138\\ \hline Exploration Fraction & 0.137 & 0.1075 & 0.1243 & 0.1207\\ \hline Polyak Update, $\tau$ & 0.9107 & 0.9898 & 0.5545 & 0.7682 \\ [1ex] \hline \end{tabular} \label{tab:optimal} \end{center} \mathcal{S}ubsection{Tabular experiments} In these experiments we will demonstrate on simple discrete environments the effect of increasingly stochastic dynamics and increasingly dense rewards. As a proxy for measuring the usefulness or accuracy of the bound $f(Q)$, we calculate the mean difference between $f\left(Q(s,a)\right)-\widetilde{Q}(s,a)$, as well as the mean Kullback-Liebler (KL) divergence between $\pi$ (the true optimal policy) and $\pi_f$, the policy derived from the bound $f(Q)$. The proceeding experiments are situated in the entropy-regularized formalism (unless $\beta=\inf$ as shown in Fig.~\ref{fig:beta_inf_stoch_expt}) with the uniform prior policy $\pi_0(a|s) = 1/ |\mathcal{A}|$. \mathcal{S}ubsubsection{Stochasticity of Dynamics} In this experiment, we investigate the effect of stochastic dynamics on the bounds. Specifically, we vary the probability that taking an action will result in the intended action. This is equivalent to a slip probability. \begin{figure} \caption{Reward functions for a simple maze domain; used for stochasticity experiments. 
We place reward (whose cost is half the default step penalty of $-1$) at the edges of the room, denoted by an orange diamond. } \label{fig:stochastic_desc} \end{figure} We notice in the following plots that at near-deterministic dynamics the bound becomes tighter. We also remark that the Kullback-Leibler divergence is lowest in very highly-stochastic environments. This is because for any $\beta>0$, the cost of changing the policy $\pi$ away from the prior policy is not worth it: the dynamics are so stochastic that there will be no considerable difference in trajectories even if significant controls (nearly deterministic choices) are applied via $\pi$. \begin{figure} \caption{$\beta=1$ KL divergence between $\pi$ and $\pi_f$ and average difference between optimal $Q$ function and presented bound.} \label{fig:b1} \end{figure} \begin{figure} \caption{$\beta=3$ KL divergence between $\pi$ and $\pi_f$ and average difference between optimal $Q$ function and presented bound.} \label{fig:b3} \end{figure} \begin{figure} \caption{$\beta=5$ KL divergence between $\pi$ and $\pi_f$ and average difference between optimal $Q$ function and presented bound.} \label{fig:b5} \end{figure} \begin{figure} \caption{$\beta=\infty$, standard RL. Average difference between optimal $Q$ function and presented bound. Note that we do not plot a KL divergence for this case as $\pi$ is greedy and hence the divergence is always infinite.} \label{fig:beta_inf_stoch_expt} \end{figure} \subsection{Sparsity of Rewards} In this experiment, we consider an empty environment ($|S|\times |S|$ empty square) with reward $r=0$ everywhere and deterministic dynamics. No other rewards or obstacles are present. Then fix an integer $0<n<|S|$. Drawing randomly (without repetition), we choose one of the states of the environment to grant a reward, drawn uniformly between $(0, 1)$. We do this again for another copy of the empty environment. 
We then compose these two (randomly generated as described) subtasks by using a simple average $F(r^{(1)}, r^{(2)}) = 0.5r^{(1)} + 0.5 r^{(2)}$. We have used $\beta=5$ for all experiments in this subsection. \begin{figure} \caption{$6\times 6$ environment. KL divergence between $\pi$ and $\pi_f$ and average difference between optimal $Q$ function and presented bound, with the shaded region representing one standard deviation over 1000 runs.} \label{fig:6x6} \end{figure} \begin{figure} \caption{$10\times 10$ environment. KL divergence between $\pi$ and $\pi_f$ and average difference between optimal $Q$ function and presented bound, with the shaded region representing one standard deviation over 1000 runs.} \label{fig:10x10} \end{figure} Interestingly, we find a somewhat universal behavior, in that there is a certain level of density which makes the bound a poor approximation to the true $Q$ function. We also note that the bound is a better approximation at low densities. \mathcal{S}ection{Boolean Composition Definitions} In this section, we explicitly define the action of Boolean operators on subtask reward functions. These definitions are similar to those used by \cite{boolean}. \begin{definition}[OR Composition] Given subtask rewards $\{r^{(1)}, r^{(2)}, \dotsc , r^{(M)} \}$, the OR composition among them is given by the \textit{maximum} over all subtasks, at each state-action pair: \begin{equation} r^{(\text{OR})}(s,a) = \max_k r^{(k)}(s,a). \end{equation} \end{definition} \begin{definition}[AND Composition] Given subtask rewards $\{r^{(1)}, r^{(2)}, \dotsc , r^{(M)} \}$, the AND composition among them is given by the \textit{minimum} over all subtasks, at each state-action: \begin{equation} r^{(\text{AND})}(s,a) = \min_k r^{(k)}(s,a). \end{equation} \end{definition} \begin{definition}[NOT Gate] Given a subtask reward function $r$, applying the NOT gate transforms the reward function by negating all rewards (i.e. 
rewards $\to$ costs): \begin{equation} r^{(\text{NOT})}(s,a) = - r(s,a), \end{equation} \end{definition} The proofs in all subsequent sections follow an inductive form based on the Bellman backup equation, whose solution converges to the optimal $Q$ function. This is a similar approach as employed by \cite{Haarnoja2018} and \cite{hunt_diverg}, but with the extension to all applicable functions; rather than (linear) convex combinations. \mathcal{S}ection{Proofs for Standard RL} Let $X$ be the codomain for the $Q$ function of the primitive task ($Q: \mathcal{S} \times \mathcal{A} \to X \mathcal{S}ubseteq \mathbb{R}$).
3,865
49,178
en
train
0.90.6
\subsubsection{Stochasticity of Dynamics} In this experiment, we investigate the effect of stochastic dynamics on the bounds. Specifically, we vary the probability that taking an action will result in the intended action. This is equivalent to a slip probability. \begin{figure} \caption{Reward functions for a simple maze domain; used for stochasticity experiments. We place reward (whose cost is half the default step penalty of $-1$) at the edges of the room, denoted by an orange diamond. } \label{fig:stochastic_desc} \end{figure} We notice in the following plots that at near-deterministic dynamics the bound becomes tighter. We also remark that the Kullback-Leibler divergence is lowest in very highly-stochastic environments. This is because for any $\beta>0$, the cost of changing the policy $\pi$ away from the prior policy is not worth it: the dynamics are so stochastic that there will be no considerable difference in trajectories even if significant controls (nearly deterministic choices) are applied via $\pi$. \begin{figure} \caption{$\beta=1$ KL divergence between $\pi$ and $\pi_f$ and average difference between optimal $Q$ function and presented bound.} \label{fig:b1} \end{figure} \begin{figure} \caption{$\beta=3$ KL divergence between $\pi$ and $\pi_f$ and average difference between optimal $Q$ function and presented bound.} \label{fig:b3} \end{figure} \begin{figure} \caption{$\beta=5$ KL divergence between $\pi$ and $\pi_f$ and average difference between optimal $Q$ function and presented bound.} \label{fig:b5} \end{figure} \begin{figure} \caption{$\beta=\infty$, standard RL. Average difference between optimal $Q$ function and presented bound. 
Note that we do not plot a KL divergence for this case as $\pi$ is greedy and hence the divergence is always infinite.} \label{fig:beta_inf_stoch_expt} \end{figure} \mathcal{S}ubsection{Sparsity of Rewards} In this experiment, we consider an empty environment ($|S|\times |S|$ empty square) with reward $r=0$ everywhere and deterministic dynamics. No other rewards or obstacles are present. Then fix an integer $0<n<|S|$. Drawing randomly (without repetition), we choose one of the states of the environment to grant a reward, drawn uniformly between $(0, 1)$. We do this again for another copy of the empty environment. We then compose these two (randomly generated as described) subtasks by using a simple average $F(r^{(1)}, r^{(2)}) = 0.5r^{(1)} + 0.5 r^{(2)}$. We have used $\beta=5$ for all experiments in this subsection. \begin{figure} \caption{$6\times 6$ environment. KL divergence between $\pi$ and $\pi_f$ and average difference between optimal $Q$ function and presented bound, with the shaded region representing one standard deviation over 1000 runs.} \label{fig:6x6} \end{figure} \begin{figure} \caption{$10\times 10$ environment. KL divergence between $\pi$ and $\pi_f$ and average difference between optimal $Q$ function and presented bound, with the shaded region representing one standard deviation over 1000 runs.} \label{fig:10x10} \end{figure} Interestingly, we find a somewhat universal behavior, in that there is a certain level of density which makes the bound a poor approximation to the true $Q$ function. We also note that the bound is a better approximation at low densities. \mathcal{S}ection{Boolean Composition Definitions} In this section, we explicitly define the action of Boolean operators on subtask reward functions. These definitions are similar to those used by \cite{boolean}. 
\begin{definition}[OR Composition] Given subtask rewards $\{r^{(1)}, r^{(2)}, \dotsc , r^{(M)} \}$, the OR composition among them is given by the \textit{maximum} over all subtasks, at each state-action pair: \begin{equation} r^{(\text{OR})}(s,a) = \max_k r^{(k)}(s,a). \end{equation} \end{definition} \begin{definition}[AND Composition] Given subtask rewards $\{r^{(1)}, r^{(2)}, \dotsc , r^{(M)} \}$, the AND composition among them is given by the \textit{minimum} over all subtasks, at each state-action: \begin{equation} r^{(\text{AND})}(s,a) = \min_k r^{(k)}(s,a). \end{equation} \end{definition} \begin{definition}[NOT Gate] Given a subtask reward function $r$, applying the NOT gate transforms the reward function by negating all rewards (i.e. rewards $\to$ costs): \begin{equation} r^{(\text{NOT})}(s,a) = - r(s,a), \end{equation} \end{definition} The proofs in all subsequent sections follow an inductive form based on the Bellman backup equation, whose solution converges to the optimal $Q$ function. This is a similar approach as employed by \cite{Haarnoja2018} and \cite{hunt_diverg}, but with the extension to all applicable functions; rather than (linear) convex combinations. \mathcal{S}ection{Proofs for Standard RL} Let $X$ be the codomain for the $Q$ function of the primitive task ($Q: \mathcal{S} \times \mathcal{A} \to X \mathcal{S}ubseteq \mathbb{R}$). 
\begin{lemma}[Convex Conditions]\label{thm:convex_cond_std} Given a primitive task with discount factor $\gamma$ and a bounded, continuous transformation function $f~:~X~\to~\mathbb{R}$ which satisfies: \begin{enumerate} \item $f$ is convex on its domain $X$ (for stochastic dynamics); \item $f$ is sublinear: \begin{enumerate}[label=(\roman*)] \item $f(x+y) \leq f(x) + f(y)$ for all $x,y \in X$ \item $f(\gamma x) \leq \gamma f(x)$ for all $x \in X$ \end{enumerate} \item $f\left( \max_{a} \mathcal{Q}(s,a) \right) \leq \max_{a}~f\left( \mathcal{Q}(s,a) \right)$ for all $\mathcal{Q}: \mathcal{S} \times \mathcal{A} \to \mathbb{R}.$ \end{enumerate} then the optimal action-value function for the transformed rewards, $\widetilde{Q}$, is now related to the optimal action-value function with respect to the original rewards by: \begin{equation}\label{eqn:convex_std} f(Q(s,a)) \leq \widetilde{Q}(s,a) \leq f(Q(s,a)) + C(s,a) \end{equation} where $C$ is the optimal value function for a task with reward \begin{equation}\label{eq:std_convex_C_def} r_C(s,a) = f(r(s,a)) + \gamma \mathbb{E}_{s'} V_f(s') - f(Q(s,a)). \end{equation} \end{lemma} \begin{proof} We will prove all inequalities by induction on the number of backup steps, $N$. We start with the lower bound $\widetilde{Q} \ge f(Q)$. The base case, $N=1$ is trivial since $f(r(s,a))=f(r(s,a))$. The inductive step is the assumption $\widetilde{Q}^{(N)}(s,a) \geq f(Q^{(N)}(s,a))$ for some $N>1$. 
In the case of standard RL, the Bellman backup equation for transformed rewards is given by: \begin{equation} \widetilde{Q}^{(N+1)}(s,a) = f\left(r(s,a)\right) + \gamma \mathbb{E}_{s' \mathcal{S}im{} p(s'|s,a)} \max_{a'} \widetilde{Q}^{(N)}(s',a') \end{equation} Using the inductive assumption, \begin{equation} \widetilde{Q}^{(N+1)}(s,a) \geq f\left(r(s,a)\right) + \gamma \mathbb{E}_{s' \mathcal{S}im{} p(s'|s,a)} \max_{a'} f \left({Q}^{(N)}(s',a') \right) \end{equation} The condition $v_f(s) \ge f(v(s)) $ is used on the right hand side to give: \begin{equation} \widetilde{Q}^{(N+1)}(s,a) \geq f\left(r(s,a)\right) + \gamma \mathbb{E}_{s' \mathcal{S}im{} p(s'|s,a)} f \left( \max_{a'} {Q}^{(N)}(s',a') \right) \end{equation} Since $f$ is convex, we use Jensen's inequality to factor it out of the expectation. Note that this condition on $f$ is only required for stochastic dynamics. The error introduced by swapping these operators is characterized by the ``Jensen's gap'' for the transformation function $f$. \begin{equation} \widetilde{Q}^{(N+1)}(s,a) \geq f\left(r(s,a)\right) + \gamma f \left( \mathbb{E}_{s' \mathcal{S}im{} p(s'|s,a)} \max_{a'} {Q}^{(N)}(s',a') \right) \end{equation} Finally, using both sublinearity conditions \begin{equation} \widetilde{Q}^{(N+1)}(s,a) \geq f\left(r(s,a) + \gamma \mathbb{E}_{s' \mathcal{S}im{} p(s'|s,a)} \max_{a'} {Q}^{(N)}(s',a') \right) \label{eq:pf:last_line_induction} \end{equation} where the right-hand side is simply $f(Q^{(N+1)}(s,a))$. Since this inequality holds for all $N$, we take the limit $N \to \infty$ wherein $Q^{(N)}$ converges to the optimal $Q$-function. For the right-hand side of Eq. \eqref{eq:pf:last_line_induction}, we thus have (by continuity of $f$): \begin{equation} \lim_{N \to \infty} f\left(Q^{(N)}(s,a)\right) = f\left(\lim_{N \to \infty}Q^{(N)}(s,a)\right) = f(Q(s,a)) \end{equation} where $Q(s,a)$ is the optimal action value function for the primitive task. 
Combined with the limit of the left-hand side, we arrive at the desired inequality: \begin{equation} \widetilde{Q}(s,a) \geq f\left(Q(s,a)\right). \end{equation} This completes the proof of the lower bound. To prove the upper bound we again use induction on the backup equation of $\widetilde{Q}^{(N)}$. We wish to show $\widetilde{Q}^{(N)} \le f\left(Q(s,a)\right) + C^{(N)}(s,a)$ holds for all $N$, with the definition of $C$ provided in Lemma~4.1. Let $f$ satisfy the convex conditions. Consider the backup equation for $\widetilde{Q}$. Again, the base case ($N=1$) is trivially satisfied with equality. Using the inductive assumption, we find \begin{align*} \widetilde{Q}^{(N+1)}(s,a) &= f(r(s,a)) + \gamma \E_{s'} \max_{a'} \widetilde{Q}^{(N)}(s',a') \\ &\le f(r(s,a)) + \gamma \E_{s'} \max_{a'} \left( f(Q(s',a')) + C^{(N)}(s',a')\right) \\ &\le f(r(s,a)) + \gamma \E_{s'} \max_{a'} f(Q(s',a')) + \gamma \E \max_{a'}C^{(N)}(s',a') \\ &= f(Q(s,a)) + \left[ f(r_i) + \gamma \E_{s'} V_f(s') - f(Q(s,a)) \right] + \gamma \E_{s'} \max_{a'}C^{(N)}(s',a') \\ &= f(Q(s,a)) + C^{(N+1)}(s,a) \end{align*} \end{proof}
3,223
49,178
en
train
0.90.7
At this point, we verify that $C(s,a)\geq 0$ which ensures the double-sided bounds above are valid. To do so, we can simply bound the reward function $r_C(s,a)$. By determining $r_C(s,a)\geq 0$, this will ensure $C(s,a) \geq \min_{s,a} r_C(s,a) / (1-\gamma) \geq 0$. \begin{align*} r_C(s,a) &= f(r(s,a)) + \gamma \mathbb{E}_{s'} V_f(s') - f(Q(s,a)) \\ &\geq f(r(s,a)) + \gamma \mathbb{E}_{s'} f(V(s')) - f(Q(s,a)) \\ &\geq f(r(s,a)) + f(\gamma \mathbb{E}_{s'} V(s')) - f(Q(s,a)) \\ &\geq f(r(s,a) + \gamma \mathbb{E}_{s'} V(s')) - f(Q(s,a))\\ &\geq 0 \end{align*} where each line follows from the required conditions in Lemma \ref{thm:convex_cond_std}. A similar proof holds for showing the quantities $\hat{C}$, $D$, $\hat{D}$ are all non-negative. We now prove the policy evaluation bound for standard RL. \begin{lemma} Consider the value of the policy $\pi_f(s) = \max_{a} f(Q(s,a))$ on the transformed task of interest, denoted by $\widetilde{Q}^{\pi_f}(s,a)$. The sub-optimality of $\pi_f$ is then upper bounded by: \begin{equation} \widetilde{Q}(s,a) - \widetilde{Q}^{\pi_f}(s,a) \leq D(s,a) \end{equation} where $D$ is the value of the policy $\pi_f$ in a task with reward \begin{align*} r_D(s,a) = \gamma \mathbb{E}_{s' \sim p} \mathbb{E}_{a' \sim \pi_f} \biggl[ \max_{b} \big\{ f(Q(s',b)) + C(s',b) \big\} - f(Q(s',a')) \biggr] \end{align*} \end{lemma} \begin{proof} We will again prove this bound by induction on steps in the Bellman backup equation for the value of $\pi_f$, as given by the following fixed point equation: \begin{equation} \widetilde{Q}^{\pi_f}(s,a) = f\left(r(s,a)\right) + \gamma \E_{s', a' \sim \pi_f} \widetilde{Q}^{\pi_f}(s',a') \end{equation} We consider the following initial conditions: $ \widetilde{Q}^{\pi_f (0)}(s,a) = \widetilde{Q}(s,a), D^{(0)}(s,a)=0$. 
We note that there is freedom in the choice of initial conditions, as the final statement (regarding the optimal value functions) holds regardless of initialization. As usual, the base case is trivially satisfied. We will now show that the equivalent inequality \begin{equation} \widetilde{Q}^{\pi_f (N)}(s,a) \ge \widetilde{Q}(s,a) - D^{(N)}(s,a) \end{equation} holds for all $N$. Similar to the previous proofs, we will subsequently take the limit $N \to \infty$ to recover the desired result. To do so, we consider the next step of the Bellman backup, and apply the inductive hypothesis: \begin{align} \widetilde{Q}^{\pi_f (N+1)}(s,a) &= f\left(r(s,a)\right) + \gamma \E_{s', a' \sim \pi_f}\left( \widetilde{Q}^{\pi_f (N)}(s',a') \right) \\ &\geq f\left(r(s,a)\right) + \gamma \E_{s', a' \sim \pi_f}\left( \widetilde{Q}(s',a') - D^{(N)}(s',a') \right) \\ &\geq f\left(r(s,a)\right) + \gamma \E_{s', a' \sim \pi_f}\left( f\left(Q(s',a')\right) - D^{(N)}(s',a') \right) \\ &= f\left(r(s,a)\right) + \gamma \E_{s'} \widetilde{V}(s') + \gamma \E_{s', a' \sim \pi_f}\left( f\left(Q(s',a')\right) - D^{(N)}(s',a') - \widetilde{V}(s') \right) \\ &\geq \widetilde{Q}(s,a) + \gamma \E_{s', a' \sim \pi_f}\left( f\left(Q(s',a')\right) - D^{(N)}(s',a') - \max_{a'} \left\{ f\left(Q(s',a')\right) + C(s',a') \right\} \right) \\ &= \widetilde{Q}(s,a) - \gamma \E_{s', a' \sim \pi_f}\left( \max_{a'} \left\{ f\left(Q(s',a')\right) + C(s',a') \right\} - f\left(Q(s',a')\right) + D^{(N)}(s',a')\right) \\ &= \widetilde{Q}(s,a) - \left( r_D(s,a) + \gamma \E_{s',a' \sim \pi_f} D^{(N)}(s',a') \right) \\ &= \widetilde{Q}(s,a) - D^{(N+1)}(s,a) \end{align} The third and fifth line follow from the previous bounds (Lemma 4.1). In the limit $N \to \infty$, we can thus see that the fixed point $D$ corresponds to the policy evaluation for $\pi_f$ in an environment with reward function $r_D$.
\end{proof} Now we prove similar results, but for the ``concave conditions'' presented in the main text. \begin{lemma}[Concave Conditions]\label{thm:concave_cond_std} Given a primitive task with discount factor $\gamma$ and a bounded, continuous transformation function $f~:~X~\to~\mathbb{R}$ which satisfies: \begin{enumerate} \item $f$ is concave on its domain $X$ (for stochastic dynamics); \item $f$ is superlinear: \begin{enumerate}[label=(\roman*)] \item $f(x+y) \geq f(x) + f(y)$ for all $x,y \in X$ \item $f(\gamma x) \geq \gamma f(x)$ for all $x \in X$ \end{enumerate} \item $f\left( \max_{a} \mathcal{Q}(s,a) \right) \geq \max_{a}~f\left( \mathcal{Q}(s,a) \right)$ for all functions $\mathcal{Q}:~\mathcal{S}~\times~\mathcal{A} \to X.$ \end{enumerate} then the optimal action-value functions are now related in the following way: \begin{equation}\label{eqn:concave_std} f(Q(s,a)) - \hat{C}(s,a) \leq \widetilde{Q}(s,a) \leq f(Q(s,a)) \end{equation} where $\hat{C}$ is the optimal value function for a task with reward \begin{equation} \hat{r}_C(s,a) = f(Q(s,a)) - f(r(s,a)) - \gamma \E_{s'} V_f(s') \end{equation} \end{lemma} \begin{proof} The proof of $\widetilde{Q} \le f(Q)$ is the same as the preceding theorem's lower bound but with all inequalities reversed. To prove the upper bound involving $\hat{C}$, we use a similar approach \begin{align*} \widetilde{Q}^{(N+1)}(s,a) &= f(r(s,a)) + \gamma \E_{s'} \max_{a'} \widetilde{Q}^{(N)}(s',a') \\ &\ge f(r(s,a)) + \gamma \E_{s'} \max_{a'} \left( f(Q(s',a')) - \hat{C}^{(N)}(s',a')\right) \\ &\ge f(r(s,a)) + \gamma \E_{s'} \left( \max_{a'} f(Q(s',a')) - \max_{a'} \hat{C}^{(N)}(s',a') \right) \\ &= f(Q(s,a)) - \left[f(Q(s,a)) - f(r(s,a)) - \gamma \E_{s'} V_f(s') + \gamma \E_{s'} \max_{a'} \hat{C}^{(N)}(s',a')\right] \\ &= f(Q(s,a)) - \hat{C}^{(N+1)}(s,a) \end{align*} The second line follows from the inductive hypothesis. The third line follows from the $\max$ of a difference. 
In the penultimate line, we add and subtract $f(Q)$, and identify the definitions for $V_f$ and the backup equation for $\hat{C}$. In the limit $N \to \infty$, we have the desired result. \end{proof} \begin{lemma} Consider the value of the policy $\pi_f(s) = \max_{a} f(Q(s,a))$ on the transformed task of interest, denoted by $\widetilde{Q}^{\pi_f}(s,a)$. The sub-optimality of $\pi_f$ is then upper bounded by: \begin{equation} \widetilde{Q}(s,a) - \widetilde{Q}^{\pi_f}(s,a) \leq \hat{D}(s,a) \end{equation} where $\hat{D}$ is the value of the policy $\pi_f$ in a task with reward \begin{equation} \hat{r}_D = \gamma \mathbb{E}_{s',a' \mathcal{S}im{} \pi_f} \biggr[ V_f(s') - f(Q(s',a')) + \hat{C}(s',a') \biggr] \end{equation} \end{lemma} \begin{proof} The proof of this result is similar to that of Lemma 4.2, except now we must employ the corresponding results of Lemma 4.3. Beginning with a substitution of the inductive hypothesis: \begin{align} \widetilde{Q}^{\pi_f (N+1)}(s,a) &= f\left(r(s,a)\right) + \gamma \E_{s', a' \mathcal{S}im{} \pi_f}\left( \widetilde{Q}^{\pi_f (N)}(s',a') \right) \\ &\geq f\left(r(s,a)\right) + \gamma \E_{s', a' \mathcal{S}im{} \pi_f}\left( \widetilde{Q}(s',a') - \hat{D}^{(N)}(s',a') \right) \\ &\geq f\left(r(s,a)\right) + \gamma \E_{s', a' \mathcal{S}im{} \pi_f}\left( f\left(Q(s',a')\right) - \hat{C}(s',a') - \hat{D}^{(N)}(s',a') \right) \\ &= f\left(r(s,a)\right) + \gamma \E_{s'} \widetilde{V}(s') + \gamma \E_{s', a' \mathcal{S}im{} \pi_f}\left( f\left(Q(s',a')\right) - \hat{C}(s',a') - \hat{D}^{(N)}(s',a') - \widetilde{V}(s') \right) \\ &\geq \widetilde{Q}(s,a) + \gamma \E_{s', a' \mathcal{S}im{} \pi_f}\left( f\left(Q(s',a')\right) - \hat{C}(s',a') - \hat{D}^{(N)}(s',a') - V_f(s') \right) \\ &= \widetilde{Q}(s,a) - \gamma \E_{s', a' \mathcal{S}im{} \pi_f}\left( V_f(s') - f\left(Q(s',a')\right)+ \hat{C}(s',a') + \hat{D}^{(N)}(s',a')\right) \\ &= \widetilde{Q}(s,a) - \left( \hat{r}_D(s,a) + \gamma \E_{s',a' \mathcal{S}im{} \pi_f} 
\hat{D}^{(N)}(s',a') \right) \\ &= \widetilde{Q}(s,a) - \hat{D}^{(N+1)}(s,a) \end{align} \end{proof} Now we provide further details on the technical conditions for compositions (rather than transformations) of primitive tasks to satisfy the derived bounds. \begin{lemma}[Convex Composition of Primitive Tasks] Suppose $F:\bigotimes_k X^{(k)} \to \mathbb{R}$ is convex on its domain and is sublinear (separately in each argument), that is: \begin{align} F(x^{(1)}+y^{(1)},x^{(2)}, \dotsc, x^{(M)}) &\leq F(x^{(1)},x^{(2)},\dotsc, x^{(M)}) + F(y^{(1)},x^{(2)},\dotsc, x^{(M)}) \\ F(x^{(1)},x^{(2)}+y^{(2)}, \dotsc, x^{(M)}) &\leq F(x^{(1)},y^{(2)},\dotsc, x^{(M)}) + F(x^{(1)},y^{(2)},\dotsc, x^{(M)}) \end{align} and similarly for the remaining arguments. \begin{equation} F(\gamma x^{(1)}, \dotsc, \gamma x^{(M)}) \leq \gamma F(x^{(1)}, \dotsc x^{(M)}) \end{equation}and also satisfies \begin{equation} F\left( \max_a \mathcal{Q}^{(1)}(s,a), \dotsc , \max_a \mathcal{Q}^{(M)}(s,a) \right) \leq \max_a F\left(\mathcal{Q}^{(1)}(s,a), \dotsc , \mathcal{Q}^{(M)}(s,a)\right) \end{equation} for all functions $\mathcal{Q}^{(k)}:\mathcal{S} \times \mathcal{A} \to \mathbb{R}$. Then, \begin{equation} F(\vec{Q}(s,a)) \le \widetilde{Q}(s,a) \le F(\vec{Q}(s,a)) + C(s,a) \end{equation} where we use a vector notation to emphasize that the function acts over the set of optimal value functions $\{Q^{(k)}\}$ corresponding to each primitive task, defined by $r^{(k)}$. \end{lemma} \begin{proof} The proof of this statement is identical to the proof of Lemma \ref{thm:convex_cond_std}, now using the fact that $F$ is a multivariable function $F: X^N \to Y$, with each argument obeying the required conditions. $C$ takes the analogous definition as provided for the original result. \end{proof}
3,993
49,178
en
train
0.90.8
\begin{lemma} Consider the value of the policy $\pi_f(s) = \max_{a} f(Q(s,a))$ on the transformed task of interest, denoted by $\widetilde{Q}^{\pi_f}(s,a)$. The sub-optimality of $\pi_f$ is then upper bounded by: \begin{equation} \widetilde{Q}(s,a) - \widetilde{Q}^{\pi_f}(s,a) \leq \hat{D}(s,a) \end{equation} where $\hat{D}$ is the value of the policy $\pi_f$ in a task with reward \begin{equation} \hat{r}_D = \gamma \mathbb{E}_{s',a' \mathcal{S}im{} \pi_f} \biggr[ V_f(s') - f(Q(s',a')) + \hat{C}(s',a') \biggr] \end{equation} \end{lemma} \begin{proof} The proof of this result is similar to that of Lemma 4.2, except now we must employ the corresponding results of Lemma 4.3. Beginning with a substitution of the inductive hypothesis: \begin{align} \widetilde{Q}^{\pi_f (N+1)}(s,a) &= f\left(r(s,a)\right) + \gamma \E_{s', a' \mathcal{S}im{} \pi_f}\left( \widetilde{Q}^{\pi_f (N)}(s',a') \right) \\ &\geq f\left(r(s,a)\right) + \gamma \E_{s', a' \mathcal{S}im{} \pi_f}\left( \widetilde{Q}(s',a') - \hat{D}^{(N)}(s',a') \right) \\ &\geq f\left(r(s,a)\right) + \gamma \E_{s', a' \mathcal{S}im{} \pi_f}\left( f\left(Q(s',a')\right) - \hat{C}(s',a') - \hat{D}^{(N)}(s',a') \right) \\ &= f\left(r(s,a)\right) + \gamma \E_{s'} \widetilde{V}(s') + \gamma \E_{s', a' \mathcal{S}im{} \pi_f}\left( f\left(Q(s',a')\right) - \hat{C}(s',a') - \hat{D}^{(N)}(s',a') - \widetilde{V}(s') \right) \\ &\geq \widetilde{Q}(s,a) + \gamma \E_{s', a' \mathcal{S}im{} \pi_f}\left( f\left(Q(s',a')\right) - \hat{C}(s',a') - \hat{D}^{(N)}(s',a') - V_f(s') \right) \\ &= \widetilde{Q}(s,a) - \gamma \E_{s', a' \mathcal{S}im{} \pi_f}\left( V_f(s') - f\left(Q(s',a')\right)+ \hat{C}(s',a') + \hat{D}^{(N)}(s',a')\right) \\ &= \widetilde{Q}(s,a) - \left( \hat{r}_D(s,a) + \gamma \E_{s',a' \mathcal{S}im{} \pi_f} \hat{D}^{(N)}(s',a') \right) \\ &= \widetilde{Q}(s,a) - \hat{D}^{(N+1)}(s,a) \end{align} \end{proof} Now we provide further details on the technical conditions for compositions (rather than transformations) of 
primitive tasks to satisfy the derived bounds. \begin{lemma}[Convex Composition of Primitive Tasks] Suppose $F:\bigotimes_k X^{(k)} \to \mathbb{R}$ is convex on its domain and is sublinear (separately in each argument), that is: \begin{align} F(x^{(1)}+y^{(1)},x^{(2)}, \dotsc, x^{(M)}) &\leq F(x^{(1)},x^{(2)},\dotsc, x^{(M)}) + F(y^{(1)},x^{(2)},\dotsc, x^{(M)}) \\ F(x^{(1)},x^{(2)}+y^{(2)}, \dotsc, x^{(M)}) &\leq F(x^{(1)},y^{(2)},\dotsc, x^{(M)}) + F(x^{(1)},y^{(2)},\dotsc, x^{(M)}) \end{align} and similarly for the remaining arguments. \begin{equation} F(\gamma x^{(1)}, \dotsc, \gamma x^{(M)}) \leq \gamma F(x^{(1)}, \dotsc x^{(M)}) \end{equation}and also satisfies \begin{equation} F\left( \max_a \mathcal{Q}^{(1)}(s,a), \dotsc , \max_a \mathcal{Q}^{(M)}(s,a) \right) \leq \max_a F\left(\mathcal{Q}^{(1)}(s,a), \dotsc , \mathcal{Q}^{(M)}(s,a)\right) \end{equation} for all functions $\mathcal{Q}^{(k)}:\mathcal{S} \times \mathcal{A} \to \mathbb{R}$. Then, \begin{equation} F(\vec{Q}(s,a)) \le \widetilde{Q}(s,a) \le F(\vec{Q}(s,a)) + C(s,a) \end{equation} where we use a vector notation to emphasize that the function acts over the set of optimal value functions $\{Q^{(k)}\}$ corresponding to each primitive task, defined by $r^{(k)}$. \end{lemma} \begin{proof} The proof of this statement is identical to the proof of Lemma \ref{thm:convex_cond_std}, now using the fact that $F$ is a multivariable function $F: X^N \to Y$, with each argument obeying the required conditions. $C$ takes the analogous definition as provided for the original result. 
\end{proof} \begin{lemma}[Concave Composition of Primitive Tasks]\label{thm:compos_concave_std} If on the other hand $F$ is concave and superlinear in each argument, and also satisfies \begin{equation} F\left( \max_a \mathcal{Q}^{(1)}(s,a), \dotsc , \max_a \mathcal{Q}^{(M)}(s,a) \right) \geq \max_a F\left(\mathcal{Q}^{(1)}(s,a), \dotsc , \mathcal{Q}^{(M)}(s,a)\right) \end{equation} for all functions $\mathcal{Q}^{(k)}:\mathcal{S} \times \mathcal{A} \to \mathbb{R}$, then \begin{equation} F(\vec{Q}(s,a)) - \hat{C}(s,a) \le \widetilde{Q}(s,a) \le F(\vec{Q}(s,a)). \end{equation} \end{lemma} \begin{proof} Again, the proof of this statement is identical to the proof of Lemma \ref{thm:concave_cond_std}, now using the fact that $F$ is a multivariable function $F: X^N \to Y$, with each argument obeying the required conditions. \end{proof} \subsection{Examples of transformations and compositions} In this section, we consider the examples of transformations and compositions mentioned in the main text, and discuss the corresponding results in standard RL. \begin{remark} Given the convex composition of subtasks $r^{(c)} \equiv f(\{r^{(k)}\}) = \sum_k \alpha_k r^{(k)}$ considered by \cite{Haarnoja2018} and \cite{hunt_diverg}, we can use the results of Lemma \ref{thm:compos_concave_std} to bound the optimal $Q$ function by using the optimal $Q$ functions for the primitive tasks: \begin{equation} Q^{(c)}(s,a) \leq \sum_k \alpha_k Q^{(k)}(s,a) \end{equation} \end{remark} \begin{proof} In standard RL, we need only show that $f( \max_i x_{1i}, \dotsc , \max_i x_{ni} ) \geq \max_i f(x_{1i}, \dotsc , x_{ni})$: \begin{equation} \sum_k \alpha_k \max_i x^{(k)}_{i} \geq \max_i \sum_k \alpha_k x^{(k)}_i \end{equation} which holds given $\alpha_k \geq 0$ for all $k$.
We also note that in this case the result clearly holds for general $\alpha_k \geq 0$ not necessarily with $\mathcal{S}um_k \alpha_k = 1$ (as assumed in \cite{Haarnoja2018} and \cite{hunt_diverg}). \end{proof} \begin{remark} Given the AND composition defined above and considered in \cite{boolean}, we have the following result in standard RL: \begin{equation} Q^{\text{AND}}(s,a) \leq \min_k \left\{Q^{(k)}(s,a)\right\} \end{equation} \end{remark} \begin{proof} We could proceed via induction as in the previous proofs, or simply use the above remark, and prove the necessary conditions on the function $f(\cdot) = \min(\cdot)$. The function $\min(\cdot)$ is concave in each argument. It is also straightforward to show that $\min(\cdot)$ is subadditive over all arguments. \end{proof} \begin{remark} Result of (hard) OR composition result in standard RL: \begin{equation} Q^{\text{OR}}(s,a) \geq \max_k \left\{Q^{(k)}(s,a)\right\} \end{equation} \end{remark} \begin{proof} The proof is analogous to the (hard) AND result: $\max$ is a convex, superadditive function. \end{proof} \begin{remark} Result for NOT operation in standard RL: \begin{equation} Q^{\text{NOT}}(s,a) \geq - Q(s,a) \end{equation} \end{remark} \begin{proof} Since the ``NOT'' gate is a unary function, and we are in the standard RL setting, we must check the conditions of Lemma 4.1 or 4.3. Moreoever, since the transformation function applied to the rewards, $f(r)=-r$ is linear, we must check the final condition: $\max_i\{-x_i\} = -\min_i\{x_i\} \geq -\max_i\{x_i\}$. This is the condition required by the concave conditions. \end{proof}
2,712
49,178
en
train
0.90.9
\begin{lemma}[Concave Composition of Primitive Tasks]\label{thm:compos_concave_std} If on the other hand $F$ is concave and superlinear in each argument, and also satisfies \begin{equation} F\left( \max_a \mathcal{Q}^{(1)}(s,a), \dotsc , \max_a \mathcal{Q}^{(M)}(s,a) \right) \leq \max_a F\left(\mathcal{Q}^{(1)}(s,a), \dotsc , \mathcal{Q}^{(M)}(s,a)\right) \end{equation} for all functions $\mathcal{Q}^{(k)}:\mathcal{S} \times \mathcal{A} \to \mathbb{R}$, then \begin{equation} F(\vec{Q}(s,a)) - \hat{C}(s,a) \le \widetilde{Q}(s,a) \le F(\vec{Q}(s,a)). \end{equation} \end{lemma} \begin{proof} Again, the proof of this statement is identical to the proof of Lemma \ref{thm:concave_cond_std}, now using the fact that $F$ is a multivariable function $F: X^N \to Y$, with each argument obeying the required conditions. \end{proof} \mathcal{S}ubsection{Examples of transformations and compositions} In this section, we consider the examples of transformations and compositions mentioned in the main text, and discuss the corresponding results in standard RL. \begin{remark} Given the convex composition of subtasks $r^{(c)} \equiv f(\{r^{(k)}\}) = \mathcal{S}um_k \alpha_k r^{(k)}$ considered by \cite{Haarnoja2018} and \cite{hunt_diverg}, we can use the results of Lemma \ref{thm:compos_concave_std} to bound the optimal $Q$ function by using the optimal $Q$ functions for the primitive tasks: \begin{equation} Q^{(c)}(s,a) \leq \mathcal{S}um_k \alpha_k Q^{(k)}(s,a) \end{equation} \end{remark} \begin{proof} In standard RL, we need only show that $f( \max_i x_{1i}, \dotsc , \max_i x_{ni} ) \geq \max_i f(x_i, \dotsc , x_n)$: \begin{equation} \mathcal{S}um_k \alpha_k \max_i x^{(k)}_{i} \geq \max_i \mathcal{S}um_k \alpha_k x^{(k)}_i \end{equation} which holds given $\alpha_k \geq 0$ for all $k$. We also note that in this case the result clearly holds for general $\alpha_k \geq 0$ not necessarily with $\mathcal{S}um_k \alpha_k = 1$ (as assumed in \cite{Haarnoja2018} and \cite{hunt_diverg}). 
\end{proof} \begin{remark} Given the AND composition defined above and considered in \cite{boolean}, we have the following result in standard RL: \begin{equation} Q^{\text{AND}}(s,a) \leq \min_k \left\{Q^{(k)}(s,a)\right\} \end{equation} \end{remark} \begin{proof} We could proceed via induction as in the previous proofs, or simply use the above remark, and prove the necessary conditions on the function $f(\cdot) = \min(\cdot)$. The function $\min(\cdot)$ is concave in each argument. It is also straightforward to show that $\min(\cdot)$ is superadditive over all arguments. \end{proof} \begin{remark} Result of the (hard) OR composition in standard RL: \begin{equation} Q^{\text{OR}}(s,a) \geq \max_k \left\{Q^{(k)}(s,a)\right\} \end{equation} \end{remark} \begin{proof} The proof is analogous to the (hard) AND result: $\max$ is a convex, subadditive function. \end{proof} \begin{remark} Result for NOT operation in standard RL: \begin{equation} Q^{\text{NOT}}(s,a) \geq - Q(s,a) \end{equation} \end{remark} \begin{proof} Since the ``NOT'' gate is a unary function, and we are in the standard RL setting, we must check the conditions of Lemma 4.1 or 4.3. Moreover, since the transformation function applied to the rewards, $f(r)=-r$, is linear, we must check the final condition: $\max_i\{-x_i\} = -\min_i\{x_i\} \geq -\max_i\{x_i\}$. This is the condition required by the convex conditions. \end{proof} \section{Proofs for Entropy-Regularized RL} Let $X$ be the codomain for the $Q$ function of the primitive task ($Q: \mathcal{S} \times \mathcal{A} \to X \subseteq \mathbb{R}$).
\begin{lemma}[Convex Conditions] \label{thm:forward_cond_entropy-regularized} Given a bounded, continuous transformation function $f~:~X~\to~\mathbb{R}$ which satisfies: \begin{enumerate} \item $f$ is convex on its domain $X$ (for stochastic dynamics); \item $f$ is sublinear: \begin{enumerate}[label=(\roman*)] \item $f(x+y) \leq f(x) + f(y)$ for all $x,y \in X$ \item $f(\gamma x) \leq \gamma f(x)$ for all $x \in X$ \end{enumerate} \item $f\left( \log \E \exp \mathcal{Q}(s,a) \right) \leq \log \E \exp f\left( \mathcal{Q}(s,a) \right)$ for all functions $\mathcal{Q}:~\mathcal{S}~\times~\mathcal{A} \to \mathbb{R}.$ \end{enumerate} then the optimal action-value function for the transformed rewards, $\widetilde{Q}$, is now related to the optimal action-value function with respect to the original rewards by: \begin{equation}\label{eq:convex_entropy-regularized} f \left( Q(s,a) \right) \leq \widetilde{Q}(s,a) \leq f \left( Q(s,a) \right) + C(s,a) \end{equation} \end{lemma} \begin{proof} We will again prove the result with induction, beginning by writing the backup equation for the optimal soft $Q$ function in the transformed reward environment to prove the lower bound on $\widetilde{Q}$: \begin{equation} \widetilde{Q}^{(N+1)}(s,a) = f(r(s,a)) + \gamma \mathbb{E}_{s' \sim p(s'|s,a)} \frac{1}{\beta} \log \mathbb{E}_{a' \sim \pi_0(a'|s')} \exp \left(\beta \widetilde{Q}^{(N)}(s',a')\right) \end{equation} where $p$ is the dynamics and $\pi_0$ is the prior policy. Applying the inductive assumption, \begin{equation} \widetilde{Q}^{(N+1)}(s,a) \geq f(r(s,a)) + \gamma \mathbb{E}_{s' \sim p(s'|s,a)} \frac{1}{\beta} \log \mathbb{E}_{a' \sim \pi_0(a'|s')}\exp \left( \beta f\left(Q^{(N)}(s',a')\right) \right) \end{equation} Next, using the third condition on $f$ as well as its convexity, we may factor $f$ out of the expectations by Jensen's inequality: \begin{equation} \widetilde{Q}^{(N+1)}(s,a) \geq f(r(s,a)) + \gamma f\left( \mathbb{E}_{s' \sim p(s'|s,a)} \frac{1}{\beta} \log \mathbb{E}_{a' \sim \pi_0(a'|s')}\exp \left(\beta Q^{(N)}(s',a')\right) \right) \end{equation} Finally, using the sublinearity conditions of $f$, we arrive at \begin{equation} \widetilde{Q}^{(N+1)}(s,a) \geq f \left(r(s,a) + \gamma \mathbb{E}_{s' \sim p(s'|s,a)} \frac{1}{\beta} \log \mathbb{E}_{a' \sim \pi_0(a'|s')}\exp \left(\beta Q^{(N)}(s',a')\right) \right) \end{equation} The right hand side is $f \left(Q^{(N+1)}(s,a) \right)$. In the limit $N\to \infty,\ Q^{(N)}(s,a) \to Q(s,a)$ so the inductive proof for the lower bound is complete. Let $f$ satisfy the ``convex conditions''. Consider the backup equation for $\widetilde{Q}$. For the initialization (base case) we let $\widetilde{Q}^{(0)}(s,a)=f\left(Q(s,a)\right)$ and $C^{(0)}(s,a)=0$. Using the inductive assumption, \begin{align*} \widetilde{Q}^{(N+1)}(s,a) &= f(r(s,a)) + \frac{\gamma}{\beta} \E_{s' \sim p} \log \E_{a' \sim \pi_0} \exp \beta \widetilde{Q}^{(N)}(s',a') \\ &\leq f(r(s,a)) + \frac{\gamma}{\beta} \E_{s'} \log \E_{a'} \exp \beta \left( f(Q(s',a')) + C^{(N)}(s',a')\right) \\ &\leq f(r(s,a)) + \frac{\gamma}{\beta} \E_{s'} \left(\log \E_{a'} \exp \beta f(Q(s',a')) + \beta \max_{a'} C^{(N)}(s',a')\right) \\ &= f(Q(s,a)) + f(r(s,a)) + \gamma \E_{s'} V_f(s') - f(Q(s,a)) + \gamma \E_{s'} \max_{a'} C^{(N)}(s',a') \\ &= f(Q(s,a)) + C^{(N+1)}(s,a) \end{align*} Therefore in the limit $N \to \infty$, we have: $\widetilde{Q}(s,a) \leq f(Q(s,a)) + C(s,a)$ as desired. We note that since $f(r(s,a))~+~\gamma~\E_{s'}~V_f(s')~\geq~f(Q(s,a))$, we immediately have $C(s,a) \ge 0$, as is required for the bound to be non-vacuous. \end{proof} \begin{lemma} Consider the soft value of the policy $\pi_f \propto \exp \beta f(Q)$ on the transformed task of interest, denoted by $\widetilde{Q}^{\pi_f}(s,a)$. The sub-optimality of $\pi_f$ is then upper bounded by: \begin{equation} \widetilde{Q}(s,a) - \widetilde{Q}^{\pi_f}(s,a) \leq D(s,a) \end{equation} where $D$ is the value of the policy $\pi_f$ with reward \begin{equation} r_D(s,a) = \gamma \mathbb{E}_{s' \sim p} \left[ \max_{b} \left\{ f\left(Q(s',b)\right) + C(s',b) \right\} -V_f(s') \right] \end{equation} \end{lemma} \begin{proof} To prove the (soft) policy evaluation bound, we use iterations of soft-policy evaluation \cite{Haarnoja_SAC} and denote iteration $N$ of the evaluation of $\pi_f$ in the composite environment as $\widetilde{Q}^{\pi_f(N)}$. Beginning with the definitions $\widetilde{Q}^{\pi_f(0)}(s,a) = Q(s,a)$ (since the evaluation is independent of the initialization), and $D^{(0)}=0$, the $N=0$ step is trivially satisfied. Assuming the inductive hypothesis, we consider the next step of soft policy evaluation: \begin{align*} \widetilde{Q}^{\pi_f(N+1)}(s,a) &= f(r(s,a)) + \gamma \E_{s' \sim p}\E_{ a'\sim \pi_f} \left[\widetilde{Q}^{\pi_f(N)}(s',a') - \frac{1}{\beta} \log \frac{\pi_f(a'|s')}{\pi_0(a'|s')} \right] \\ &\geq f(r(s,a)) + \gamma \E_{s',a'} \left[\widetilde{Q}(s',a') - D^{(N)}(s',a') - f(Q(s',a')) + V_f(s') \right] \\ &= f(r(s,a)) + \gamma \E_{s'}\widetilde{V}(s') + \gamma \E_{s',a'} \left[\widetilde{Q}(s',a') - D^{(N)}(s',a') - f(Q(s',a')) + V_f(s') - \widetilde{V}(s')\right] \\ &\geq \widetilde{Q}(s,a) + \gamma \E_{s',a'} \left[f(Q(s',a')) - D^{(N)}(s',a') - f(Q(s',a')) + V_f(s') - \widetilde{V}(s')\right] \\ &\geq \widetilde{Q}(s,a) + \gamma \E_{s',a'} \left[ - D^{(N)}(s',a') + V_f(s') - \max_{b} \left\{ f\left(Q(s',b)\right) + C(s',b) \right\} \right] \\ &\geq \widetilde{Q}(s,a) -D^{(N+1)}(s,a) \\ \end{align*} where we have used $\widetilde{Q}(s,a) \geq f(Q(s,a))$ in the fourth line, and the fact that $ \widetilde{V}(s) \leq \max_b \left\{ f\left(Q(s,b)\right) + \max_a C(s,a)\right\}$, both of which follow from the previously stated bounds. \end{proof}
3,925
49,178
en
train
0.90.10
\begin{lemma} Consider the soft value of the policy $\pi_f \propto \exp \beta f(Q)$ on the transformed task of interest, denoted by $\widetilde{Q}^{\pi_f}$(s,a). The sub-optimality of $\pi_f$ is then upper bounded by: \begin{equation} \widetilde{Q}(s,a) - \widetilde{Q}^{\pi_f}(s,a) \leq D(s,a) \end{equation} where $D$ is the value of the policy $\pi_f$ with reward \begin{equation} r_D(s,a) = \gamma \mathbb{E}_{s' \mathcal{S}im{} p} \left[ \max_{b} \left\{ f\left(Q(s',b)\right) + C(s',b) \right\} -V_f(s') \right] \end{equation} \end{lemma} \begin{proof} To prove the (soft) policy evaluation bound, we use iterations of soft-policy evaluation \cite{Haarnoja_SAC} and denote iteration $N$ of the evaluation of $\pi_f$ in the composite environment as $\widetilde{Q}^{\pi_f(N)}$. Beginning with the definitions $\widetilde{Q}^{\pi_f(0)}(s,a) = Q(s,a)$ (since the evaluation is independent of the initialization), and $D^{(0)}=0$, the $N=0$ step is trivially satisfied. Assuming the inductive hypothesis, we consider the next step of soft policy evaluation: As in the previous policy evaluation results, we prove an equivalent result with induction. 
\begin{align*} \widetilde{Q}^{\pi_f(N+1)}(s,a) &= f(r(s,a)) + \gamma \E_{s' \mathcal{S}im{} p}\E_{ a'\mathcal{S}im{} \pi_f} \left[\widetilde{Q}^{\pi_f(N)}(s',a') - f\left(r(s,a)\right)ac{1}{\beta} \log f\left(r(s,a)\right)ac{\pi_f(a'|s')}{\pi_0(a'|s')} \right] \\ &\geq f(r(s,a)) + \gamma \E_{s',a'} \left[\widetilde{Q}(s',a') - D^{(N)}(s',a') - f(Q(s',a')) + V_f(s') \right] \\ &= f(r(s,a)) + \gamma \E_{s'}\widetilde{V}(s') + \gamma \E_{s',a'} \left[\widetilde{Q}(s',a') - D^{(N)}(s',a') - f(Q(s',a')) + V_f(s') - \widetilde{V}(s')\right] \\ &\geq \widetilde{Q}(s,a) + \gamma \E_{s',a'} \left[f(Q(s',a')) - D^{(N)}(s',a') - f(Q(s',a')) + V_f(s') - \widetilde{V}(s')\right] \\ &\geq \widetilde{Q}(s,a) + \gamma \E_{s',a'} \left[ - D^{(N)}(s',a') + V_f(s') - \max_{b} \left\{ f\left(Q(s',b)\right) + C(s',b) \right\} \right] \\ &\geq \widetilde{Q}(s,a) -D^{(N+1)}(s,a) \\ \end{align*} where we have used $\widetilde{Q}(s,a) \geq f(Q(s,a))$ in the fourth line. where we have used the fact that $ \widetilde{V}(s) \leq \max_b \left\{ f\left(Q(s,b)\right) + \max_a C(s,a)\right\}$ and $\widetilde{Q}(s,a) - f(Q(s,a)) \geq 0$ which both follow from the previously stated bounds. 
\end{proof} \begin{lemma}[Concave Conditions] \label{thm:reverse_cond_entropy-regularized} Given a bounded, continuous transformation function $f~:~X~\to~\mathbb{R}$ which satisfies: \begin{enumerate} \item $f$ is concave on its domain $X$ (for stochastic dynamics); \item $f$ is superlinear: \begin{enumerate}[label=(\roman*)] \item $f(x+y) \geq f(x) + f(y)$ for all $x,y \in X$ \item $f(\gamma x) \geq \gamma f(x)$ for all $x \in X$ \end{enumerate} \item $f\left( \log \E \exp \mathcal{Q}(s,a) \right) \geq \log \E \exp f\left( \mathcal{Q}(s,a) \right)$ for all functions $\mathcal{Q}:~\mathcal{S}~\times~\mathcal{A} \to \mathbb{R}.$ \end{enumerate} then the optimal action-value function for the transformed rewards obeys the following inequality: \begin{equation}\label{eq:concave_entropy-regularized} f\left( Q(s,a) \right) - \hat{C}(s,a) \leq \widetilde{Q}(s,a) \leq f \left( Q(s,a) \right) \end{equation} \end{lemma} \begin{proof} The proof of the upper bound is the same as the preceding theorem's lower bound with all inequalities reversed. For the lower bound involving $\hat{C}$, again consider the backup equation for $\widetilde{Q}$. Using the definitions and inductive assumption as before, we have \begin{align*} \widetilde{Q}^{(N+1)}(s,a) &= f(r(s,a)) + \frac{\gamma}{\beta} \E_{s' \sim p} \log \E_{a' \sim \pi_0} \exp \beta \widetilde{Q}^{(N)}(s',a') \\ &\geq f(r(s,a)) + \frac{\gamma}{\beta} \E_{s'} \log \E_{a'} \exp \beta \left( f(Q(s',a')) -\hat{C}^{(N)}(s',a')\right) \\ &\geq f(r(s,a)) + \frac{\gamma}{\beta} \E_{s'} \left(\log \E_{a'} \exp \beta f(Q(s',a')) - \beta \max_{a'} \hat{C}^{(N)}(s',a')\right) \\ &= f(Q(s,a)) - \left[f(Q(s,a)) - f(r(s,a)) - \gamma \E_{s'} V_f(s') + \gamma \E_{s'} \max_{a'} \hat{C}^{(N)}(s',a')\right] \\ &= f(Q(s,a)) - \hat{C}^{(N+1)}(s,a) \end{align*} Therefore in the limit $N \to \infty$, we have: $\widetilde{Q}(s,a) \geq f(Q(s,a)) - \hat{C}(s,a)$ as desired. \end{proof} \begin{lemma} Consider the soft value of the policy $\pi_f \propto \exp \beta f(Q)$ on the transformed task of interest, denoted by $\widetilde{Q}^{\pi_f}(s,a)$. The sub-optimality of $\pi_f$ is then upper bounded by: \begin{equation} \widetilde{Q}(s,a) - \widetilde{Q}^{\pi_f}(s,a) \leq \hat{D}(s,a) \end{equation} where $\hat{D}$ is the fixed point of \begin{equation} \hat{D}(s,a) \xleftarrow{} \gamma \mathbb{E}_{s' \sim p}\mathbb{E}_{a' \sim \pi_f} \left[ \hat{C}(s',a') + \hat{D}(s',a') \right] \end{equation} \end{lemma} \begin{proof} We will show the policy evaluation result by induction, by evaluating $\pi_f \propto \exp(\beta f(Q))$ in the environment with rewards $f(r)$. We shall denote iterations of policy evaluation for $\pi_f$ in the environment with rewards $f(r)$ by $\widetilde{Q}^{\pi_f(N)}(s,a)$. \begin{align*} \widetilde{Q}^{\pi_f(N+1)}(s,a) &= f(r(s,a)) + \gamma \E_{s'\sim p} \E_{a'\sim \pi_f} \left[\widetilde{Q}^{\pi_f(N)}(s',a') - \frac{1}{\beta} \log \frac{\pi_f(a'|s')}{\pi_0(a'|s')} \right] \\ &\geq f(r(s,a)) + \gamma \E_{s',a'} \left[\widetilde{Q}(s',a')-\hat{D}^{(N)}(s',a') - (f(Q(s',a')) - V_f(s')) \right] \\ &\geq f(r(s,a)) + \gamma \E_{s',a'} \left[\widetilde{Q}(s',a')-\hat{D}^{(N)}(s',a') - \widetilde{Q}(s',a') -\hat{C}(s',a') + V_f(s') \right] \\ &\ge f(r(s,a)) + \gamma \E_{s'} \widetilde{V}(s') - \gamma \E_{s',a'} \left[\hat{D}^{(N)}(s',a') + \hat{C}(s',a') \right] \\ &= \widetilde{Q}(s,a) - \hat{D}^{(N+1)}(s,a) \\ \end{align*} where we have used the inductive assumption and $V_f(s) \ge \widetilde{V}(s)$, which follows from the previously stated bounds. Therefore in the limit $N \to \infty$, we have: $ \widetilde{Q}^{\pi_f}(s,a) \geq \widetilde{Q}(s,a) - \hat{D}(s,a) $ as desired.
\end{proof} \begin{lemma}[Convex Composition of Primitive Tasks]\label{thm:compos_convex_maxent} Suppose $F:X^N \to Y$ is convex on its domain $X^N$ and satisfies all conditions of Lemma 5.1 (Main Text) component-wise. Then, \begin{equation} F(\vec{Q}(s,a)) \le \widetilde{Q}(s,a) \le F(\vec{Q}(s,a)) + C(s,a) \end{equation} and \begin{equation} \widetilde{Q}^{\pi_f}(s,a) \geq \widetilde{Q}(s,a) - D(s,a) \end{equation} where we use a vector notation to emphasize that the function acts over the set of optimal $\{Q_k\}$ functions corresponding to each subtask, defined by $r_k$. \end{lemma} \begin{proof} The proof of this statement is identical to the previous proofs, now using the fact that $F$ is a multivariable function $F: X^N \to Y$, with each argument obeying the required conditions. \end{proof} \begin{lemma}[Concave Composition of Primitive Tasks]\label{thm:compos_concave_maxent} If on the other hand $F$ is concave and satisfies all conditions of Lemma 5.2 (Main Text) component-wise, then \begin{equation} F(\vec{Q}(s,a)) - \hat{C}(s,a) \le \widetilde{Q}(s,a) \le F(\vec{Q}(s,a)). \end{equation} and \begin{equation} \widetilde{Q}^{\pi_f}(s,a) \geq \widetilde{Q}(s,a) - \hat{D}(s,a) \end{equation} \end{lemma} \begin{proof} Again, the proof of this statement is identical to the previous proofs, now using the fact that $F$ is a multivariable function $F: X^N \to Y$, with each argument obeying the required conditions. \end{proof} \subsection{Examples of Transformations and Compositions} In this section we consider several examples mentioned in the main text, and show how they are proved with our results in entropy-regularized RL.
\begin{remark} Given the convex composition of subtasks $r^{(c)} \equiv F(\{r^{(k)}\}) = \sum_k \alpha_k r^{(k)}$ considered by \cite{Haarnoja2018} and \cite{hunt_diverg}, we can use the results of Lemma \ref{thm:compos_concave_maxent} to bound the optimal $Q$ function by using the optimal $Q$ functions for the primitive tasks: \begin{equation} Q^{(c)}(s,a) \leq \sum_k \alpha_k Q^{(k)}(s,a) \end{equation} \end{remark} \begin{proof} In entropy-regularized RL we need to show that the final condition holds (in vectorized form). This is simply H\"older's inequality \cite{hardy1952inequalities} for vector-valued functions in a probability space (with measure defined by $\pi_0$). \end{proof} \begin{remark} Given the AND composition defined above and considered in \cite{boolean}, we have the following result in standard RL: \begin{equation} Q^{\text{AND}}(s,a) \leq \min_k \left\{Q^{(k)}(s,a)\right\} \end{equation} \end{remark} \begin{proof} The function $\min(\cdot)$ is concave in each argument. It is also straightforward to show that $\min(\cdot)$ is subadditive over all arguments. For the final condition, the $\min$ acts globally over all subtasks: \begin{equation} \min_k \left\{ \frac{1}{\beta}\log\mathbb{E}_{a \sim \pi_0(a|s)} \exp\left(\beta \mathcal{Q}^{(k)}(s,a)\right)\right\} \leq \frac{1}{\beta}\log\mathbb{E}_{a \sim \pi_0(a|s)} \exp\left( \beta \min_k \left\{\mathcal{Q}^{(k)}(s,a)\right\}\right). \end{equation} \end{proof} \begin{remark} Result of the (hard) OR composition in standard RL: \begin{equation} Q^{\text{OR}}(s,a) \geq \max_k \left\{Q^{(k)}(s,a)\right\} \end{equation} \end{remark} \begin{proof} The proof is analogous to the (hard) AND result: $\max$ is a convex, superadditive function. 
For the final condition, the $\max$ again acts globally over all subtasks: \begin{equation} \max_k \left\{ \frac{1}{\beta}\log\mathbb{E}_{a \sim \pi_0(a|s)} \exp\left(\beta \mathcal{Q}^{(k)}(s,a)\right)\right\} \ge \frac{1}{\beta}\log\mathbb{E}_{a \sim \pi_0(a|s)} \exp\left( \beta \max_k \left\{\mathcal{Q}^{(k)}(s,a)\right\}\right). \end{equation} \end{proof}
4,068
49,178
en
train
0.90.11
\begin{lemma}[Convex Composition of Primitive Tasks]\label{thm:compos_convex_maxent} Suppose $F:X^N \to Y$ is convex on its domain $X^N$ and satisfies all conditions of Lemma 5.1 (Main Text) component-wise. Then, \begin{equation} F(\vec{Q}(s,a)) \le \widetilde{Q}(s,a) \le F(\vec{Q}(s,a)) + C(s,a) \end{equation} and \begin{equation} \widetilde{Q}^{\pi_f}(s,a) \geq \widetilde{Q}(s,a) - D(s,a) \end{equation} where we use a vector notation to emphasize that the function acts over the set of optimal $\{Q_k\}$ functions corresponding to each subtask, defined by $r_k$. \end{lemma} \begin{proof} The proof of this statement is identical to the previous proofs, now using the fact that $F$ is a multivariable function $F: X^N \to Y$, with each argument obeying the required conditions. \end{proof} \begin{lemma}[Concave Composition of Primitive Tasks]\label{thm:compos_concave_maxent} If on the other hand $F$ is concave and satisfies all conditions of Lemma 5.2 (Main Text) component-wise, then \begin{equation} F(\vec{Q}(s,a)) - \hat{C}(s,a) \le \widetilde{Q}(s,a) \le F(\vec{Q}(s,a)). \end{equation} and \begin{equation} \widetilde{Q}^{\pi_f}(s,a) \geq \widetilde{Q}(s,a) - \hat{D}(s,a) \end{equation} \end{lemma} \begin{proof} Again, the proof of this statement is identical to the previous proofs, now using the fact that $F$ is a multivariable function $F: X^N \to Y$, with each argument obeying the required conditions. \end{proof} \subsection{Examples of Transformations and Compositions} In this section we consider several examples mentioned in the main text, and show how they are proved with our results in entropy-regularized RL. 
\begin{remark} Given the convex composition of subtasks $r^{(c)} \equiv F(\{r^{(k)}\}) = \sum_k \alpha_k r^{(k)}$ considered by \cite{Haarnoja2018} and \cite{hunt_diverg}, we can use the results of Lemma \ref{thm:compos_concave_maxent} to bound the optimal $Q$ function by using the optimal $Q$ functions for the primitive tasks: \begin{equation} Q^{(c)}(s,a) \leq \sum_k \alpha_k Q^{(k)}(s,a) \end{equation} \end{remark} \begin{proof} In entropy-regularized RL we need to show that the final condition holds (in vectorized form). This is simply H\"older's inequality \cite{hardy1952inequalities} for vector-valued functions in a probability space (with measure defined by $\pi_0$). \end{proof} \begin{remark} Given the AND composition defined above and considered in \cite{boolean}, we have the following result in standard RL: \begin{equation} Q^{\text{AND}}(s,a) \leq \min_k \left\{Q^{(k)}(s,a)\right\} \end{equation} \end{remark} \begin{proof} The function $\min(\cdot)$ is concave in each argument. It is also straightforward to show that $\min(\cdot)$ is subadditive over all arguments. For the final condition, the $\min$ acts globally over all subtasks: \begin{equation} \min_k \left\{ \frac{1}{\beta}\log\mathbb{E}_{a \sim \pi_0(a|s)} \exp\left(\beta \mathcal{Q}^{(k)}(s,a)\right)\right\} \leq \frac{1}{\beta}\log\mathbb{E}_{a \sim \pi_0(a|s)} \exp\left( \beta \min_k \left\{\mathcal{Q}^{(k)}(s,a)\right\}\right). \end{equation} \end{proof} \begin{remark} Result of the (hard) OR composition in standard RL: \begin{equation} Q^{\text{OR}}(s,a) \geq \max_k \left\{Q^{(k)}(s,a)\right\} \end{equation} \end{remark} \begin{proof} The proof is analogous to the (hard) AND result: $\max$ is a convex, superadditive function. 
For the final condition, the $\max$ again acts globally over all subtasks: \begin{equation} \max_k \left\{ \frac{1}{\beta}\log\mathbb{E}_{a \sim \pi_0(a|s)} \exp\left(\beta \mathcal{Q}^{(k)}(s,a)\right)\right\} \ge \frac{1}{\beta}\log\mathbb{E}_{a \sim \pi_0(a|s)} \exp\left( \beta \max_k \left\{\mathcal{Q}^{(k)}(s,a)\right\}\right). \end{equation} \end{proof} \begin{remark} Again we consider the NOT operation defined above, now in entropy-regularized RL, which yields the bound: \begin{equation} Q^{\text{NOT}}(s,a) \geq - Q(s,a) \end{equation} \end{remark} \begin{proof} As in the standard RL case, we need only consider the third condition of either Lemma 5.1 or 5.3. In particular, we show \begin{equation} f\left( \log \E \exp \mathcal{Q}(s,a) \right) \leq \log \E \exp f\left( \mathcal{Q}(s,a) \right) \end{equation} for all functions $\mathcal{Q}:~\mathcal{S}~\times~\mathcal{A} \to \mathbb{R}$. This follows from \begin{align} \frac{1}{\E \exp \mathcal{Q}(s,a)} \leq \E \frac{1}{\exp \mathcal{Q}(s,a) } \end{align} which is given by Jensen's inequality, since the function $f(x)=1/x$ is convex. \end{proof} \begin{remark}[Linear Scaling] \label{thm:scaling} Given some $k \in (0,1)$ the function $f(x) = k x$ satisfies the results of the first theorem. Conversely, if $k \geq 1$, $f(x) = k x$ satisfies the results of the second theorem. \end{remark} \begin{proof} This result (specifically the third condition of Lemma 5.1, 5.3) follows from the monotonicity of $\ell_p$ norms. \end{proof}
1,854
49,178
en
train
0.90.12
\begin{remark} Given the AND composition defined above and considered in \cite{boolean}, we have the following result in standard RL: \begin{equation} Q^{\text{AND}}(s,a) \leq \min_k \left\{Q^{(k)}(s,a)\right\} \end{equation} \end{remark} \begin{proof} The function $\min(\cdot)$ is concave in each argument. It is also straightforward to show that $\min(\cdot)$ is subadditive over all arguments. For the final condition, the $\min$ acts globally over all subtasks: \begin{equation} \min_k \left\{ \frac{1}{\beta}\log\mathbb{E}_{a \sim \pi_0(a|s)} \exp\left(\beta \mathcal{Q}^{(k)}(s,a)\right)\right\} \leq \frac{1}{\beta}\log\mathbb{E}_{a \sim \pi_0(a|s)} \exp\left( \beta \min_k \left\{\mathcal{Q}^{(k)}(s,a)\right\}\right). \end{equation} \end{proof} \begin{remark} Result of the (hard) OR composition in standard RL: \begin{equation} Q^{\text{OR}}(s,a) \geq \max_k \left\{Q^{(k)}(s,a)\right\} \end{equation} \end{remark} \begin{proof} The proof is analogous to the (hard) AND result: $\max$ is a convex, superadditive function. For the final condition, the $\max$ again acts globally over all subtasks: \begin{equation} \max_k \left\{ \frac{1}{\beta}\log\mathbb{E}_{a \sim \pi_0(a|s)} \exp\left(\beta \mathcal{Q}^{(k)}(s,a)\right)\right\} \ge \frac{1}{\beta}\log\mathbb{E}_{a \sim \pi_0(a|s)} \exp\left( \beta \max_k \left\{\mathcal{Q}^{(k)}(s,a)\right\}\right). \end{equation} \end{proof} \begin{remark} Again we consider the NOT operation defined above, now in entropy-regularized RL, which yields the bound: \begin{equation} Q^{\text{NOT}}(s,a) \geq - Q(s,a) \end{equation} \end{remark} \begin{proof} As in the standard RL case, we need only consider the third condition of either Lemma 5.1 or 5.3. 
In particular, we show \begin{equation} f\left( \log \E \exp \mathcal{Q}(s,a) \right) \leq \log \E \exp f\left( \mathcal{Q}(s,a) \right) \end{equation} for all functions $\mathcal{Q}:~\mathcal{S}~\times~\mathcal{A} \to \mathbb{R}$. This follows from \begin{align} \frac{1}{\E \exp \mathcal{Q}(s,a)} \leq \E \frac{1}{\exp \mathcal{Q}(s,a) } \end{align} which is given by Jensen's inequality, since the function $f(x)=1/x$ is convex. \end{proof} \begin{remark}[Linear Scaling] \label{thm:scaling} Given some $k \in (0,1)$ the function $f(x) = k x$ satisfies the results of the first theorem. Conversely, if $k \geq 1$, $f(x) = k x$ satisfies the results of the second theorem. \end{remark} \begin{proof} This result (specifically the third condition of Lemma 5.1, 5.3) follows from the monotonicity of $\ell_p$ norms. \end{proof} Since we have already shown the case of $k=-1$ (NOT gate), with the result of Theorem \ref{thm:compos}, the case for all $k \in \mathbb{R}$ has been characterized. \section{Extension for Error-Prone $Q$-Values} In this section, we provide some discussion on the case of inexact $Q$-values, as often occurs in practice (discussed at the end of Section 4.1 in the main text). We focus on the case of task transformation in standard RL. The corresponding statements in the settings of composition and entropy-regularized RL follow similarly. As our starting point, we assume that an ``$\varepsilon$-optimal estimate'' $\overbar{Q}(s,a)$ for a primitive task's exact value function $Q(s,a)$ is known. \begin{definition} An $\varepsilon$-optimal $Q$-function, $\overbar{Q}$, satisfies \begin{equation} |Q(s,a)-\overbar{Q}(s,a)|\leq \varepsilon \end{equation} for all $s \in \mathcal{S}, a \in \mathcal{A}$. \end{definition} To allow the derived double-sided bounds on the transformed tasks' $Q$-values to carry over to this more general setting, we assume that the transformation function is $L$-Lipschitz continuous. 
With these assumptions, we prove the following extensions of Lemma 4.1 and 4.3: \begin{customlemma}{4.1A}[Convex Conditions, Error-Prone]\label{thm:convex_cond_std_err} Given a primitive task with discount factor $\gamma$, corresponding $\varepsilon$-optimal value function $\overbar{Q}$, and a bounded, continuous, $L$-Lipschitz transformation function $f~:~X~\to~\mathbb{R}$ which satisfies: \begin{enumerate} \item $f$ is convex on its domain $X$ (for stochastic dynamics); \item $f$ is sublinear: \begin{enumerate}[label=(\roman*)] \item $f(x+y) \leq f(x) + f(y)$ for all $x,y \in X$ \item $f(\gamma x) \leq \gamma f(x)$ for all $x \in X$ \end{enumerate} \item $f\left( \max_{a} \mathcal{Q}(s,a) \right) \leq \max_{a}~f\left( \mathcal{Q}(s,a) \right)$ for all $\mathcal{Q}: \mathcal{S} \times \mathcal{A} \to \mathbb{R}.$ \end{enumerate} then the optimal action-value function for the transformed rewards, $\widetilde{Q}$, is now related to the optimal action-value function with respect to the original rewards by: \begin{equation}\label{eqn:convex_std_err} f(\overbar{Q}(s,a)) - L \varepsilon \leq \widetilde{Q}(s,a) \leq f(\overbar{Q}(s,a)) + \overbar{C}(s,a) + \frac{2}{1-\gamma}L \varepsilon \end{equation} where $\overbar{C}$ is the optimal value function for a task with reward \begin{equation}\label{eq:std_convex_C_def_err} \overbar{r_C}(s,a) = f(r(s,a)) + \gamma \mathbb{E}_{s'} \overbar{V_f}(s') - f(\overbar{Q}(s,a)). \end{equation} with $\overbar{V_f}(s)=\max_a f(\overbar{Q}(s,a))$. \end{customlemma} Note that as $\varepsilon \to 0$, the exact result (Lemma 4.1) is recovered. If the function $\overbar{C}$ is not known exactly, one can similarly exchange $\overbar{C}$ for $\overbar{\overbar{C}}$, an $\varepsilon$-optimal estimate for $\overbar{C}$. This consideration loosens the upper-bound by an addition of $\varepsilon$, shown at the end of the proof. We will make use of a well-known result (cf. 
proof of Lemma 1 in \cite{barreto_sf}) that bounds the difference in optimal $Q$-values for two tasks with different reward functions. \begin{lemma} Let two tasks, only differing in their reward functions, be given with reward $r_1(s,a)$ and $r_2(s,a)$, respectively. Suppose $|r_1(s,a)-r_2(s,a)|\leq \delta$. Then, the optimal value functions for the tasks satisfy: \begin{equation} |Q_1(s,a)-Q_2(s,a)|\leq \frac{\delta}{1-\gamma} \end{equation} \label{lem:bounded_q_diff} \end{lemma} Now we are in a position to prove Lemma \ref{thm:convex_cond_std_err}: \begin{proof} To prove the lower bound, we begin with the original lower bound in Lemma 4.1, for the optimal primitive task $Q$-values: \begin{equation} \widetilde{Q}(s,a) \geq f(Q(s,a)), \end{equation} or equivalently \begin{align} -\widetilde{Q}(s,a) &\leq -f(Q(s,a)) \\ -\widetilde{Q}(s,a) &\leq -f(Q(s,a)) + f(\overbar{Q}(s,a)) -f(\overbar{Q}(s,a)) \\ -\widetilde{Q}(s,a) &\leq |f(Q(s,a)) - f(\overbar{Q}(s,a))| - f(\overbar{Q}(s,a)) \\ \widetilde{Q}(s,a) &\geq -|f(Q(s,a)) - f(\overbar{Q}(s,a))| + f(\overbar{Q}(s,a)) \\ \widetilde{Q}(s,a) &\geq -L|Q(s,a) - \overbar{Q}(s,a)| + f(\overbar{Q}(s,a)) \\ \widetilde{Q}(s,a) &\geq f(\overbar{Q}(s,a)) - L \varepsilon \\ \end{align} where the final steps follow from the function $f$ being $L$-Lipschitz and the definition of $\varepsilon$-optimality of $\overbar{Q}(s,a)$. To prove the upper bound, we take a similar approach, noting that the reward function $r_C$ in Lemma 4.1 must be updated to account for the inexact $Q$-values. Therefore, we must account for the following error propagations: \begin{align*} Q(s,a) &\to \overbar{Q}(s,a) \\ V_f(s) &\to \overbar{V_f}(s)\\ r_C(s,a) &\to \overbar{r_C}(s,a). 
\end{align*} We first find the difference between $r_C$ and $\overbar{r_C}$ to be bounded by $(1+\gamma)L\varepsilon$: \begin{align} |r_C(s,a)-\overbar{r_C}(s,a)| &= |\gamma \E_{s' \sim p } V_f^*(s') - f(Q^*(s,a)) - \gamma \E_{s' \sim p } V_f(s') + f(Q(s,a))| \\ &\leq \gamma \E_{s'} |V_f^*(s') - V_f(s')| + |f(Q^*(s,a)) - f(Q(s,a))| \\ &\leq \gamma \E_{s'} \max_{a'} |f(Q^*(s',a')) - f(Q(s',a'))| + |f(Q^*(s,a)) - f(Q(s,a))| \\ &\leq (1+\gamma)L \varepsilon \end{align} where in the third line we have used the bound $|\max_{x} f(x) - \max_{x} g(x)| \leq \max_{x} |f(x)-g(x)|$. Now, applying Lemma \ref{lem:bounded_q_diff} to the reward functions $r_C$ and $\overbar{r_C}$: \begin{equation} |C(s,a) - \overbar{C}(s,a)| \leq \frac{ (1+\gamma)}{1-\gamma}L \varepsilon \end{equation} With the same technique as was used above for the lower bound, we find: \begin{align} \widetilde{Q}(s,a) &\leq f(Q(s,a)) + C(s,a) \\ &\leq f(\overbar{Q}(s,a)) + L \varepsilon + C(s,a) \\ &= f(\overbar{Q}(s,a)) + L \varepsilon + \overbar{C}(s,a) - \overbar{C}(s,a) + C(s,a) \\ &\leq f(\overbar{Q}(s,a)) + L \varepsilon + |C(s,a) - \overbar{C}(s,a)| + \overbar{C}(s,a) \\ &\leq f(\overbar{Q}(s,a)) + L \varepsilon + \overbar{C}(s,a) + \frac{ (1+\gamma)}{1-\gamma}L \varepsilon \\ &= f(\overbar{Q}(s,a)) + \overbar{C}(s,a) + \frac{2}{1-\gamma}L \varepsilon \end{align} Further extending the result to the case where only an $\varepsilon$-optimal estimate of $\overbar{C}$ is known, denoted by $\overbar{\overbar{C}}$, we find: \begin{align} \widetilde{Q}(s,a) &\leq f(\overbar{Q}(s,a)) + \overbar{C}(s,a) + \frac{2}{1-\gamma}L \varepsilon \\ &\leq f(\overbar{Q}(s,a)) + \overbar{\overbar{C}}(s,a) + |\overbar{\overbar{C}}(s,a)- \overbar{C}(s,a)| + \frac{2}{1-\gamma}L \varepsilon \\ &\leq f(\overbar{Q}(s,a)) + \overbar{\overbar{C}}(s,a) + \varepsilon + \frac{2}{1-\gamma}L \varepsilon \\ &= 
f(\overbar{Q}(s,a)) + \overbar{\overbar{C}}(s,a) + \left(1+ \frac{2}{1-\gamma}L \right)\varepsilon \end{align} \end{proof}
3,793
49,178
en
train
0.90.13
Similarly, Lemma 4.3 from the main text can be extended under the same conditions: \begin{customlemma}{4.3A}[Concave Conditions, Error-Prone]\label{thm:concave_cond_std_err} Given a primitive task with discount factor $\gamma$, corresponding $\varepsilon$-optimal value function $\overbar{Q}$, and a bounded, continuous, $L$-Lipschitz transformation function $f~:~X~\to~\mathbb{R}$ which satisfies: \begin{enumerate} \item $f$ is concave on its domain $X$ (for stochastic dynamics); \item $f$ is superlinear: \begin{enumerate}[label=(\roman*)] \item $f(x+y) \geq f(x) + f(y)$ for all $x,y \in X$ \item $f(\gamma x) \geq \gamma f(x)$ for all $x \in X$ \end{enumerate} \item $f\left( \max_{a} \mathcal{Q}(s,a) \right) \geq \max_{a}~f\left( \mathcal{Q}(s,a) \right)$ for all functions $\mathcal{Q}:~\mathcal{S}~\times~\mathcal{A} \to X.$ \end{enumerate} then the optimal action-value functions are now related in the following way: \begin{equation}\label{eqn:concave_std_err} f(\overbar{Q}(s,a)) - \overbar{\hat{C}}(s,a)-\frac{2}{1-\gamma}L \varepsilon \leq \widetilde{Q}(s,a) \leq f(\overbar{Q}(s,a)) + L \varepsilon \end{equation} where $\overbar{\hat{C}}$ is the optimal value function for a task with reward \begin{equation} \overbar{\hat{r}_C}(s,a) = f(\overbar{Q}(s,a)) - f(r(s,a)) - \gamma \E_{s'\sim p} \overbar{V_f}(s') \end{equation} with $\overbar{V_f}(s)=\max_a f(\overbar{Q}(s,a))$. \end{customlemma} The proof of Lemma \ref{thm:concave_cond_std_err} is the same as that given above for Lemma \ref{thm:convex_cond_std_err}, with all signs flipped. Finally, we note that both extensions of Lemma \ref{thm:convex_cond_std_err} and \ref{thm:concave_cond_std_err} hold for the entropy-regularized case. The only differences required to prove the results are showing that Lemma \ref{lem:bounded_q_diff} and $|V_f(s)-\overbar{V_f}(s)|\leq L\varepsilon$ hold in entropy-regularized RL. 
Both statements are trivial given that the necessary soft-max operation is $1$-Lipschitz. Similar results can be derived for the case of compositions, when each subtasks' $Q$-function is replaced by an $\varepsilon$-optimal estimate thereof. \section{Results Applying to Both Entropy-Regularized and Standard RL} As we have discussed in the main text; an agent with a large library of accessible functions will be able to transform and compose their primitive knowledge in a wider variety of ways. Therefore, we would like to extend $\mathcal{F}$ to encompass as many functions as possible. Below, we will show that the functions $f\in \mathcal{F}$ characterizing the Transfer MDP Library have two closure properties (additivity and function composition) which enable more accessible transfer functions. First, let $\mathcal{F}^+$ denote the set of functions $f \in \mathcal{F}$ obeying the convex conditions, and similarly let $\mathcal{F}^-$ denote the set of functions obeying the concave conditions. In standard RL, we have the following closure property for addition of functions. \begin{theorem} Let $f,g \in \mathcal{F}^+$. Then $f+g \in \mathcal{F}^+$. Similarly, if $f,g \in \mathcal{F}^-$, then $f+g \in \mathcal{F}^-$. \end{theorem} \begin{proof} Let $f,g \in \mathcal{F}^+$. Convexity: The sum of two convex functions is convex. Subadditive: $(f+g)(x+y) = f(x+y)+g(x+y)\leq f(x)+g(x)+f(y)+g(y)=(f+g)(x)+(f+g)(y)$. Submultiplicative: $(f+g)(\gamma x) = f(\gamma x) + g(\gamma x) \le \gamma f(x) + \gamma g(x) = \gamma(f+g)(x)$. The proof for $f,g \in \mathcal{F}^-$ is the same with all signs flipped, except for the additional final condition: $(f+g)(\max_i x_i) = f(\max_i x_i) + g(\max_i x_i) = \max f(x) + \max g(x) \ge \max f(x) + g(x).$ Although this is not equality as shown in the main text, the condition still suffices. 
For the case of a single function (no addition, as seen in main text), it can never be the case that $\max_i f(x_i)~>~\max f(x)$ and therefore was excluded. (Just as $\max_i f(x_i) \le \max f(x)$ is automatically satisfied for the convex conditions.) \end{proof} \begin{theorem}[Function Composition] \label{thm:compos} For any reward-mapping functions $f$, $g \in \mathcal{F}^+$ ($\mathcal{F}^-$) with $f$ non-decreasing, the composition of functions $f$ and $g$, $h(x) = f(g(x)) \in \mathcal{F}^+ (\mathcal{F}^-)$. \end{theorem} \begin{proof} Let $f,g \in \mathcal{F}^+$, assume $f: B \to C$ and $g:A \to B$, and let $f$ be non-decreasing. This guarantees that $f(g(x))$ is convex. Additionally, $f(g(x+y)) \leq f(g(x)+g(y)) \leq f(g(x)) + f(g(y))$ by the sublinearity of $g,f$ respectively. Similarly $f(g(\gamma x)) \leq f(\gamma g(x)) \leq \gamma f(g(x))$. For the standard RL (concave) condition, note that for all functions $\mathcal{Q}:~\mathcal{S}~\times~\mathcal{A} \to X$: \begin{equation} f\left( g\left( \max_{a} \mathcal{Q}(s,a)\right) \right) \geq f\left(\max_{a}~g\left( \mathcal{Q}(s,a)\right) \right) \geq \max_{a}~f\left(g\left( \mathcal{Q}(s,a)\right) \right) \end{equation} For the entropy-regularized condition, we first apply the condition to $g$: \begin{equation} f\left( g\left( \frac{1}{\beta}\log\mathbb{E}_{a \sim \pi_0(a|s)} \exp(\beta \mathcal{Q}(s,a))\right) \right) \le f\left( \frac{1}{\beta}\log\mathbb{E}_{a \sim \pi_0(a|s)} \exp(\beta g(\mathcal{Q}(s,a)) )\right) \end{equation} Then to $f$: \begin{equation} f\left( g\left( \frac{1}{\beta}\log\mathbb{E}_{a \sim \pi_0(a|s)} \exp(\beta \mathcal{Q}(s,a))\right) \right) \le \frac{1}{\beta}\log\mathbb{E}_{a \sim \pi_0(a|s)} \exp\left(\beta f\left( g\left(\mathcal{Q}(s,a)\right) \right) \right) \end{equation} The reversed statement, when $f,g \in \mathcal{F}^-$ with $f$ non-decreasing has a similar 
proof and is omitted. \end{proof} With this result established, we are now able to concatenate multiple transformations. This allows for multiple gates in Boolean logic statements, for example. As stated in the main text, this ability to compose multiple functions will greatly expand the number of tasks in the Transfer MDP Library which the agent may (approximately) solve. \nocite{openAI} \begin{@fileswfalse} \end{@fileswfalse} \end{document}
2,297
49,178
en
train
0.91.0
\begin{document} \title[A splitter theorem for $3$-connected $2$-polymatroids]{A splitter theorem for $3$-connected $2$-polymatroids} \thanks{The second and third authors were supported by the New Zealand Marsden Fund.} \author{James Oxley} \address{Department of Mathematics, Louisiana State University, Baton Rouge, Louisiana, USA} \email{[email protected]} \author{Charles Semple} \address{Department of Mathematics and Statistics, University of Canterbury, Christchurch, New Zealand} \email{[email protected]} \author{Geoff Whittle} \address{School of Mathematics, Statistics and Operations Research, Victoria University, Wellington, New Zealand} \email{[email protected]} \subjclass{05B35} \date{\today} \begin{abstract} Seymour's Splitter Theorem is a basic inductive tool for dealing with $3$-connected matroids. This paper proves a generalization of that theorem for the class of $2$-polymatroids. Such structures include matroids, and they model both sets of points and lines in a projective space and sets of edges in a graph. A series compression in such a structure is an analogue of contracting an edge of a graph that is in a series pair. A $2$-polymatroid $N$ is an s-minor of a $2$-polymatroid $M$ if $N$ can be obtained from $M$ by a sequence of contractions, series compressions, and dual-contractions, where the last are modified deletions. The main result proves that if $M$ and $N$ are $3$-connected $2$-polymatroids such that $N$ is an s-minor of $M$, then $M$ has a $3$-connected s-minor $M'$ that has an s-minor isomorphic to $N$ and has $|E(M)| - 1$ elements unless $M$ is a whirl or the cycle matroid of a wheel. In the exceptional case, such an $M'$ can be found with $|E(M)| - 2$ elements. \end{abstract} \maketitle \vspace*{-30pt}
592
142,722
en
train
0.91.1
\section{Introduction} \label{intro} Let $M$ be a $3$-connected matroid other than a wheel or a whirl. Tutte~\cite{wtt} proved that $M$ has an element whose deletion or contraction is $3$-connected. Seymour~\cite{pds} extended this theorem by showing that, for a proper $3$-connected minor $N$ of $M$, the matroid $M$ has an element whose deletion or contraction is $3$-connected and has an $N$-minor. These theorems have been powerful inductive tools for working with $3$-connected matroids. In \cite{oswww}, with a view to attacking representability problems for 2-polymatroids, we generalized the Wheels-and-Whirls Theorem to $2$-polymatroids. In this paper, we prove a generalization of the Splitter Theorem for $2$-polymatroids. A basic example of a matroid is a set of points in a projective space. If, instead, we take a finite set of points and lines in a projective space, we get an example of a $2$-polymatroid. Whereas each element of a matroid has rank zero or one, an individual element in a $2$-polymatroid can also have rank two. Formally, for a positive integer $k$, a {\em $k$-polymatroid} $M$ is a pair $(E,r)$ consisting of a finite set $E$, called the {\it ground set}, and a function $r$, called the {\it rank function}, from the power set of $E$ into the integers satisfying the following conditions: \begin{itemize} \item[(i)] $r(\emptyset) = 0$; \item[(ii)] if $X \subseteq Y \subseteq E$, then $r(X) \le r(Y)$; \item[(iii)] if $X$ and $Y$ are subsets of $E$, then $r(X) + r(Y) \ge r(X \cup Y) + r(X \cap Y)$; and \item[(iv)] $r(\{e\})\leq k$ for all $e\in E$. \end{itemize} A matroid is just a 1-polymatroid. Equivalently, it is a $2$-polymatroid in which every element has rank at most one. Our focus in this paper will be on $2$-polymatroids. From a graph $G$, in addition to its cycle matroid, we can derive a second $2$-polymatroid on $E(G)$, which we denote by $M_2(G)$. 
The latter is defined by letting the rank of a set $A$ of edges be the number of vertices incident with edges in $A$. Observe that non-loop edges of $G$ have rank two in $M_2(G)$. Matroid connectivity generalizes naturally to $2$-polymatroids. In particular, 3-connectivity for matroids extends routinely to a notion of 3-connectivity for $2$-polymatroids. A simple $3$-connected graph $G$ has a $3$-connected cycle matroid. On the other hand, $M_2(G)$ is $3$-connected whenever $G$ is a $2$-connected loopless graph. Deletion and contraction for matroids extend easily to $2$-polymatroids. This gives a notion of minor for $2$-polymatroids that extends that of minor for matroids, and, via cycle matroids, that of minor for graphs. But what happens when we consider the $2$-polymatroid $M_2(G)$? If $e$ is an edge of $G$, then deletion in $M_2(G)$ corresponds to deletion in $G$, but it is not the same with contraction. However, there is an operation on $M_2(G)$ that corresponds to contraction in $G$. Specifically, if $e$ is an element of the $2$-polymatroid $M$ and $r(\{e\}) > 0$, then the {\em compression} of $e$ from $M$, denoted $M\downarrow e$, is obtained by placing a rank-$1$ element $x$ freely on $e$, contracting $x$, and then deleting $e$ from the resulting $2$-polymatroid. In particular, $M_2(G)\downarrow e=M_2(G/e)$ for a non-loop edge $e$ of the graph $G$. Representability of matroids extends easily to representability of polymatroids over fields. Indeed, much of the motivation for this paper is derived from our desire to develop tools for attacking representability problems for $2$-polymatroids. The class of $2$-polymatroids representable over a field $\mathbb F$ is closed under both deletion and contraction. When $\mathbb F$ is finite, this is not the case for compression in general although it is the case for a restricted type of compression. In \cite{oswww}, we defined a certain type of 3-separator, which we called a `prickly' 3-separator. 
A series pair in a graph $G$ is a 2-element prickly 3-separator of $M_2(G)$. Larger prickly 3-separators do not arise from graphs, but do arise in more general settings. Compressing elements from prickly 3-separators preserves representability. We gave examples in \cite{oswww} to show that, if we wish to generalize Tutte's Wheels-and-Whirls Theorem to $2$-polymatroids, it is necessary to allow compression of elements from prickly 3-separators. The main result of \cite{oswww} proves such a generalization by showing that a $3$-connected non-empty $2$-polymatroid that is not a whirl or the cycle matroid of a wheel has an element $e$ such that either $M\backslash e$ or $M/e$ is $3$-connected, or $e$ belongs to a prickly $3$-separator, and $M\downarrow e$ is $3$-connected. Geelen, Gerards, and Whittle~\cite{ggwrota} have announced that Rota's Conjecture~\cite{rot} is true, that is, for every finite field, there is a finite set of minor-minimal matroids that are not representable over that field. In \cite{oswww}, we showed that, for every field ${\mathbb F}$, the set of minor-minimal 2-polymatroids that are not representable over ${\mathbb F}$ is infinite, so one generalization of Rota's Conjecture for 2-polymatroids fails. We believe, however, that an alternative generalization of the conjecture does hold. Specifically, we conjectured in \cite{oswww} that, when ${\mathbb F}$ is finite, there are only finitely many 2-polymatroids that are minimal with the property of being non-representable over ${\mathbb F}$ where we allow, as reduction operations, not only deletion and contraction but also compression of elements from prickly 3-separators. Our main result appears at the end of this section. We now give the rest of the background needed to understand that result. The matroid terminology used here will follow Oxley~\cite{oxbook}. Lov\'{a}sz and Plummer~\cite[Chapter 11]{lovaplum} have given an interesting discussion of $2$-polymatroids and some of their properties. 
We call $(E,r)$ a {\it polymatroid} if it is a $k$-polymatroid for some positive integer $k$. In a $2$-polymatroid $(E,r)$, an element $x$ will be called a {\it line}, a {\it point}, or a {\it loop} when its rank is $2$, $1$, or $0$, respectively. For readers accustomed to using the terms `point' and `line' for flats in a matroid of rank one and two, respectively, this may create some potential confusion. However, in this paper, we shall never use the terms `point' and `line' in this alternative way. Indeed, we will not even define a flat of a $2$-polymatroid. Let $M$ be a polymatroid $(E,r)$. For a subset $X$ of $E$, the {\it deletion} $M\backslash X$ and the {\it contraction} $M/X$ of $X$ from $M$ are the pairs $(E-X,r_1)$ and \linebreak$(E-X,r_2)$ where, for all subsets $Y$ of $E-X$, we have $r_1(Y) = r(Y)$ and $r_2(Y) = r(Y \cup X) - r(X)$. We shall also write $M|(E- X)$ for $M\backslash X$. A {\it minor} of the polymatroid $M$ is any polymatroid that can be obtained from $M$ by a sequence of operations each of which is a deletion or a contraction. It is straightforward to check that every minor of a $k$-polymatroid is also a $k$-polymatroid. The {\it closure} ${\rm cl}(X)$ of a set $X$ in $M$ is, as for matroids, the set $\{x \in E: r(X \cup x) = r(X)\}$. Two polymatroids $(E_1,r_1)$ and $(E_2,r_2)$ are {\it isomorphic} if there is a bijection $\phi$ from $E_1$ onto $E_2$ such that $r_1(X) = r_2(\phi(X))$ for all subsets $X$ of $E_1$. One natural way to obtain a polymatroid is from a collection of flats of a matroid $M$. Indeed, every polymatroid arises in this way \cite{helg, lova, mcd}. More precisely, we have the following. \begin{theorem} \label{herepoly} Let $t$ be a function defined on the power set of a finite set $E$. Then $(E,t)$ is a polymatroid if and only if, for some matroid $M$, there is a function $\psi$ from $E$ into the set of flats of $M$ such that $t(X) = r_M(\cup_{x \in X} \psi(x))$ for all subsets $X$ of $E$. 
\end{theorem} The key idea in proving this theorem is that of freely adding a point to an element of a polymatroid. Let $(E,r)$ be a polymatroid, let $x$ be an element of $E$, and let $x'$ be an element that is not in $E$. We can extend the domain of $r$ to include all subsets of $E \cup x'$ by letting \begin{equation*} r(X \cup x') = \begin{cases} r(X), & \text{if $r(X \cup x) = r(X)$};\\ r(X) + 1, & \text{if $r(X \cup x) > r(X)$}. \end{cases} \end{equation*} Then it is not difficult to check that $(E \cup x', r)$ is a polymatroid. We say that it has been obtained from $(E,r)$ by {\it freely adding} $x'$ to $x$. If we repeat this construction by freely adding a new element $y'$ to some element $y$ of $E$, we can show that the order in which these two operations are performed is irrelevant. Using this idea, we can associate a matroid with every $2$-polymatroid $M$ as follows. Let $L$ be the set of lines of $M$. For each $\ell$ in $L$, freely add two points $s_{\ell}$ and $t_{\ell}$ to $\ell$. Let $M^+$ be the $2$-polymatroid obtained after performing all of these $2|L|$ operations. Let $M'$ be $M^+ \backslash L$. We call $M'$ the {\it natural matroid derived from $M$}. Given a graph $G$ with edge set $E$, as noted earlier, one can define a $2$-polymatroid $M_2(G)$ on $E$ by, for each subset $X$ of $E$, letting $r(X)$ be $|V(X)|$ where $V(X)$ is the set of vertices of $G$ that are an endpoint of at least one edge in $X$. A polymatroid $(E',r')$ is {\it Boolean} if it is isomorphic to the $2$-polymatroid that is obtained in this way from some graph. One attractive feature of $M_2(G)$ is that, except for the possible presence of isolated vertices, it uniquely determines $G$. More precisely, if $G_1$ and $G_2$ are graphs neither of which has any isolated vertices and if $M_2(G_1) = M_2(G_2)$, then there is a labelling of the vertices of $G_2$ such that $G_1 = G_2$. This contrasts with the situation for matroids where quite different graphs can have the same cycle matroids. 
Let $M$ be a polymatroid $(E,r)$. The {\it connectivity function}, $\lambda_M$ or $\lambda$, of $M$ is defined, for all subsets $X$ of $E$, by $\lambda_M(X) = r(X) + r(E-X) - r(M)$. Observe that $\lambda_M(E-X) = \lambda_M(X)$. It is routine to check, using the submodularity of the rank function, that the connectivity function is submodular, that is, for all subsets $Y$ and $Z$ of $E$, $$\lambda_M(Y) + \lambda_M(Z) \ge \lambda_M(Y \cup Z) + \lambda_M(Y \cap Z).$$ Let $M$ be a polymatroid. For a positive integer $n$, a subset $X$ of $E(M)$ is {\it $n$-separating} if $\lambda_M(X) \le n-1$ and is {\it exactly $n$-separating} if $\lambda_M(X) = n-1$. We say that $M$ is {\it $2$-connected} if it has no proper non-empty $1$-separating subset. We will also say that $M$ is {\it disconnected} if it is not 2-connected. We call $M$ {\it $3$-connected} if $M$ is $2$-connected and $M$ has no {\it $2$-separation}, that is, $M$ has no partition $(X,Y)$ with $\max\{|X|, r(X)\} > 1$ and $\max\{|Y|, r(Y)\} > 1$ but $\lambda(X) \le 1$. When $M$ is a $3$-connected $2$-polymatroid $(E,r)$, a {\it $3$-separation} of $M$ is a partition $(X,Y)$ of $E$ such that $\lambda(X) = 2$ and both $r(X)$ and $r(Y)$ exceed $2$.
3,725
142,722
en
train
0.91.2
One natural way to obtain a polymatroid is from a collection of flats of a matroid $M$. Indeed, every polymatroid arises in this way \cite{helg, lova, mcd}. More precisely, we have the following. \begin{theorem} \label{herepoly} Let $t$ be a function defined on the power set of a finite set $E$. Then $(E,t)$ is a polymatroid if and only if, for some matroid $M$, there is a function $\psi$ from $E$ into the set of flats of $M$ such that $t(X) = r_M(\cup_{x \in X} \psi(x))$ for all subsets $X$ of $E$. \end{theorem} The key idea in proving this theorem is that of freely adding a point to an element of a polymatroid. Let $(E,r)$ be a polymatroid, let $x$ be an element of $E$, and let $x'$ be an element that is not in $E$. We can extend the domain of $r$ to include all subsets of $E \cup x'$ by letting \begin{equation*} r(X \cup x') = \begin{cases} r(X), & \text{if $r(X \cup x) = r(X)$};\\ r(X) + 1, & \text{if $r(X \cup x) > r(X)$}. \end{cases} \end{equation*} Then it is not difficult to check that $(E \cup x', r)$ is a polymatroid. We say that it has been obtained from $(E,r)$ by {\it freely adding} $x'$ to $x$. If we repeat this construction by freely adding a new element $y'$ to some element $y$ of $E$, we can show that the order in which these two operations are performed is irrelevant. Using this idea, we can associate a matroid with every $2$-polymatroid $M$ as follows. Let $L$ be the set of lines of $M$. For each $\ell$ in $L$, freely add two points $s_{\ell}$ and $t_{\ell}$ to $\ell$. Let $M^+$ be the $2$-polymatroid obtained after performing all of these $2|L|$ operations. Let $M'$ be $M^+ \backslash L$. We call $M'$ the {\it natural matroid derived from $M$}. Given a graph $G$ with edge set $E$, as noted earlier, one can define a $2$-polymatroid $M_2(G)$ on $E$ by, for each subset $X$ of $E$, letting $r(X)$ be $|V(X)|$ where $V(X)$ is the set of vertices of $G$ that are an endpoint of at least one edge in $X$. 
A polymatroid $(E',r')$ is {\it Boolean} if it is isomorphic to the $2$-polymatroid that is obtained in this way from some graph. One attractive feature of $M_2(G)$ is that, except for the possible presence of isolated vertices, it uniquely determines $G$. More precisely, if $G_1$ and $G_2$ are graphs neither of which has any isolated vertices and if $M_2(G_1) = M_2(G_2)$, then there is a labelling of the vertices of $G_2$ such that $G_1 = G_2$. This contrasts with the situation for matroids where quite different graphs can have the same cycle matroids. Let $M$ be a polymatroid $(E,r)$. The {\it connectivity function}, $\lambda_M$ or $\lambda$, of $M$ is defined, for all subsets $X$ of $E$, by $\lambda_M(X) = r(X) + r(E-X) - r(M)$. Observe that $\lambda_M(E-X) = \lambda_M(X)$. It is routine to check, using the submodularity of the rank function, that the connectivity function is submodular, that is, for all subsets $Y$ and $Z$ of $E$, $$\lambda_M(Y) + \lambda_M(Z) \ge \lambda_M(Y \cup Z) + \lambda_M(Y \cap Z).$$ Let $M$ be a polymatroid. For a positive integer $n$, a subset $X$ of $E(M)$ is {\it $n$-separating} if $\lambda_M(X) \le n-1$ and is {\it exactly $n$-separating} if $\lambda_M(X) = n-1$. We say that $M$ is {\it $2$-connected} if it has no proper non-empty $1$-separating subset. We will also say that $M$ is {\it disconnected} if it is not 2-connected. We call $M$ {\it $3$-connected} if $M$ is $2$-connected and $M$ has no {\it $2$-separation}, that is, $M$ has no partition $(X,Y)$ with $\max\{|X|, r(X)\} > 1$ and $\max\{|Y|, r(Y)\} > 1$ but $\lambda(X) \le 1$. When $M$ is a $3$-connected $2$-polymatroid $(E,r)$, a {\it $3$-separation} of $M$ is a partition $(X,Y)$ of $E$ such that $\lambda(X) = 2$ and both $r(X)$ and $r(Y)$ exceed $2$. Duality plays a fundamental role in matroid theory and will also be important in our work with $2$-polymatroids. 
Whereas there is a standard notion of what constitutes the dual of a matroid, for $2$-polymatroids, there is more than one choice. Let $M$ be a $k$-polymatroid $(E,r)$. The {\it $k$-dual} of $M$ is the pair $(E,r^*_k)$ defined by $r^*_k(Y) = k|Y| + r(E-Y) - r(M)$. This notion of duality was used, for example, in Oxley and Whittle's treatment \cite{ow2p} of Tutte invariants for $2$-polymatroids. An {\it involution} on the class ${\mathcal M}_k$ of $k$-polymatroids is a function $\zeta$ from ${\mathcal M}_k$ into ${\mathcal M}_k$ such that $\zeta(\zeta(M)) = M$ for all $M$ in ${\mathcal M}_k$. Whittle~\cite{gpw} showed that the $k$-dual is the only involution on ${\mathcal M}_k$ under which deletion and contraction are interchanged in the familiar way. However, a disadvantage of this duality operation is that, for a matroid $M$, we can view $M$ as a $k$-polymatroid for all $k\ge 1$. Hence $M$ has a $1$-dual, which is its usual matroid dual. But it also has a $2$-dual, a $3$-dual, and so on. In \cite{oswww}, we used a duality operation on the class of all polymatroids that, when applied to a $k$-polymatroid, produces another $k$-polymatroid and that, when applied to a matroid produces its usual matroid dual. In this paper, we will use a variant on that operation that agrees with it when applied to $3$-connected $2$-polymatroids with at least two elements. Both of these versions of duality are members of a family of potential duals for a polymatroid $(E,r)$ that were defined by McDiarmid~\cite{mcd} and were based on assigning a weight $w(e)$ to each element $e$ of $E$ where $w(e) \ge r(\{e\})$ for all $e$ in $E$. For a set $X$, we shall write $||X||$ for the sum $\sum_{e \in X} w(e)$. In \cite{oswww}, we took $w(e)$ to be $\max\{r(\{e\}),1\}$. 
Here, instead, we will take $w(e) = r(\{e\})$ and define the {\it dual} of a polymatroid $(E,r)$ to be the pair $(E,r^*)$ where, for all subsets $Y$ of $E$, $$r^*(Y) = ||Y|| + r(E- Y) - r(E) = \sum_{e \in Y} r(\{e\}) + r(E- Y) - r(E).$$ It is straightforward to check that, when $(E,r)$ is a $k$-polymatroid, so too is $(E,r^*)$. When $M = (E,r)$, we shall write $M^*$ for $(E,r^*)$. When the polymatroid $M$ is a matroid, its dual as just defined coincides with its usual matroid dual provided $M$ has no loops. However, if $e$ is a loop of $M$, then $e$ is a loop of $M^*$. The definition of dual used in \cite{oswww} (where we took $||Y|| = \sum_{e \in Y} \max\{1,r(\{e\})\}$) was chosen to ensure that, when $M$ is a matroid, its polymatroid dual coincides with its matroid dual. Here, however, we are giving up on that, albeit in a rather specialized case. Note, however, that the two definitions of dual coincide unless $M$ has a loop so, in particular, they coincide when $M$ is $3$-connected having at least two elements. Moreover, as noted in \cite{oswww}, these two versions of duality share a number of important properties, the proofs of which are very similar. For example, $\lambda_M(X) = \lambda_{M^*}(X)$. Next we discuss the reason for the use of the above definition of duality, which follows \cite{susan, jmw}. Consider the following example, which will guide how we proceed. Begin with the matroid that is the direct sum of $PG(r-1,q)$ and $PG(k-2,q)$ viewing this as a restriction of $PG(r+k-2,q)$. Let $N$ be the restriction of $PG(r-1,q)$ to the complement of a hyperplane $H$ of it, so $N \cong AG(r-1,q)$. Take $k$ distinct points, $x_1,x_2,\ldots,x_k$, of $PG(r-1,q)$ that are in $H$, and let $\{y_1,y_2,\ldots,y_k\}$ be a spanning circuit in $PG(k-2,q)$. For each $i$ in $\{1,2,\ldots,k\}$, let $\ell_i$ be the line of $PG(r+k-2,q)$ that is spanned by $\{x_i,y_i\}$. 
Let $M$ be the $2$-polymatroid whose elements are the points of $N$ along with the set $L$ consisting of the lines $\ell_1,\ell_2,\ldots,\ell_k$. It is straightforward to check that $M$ and $N$ are $3$-connected. The only way to obtain an $N$-minor of $M$ is to delete all the elements of $L$ since contracting any member of $L$ has the effect of reducing the rank of $E(N)$. But, in each of the $2$-polymatroids $M\backslash L'$, where $L'$ is a proper non-empty subset of $L - \ell_k$, the set $\ell_k$ is $2$-separating. Since our goal is a splitter theorem, where we can remove some bounded number of elements from $M$ maintaining both $3$-connectivity and an $N$-minor, we will need a strategy for dealing with this example. One significant feature of this example is the very constrained nature of the 2-separations in each $M\backslash L'$ with one side of each such 2-separation consisting of a single line. This is reminiscent of what happens in Bixby's Lemma~\cite{bixby} for $3$-connected matroids where, for every element $e$ of such a matroid $N$, either $N\backslash e$ is $3$-connected except for some possible series pairs, or $N/e$ is $3$-connected except for some possible parallel pairs. Indeed, in the matroid derived from $M\backslash L'$, each 2-separating line yields a series pair in the derived matroid. The strategy that we will adopt is intimately linked to our choice of definition for the dual of a polymatroid. It is well known that, under the familiar definition of duality for matroids, taking the dual of the dual returns us to the original matroid. We now consider the relationship between a polymatroid $M$ and the polymatroid $(M^*)^*$. If $M$ is a $3$-connected $2$-polymatroid with at least two elements, then $(M^*)^* = M$. To see what happens in general, we follow \cite{jmw}. Let $M$ be the polymatroid $(E,r)$. An element $e$ of $M$ is {\it compact} if $r(\{e\}) = \lambda_M(\{e\})$ or, equivalently, if $r(E - \{e\}) = r(E)$. 
We call $M$ {\it compact} if every element is compact. Thus, for example, a matroid is compact if it has no coloops. In the example in the last paragraph, although $M$ is compact, $M\backslash \{\ell_1\}$ is not since, for each $i \ge 2$, we have $r(\{\ell_i\}) = 2$ whereas $\lambda_{M\backslash \{\ell_1\}}(\{\ell_i\}) = 1$. The {\it compactification} $M^{\flat}$ of the polymatroid $M$ is the pair $(E,r^{\flat})$ where $$r^{\flat}(X) = r(X) + \sum_{x \in X} [\lambda (\{x\}) - r(\{x\})]$$ for all subsets $X$ of $E$. It is shown in \cite{jmw} that $M^{\flat}$ is a compact polymatroid and it is clear that if $M$ is a $2$-polymatroid, then so is $M^{\flat}$. The next result \cite{jmw} encapsulates some key properties of this compactification operation and justifies the approach we take here. \begin{lemma} \label{compact0} Let $(E,r)$ be a polymatroid $M$. Then \begin{enumerate} \item[(i)] $M^*$ is compact; \item[(ii)] $(M^*)^* = M^{\flat}$; \item[(iii)] $\lambda_M = \lambda_{M^*} = \lambda_{M^{\flat}}$; and \item[(iv)] $M/X$ is compact for all non-empty subsets $X$ of $E$ and $$(M/X)^* = (M^*\backslash X)^{\flat}.$$ \end{enumerate} \end{lemma} Returning to our guiding example above, although $M\backslash \{\ell_1\}$ is neither compact nor $3$-connected, its compactification is both. Observe that this compactification can be obtained from the restriction of the matroid $PG(r-1,q)$ to $E(N) \cup \{x_2,x_3,\ldots,x_k\}$ by relabelling each $x_i$ by $\ell_i$ noting that these $\ell_i$ are now points rather than lines. Thus compactification here has an analogous effect to cosimplification in matroids. By incorporating compactification as part of the deletion operation, which is justified by (iv) of the last lemma, we see that, after deleting a single element, we have both maintained $3$-connectivity and kept an $N$-minor. This is precisely what we want in a splitter theorem.
3,885
142,722
en
train
0.91.3
Consider the following example, which will guide how we proceed. Begin with the matroid that is the direct sum of $PG(r-1,q)$ and $PG(k-2,q)$ viewing this as a restriction of $PG(r+k-2,q)$. Let $N$ be the restriction of $PG(r-1,q)$ to the complement of a hyperplane $H$ of it, so $N \cong AG(r-1,q)$. Take $k$ distinct points, $x_1,x_2,\ldots,x_k$, of $PG(r-1,q)$ that are in $H$, and let $\{y_1,y_2,\ldots,y_k\}$ be a spanning circuit in $PG(k-2,q)$. For each $i$ in $\{1,2,\ldots,k\}$, let $\ell_i$ be the line of $PG(r+k-2,q)$ that is spanned by $\{x_i,y_i\}$. Let $M$ be the $2$-polymatroid whose elements are the points of $N$ along with the set $L$ consisting of the lines $\ell_1,\ell_2,\ldots,\ell_k$. It is straightforward to check that $M$ and $N$ are $3$-connected. The only way to obtain an $N$-minor of $M$ is to delete all the elements of $L$ since contracting any member of $L$ has the effect of reducing the rank of $E(N)$. But, in each of the $2$-polymatroids $M\backslash L'$, where $L'$ is a proper non-empty subset of $L - \ell_k$, the set $\ell_k$ is $2$-separating. Since our goal is a splitter theorem, where we can remove some bounded number of elements from $M$ maintaining both $3$-connectivity and an $N$-minor, we will need a strategy for dealing with this example. One significant feature of this example is the very constrained nature of the 2-separations in each $M\backslash L'$ with one side of each such 2-separation consisting of a single line. This is reminiscent of what happens in Bixby's Lemma~\cite{bixby} for $3$-connected matroids where, for every element $e$ of such a matroid $N$, either $N\backslash e$ is $3$-connected except for some possible series pairs, or $N/e$ is $3$-connected except for some possible parallel pairs. Indeed, in the matroid derived from $M\backslash L'$, each 2-separating line yields a series pair in the derived matroid. 
The strategy that we will adopt is intimately linked to our choice of definition for the dual of a polymatroid. It is well known that, under the familiar definition of duality for matroids, taking the dual of the dual returns us to the original matroid. We now consider the relationship between a polymatroid $M$ and the polymatroid $(M^*)^*$. If $M$ is a $3$-connected $2$-polymatroid with at least two elements, then $(M^*)^* = M$. To see what happens in general, we follow \cite{jmw}. Let $M$ be the polymatroid $(E,r)$. An element $e$ of $M$ is {\it compact} if $r(\{e\}) = \lambda_M(\{e\})$ or, equivalently, if $r(E - \{e\}) = r(E)$. We call $M$ {\it compact} if every element is compact. Thus, for example, a matroid is compact if it has no coloops. In the example in the last paragraph, although $M$ is compact, $M\backslash \{\ell_1\}$ is not since, for each $i \ge 2$, we have $r(\{\ell_i\}) = 2$ whereas $\lambda_{M\backslash \{\ell_1\}}(\{\ell_i\}) = 1$. The {\it compactification} $M^{\flat}$ of the polymatroid $M$ is the pair $(E,r^{\flat})$ where $$r^{\flat}(X) = r(X) + \sum_{x \in X} [\lambda (\{x\}) - r(\{x\})]$$ for all subsets $X$ of $E$. It is shown in \cite{jmw} that $M^{\flat}$ is a compact polymatroid and it is clear that if $M$ is a $2$-polymatroid, then so is $M^{\flat}$. The next result \cite{jmw} encapsulates some key properties of this compactification operation and justifies the approach we take here. \begin{lemma} \label{compact0} Let $(E,r)$ be a polymatroid $M$. Then \begin{enumerate} \item[(i)] $M^*$ is compact; \item[(ii)] $(M^*)^* = M^{\flat}$; \item[(iii)] $\lambda_M = \lambda_{M^*} = \lambda_{M^{\flat}}$; and \item[(iv)] $M/X$ is compact for all non-empty subsets $X$ of $E$ and $$(M/X)^* = (M^*\backslash X)^{\flat}.$$ \end{enumerate} \end{lemma} Returning to our guiding example above, although $M\backslash \{\ell_1\}$ is neither compact nor $3$-connected, its compactification is both. 
Observe that this compactification can be obtained from the restriction of the matroid $PG(r-1,q)$ to $E(N) \cup \{x_2,x_3,\ldots,x_k\}$ by relabelling each $x_i$ by $\ell_i$ noting that these $\ell_i$ are now points rather than lines. Thus compactification here has an analogous effect to cosimplification in matroids. By incorporating compactification as part of the deletion operation, which is justified by (iv) of the last lemma, we see that, after deleting a single element, we have both maintained $3$-connectivity and kept an $N$-minor. This is precisely what we want in a splitter theorem. In $2$-polymatroids, the behaviour of contraction differs significantly from that for matroids. In particular, consider the $2$-polymatroid $M_2(G)$ obtained from a graph $G$, where $G$ has vertex set $V$ and edge set $E$. Let $e$ be an edge of $G$. Deleting $e$ from $G$ has an unsurprising effect; specifically, $M_2(G) \backslash e = M_2(G \backslash e)$. But, to find $M_2(G)/e$, we cannot simply look at $M_2(G/e)$. In particular, what do we do with elements whose rank is reduced to zero in the contraction? To deal with this situation, it is standard to extend the definition of a graph to allow the presence of {\it free loops}, that is, edges with no endpoints. This terminology is due to Zaslavsky \cite{zas}. For a graph $G$ with free loops, the associated $2$-polymatroid $M_2(G)$ is defined, as before, to have rank function $r(X) = |V(X)|$. The deletion of a free loop $f$ from a graph just removes $f$ from the graph. We define the contraction of $f$ to be the same as its deletion. For an edge $e$ that is not a free loop, to obtain a graph $H$ so that $M_2(G) /e = M_2(H)$, we let $H$ have edge set $E - e$ and vertex set $V - V(\{e\})$. An edge $x$ of $H$ is incident with the vertices in $V(\{x\}) - V(\{e\})$. 
The difference between $M_2(G)/e$ and $M_2(G/e)$ motivated us to introduce an operation for $2$-polymatroids in \cite{oswww} that mimics the effect of the usual operation of contraction of an edge from the graph. Let $(E,r)$ be a $2$-polymatroid $M$, and let $x$ be an element of $E$. We have described already what it means to add an element $x'$ freely to $x$. Our new operation $M\downarrow x$ is obtained from $M$ by freely adding $x'$ to $x$ in $M$, then contracting $x'$ from the resulting extension, and finally deleting $x$. Because each of the steps in this process results in a $2$-polymatroid, we have a well-defined operation on $2$-polymatroids. When $x$ has rank at most one in $M$, one easily checks that $M\downarrow x = M/x$. When $x$ is a line in $M$, we see that $M\downarrow x$ and $M/x$ are different as their ranks are $r(M) - 1$ and $r(M) - 2$, respectively. Combining the different parts of the definition, we see that $M\downarrow x$ is the $2$-polymatroid with ground set $E - \{x\}$ and rank function given, for all subsets $X$ of $E - \{x\}$, by \begin{equation} \label{getdown} r_{M\downarrow x}(X) = \begin{cases} r(X), & \text{if $r(x) = 0$, or $r(X \cup x) > r(X)$; and}\\ r(X) - 1, & \text{otherwise.} \end{cases} \end{equation} We shall say that $M\downarrow x$ has been obtained from $M$ by {\it compressing} $x$, and $M\downarrow x$ will be called the {\it compression} of $x$. We showed in \cite{oswww} that $M_2(G)\downarrow e = M_2(G/e).$ Songbao Mo~\cite{smo} established a number of properties of a generalization of this operation that he defines for connectivity functions and calls {\it elision}. Instead of treating arbitrary minors, much of graph theory restricts attention to topological minors in which the only allowed contractions involve edges that meet vertices of degree two. When $e$ and $f$ are the only edges in a $2$-connected graph $G$ meeting a vertex $v$, and $G$ has at least four vertices, $\{e,f\}$ is a $3$-separating set in $M_2(G)$. 
This $3$-separating set is an example of a special type of $3$-separating set that we introduced in \cite{oswww}. In a $2$-polymatroid $M$, a 3-separating set $Z$ is {\it prickly} if it obeys the following conditions: \begin{itemize} \item[(i)] Each element of $Z$ is a line; \item[(ii)] $|Z| \ge 2$ and $\lambda(Z) = 2$; \item[(iii)] $r((E -Z) \cup Z') = r(E - Z) + |Z'|$ for all proper subsets $Z'$ of $Z$; and \item[(iv)] if $Z'$ is a non-empty subset of $Z$, then \begin{equation*} r(Z') = \begin{cases} 2 & \text{if $|Z'| = 1$};\\ |Z'| + 2 & \text{if $1 < |Z'| < |Z|$; and}\\ |Z| + 1 & \text{if $|Z'| = |Z|.$} \end{cases} \end{equation*} \end{itemize} A prickly $3$-separating set of $M$ will also be called a {\it prickly $3$-separator} of $M$. Observe that, when $Z$ is a prickly $3$-separating set, for all distinct $z$ and $z'$ in $Z$, the $2$-polymatroid $M\backslash z$ has $(\{z'\}, E - \{z,z'\})$ as a $2$-separation. We are now able to formally state the main result of \cite{oswww}. Recall that a $2$-polymatroid is {\it pure} if every individual element has rank $2$. It is {\it non-empty} if its ground set is non-empty. \begin{theorem} \label{lastmainone} Let $M$ be a $3$-connected non-empty $2$-polymatroid. Then one of the following holds. \begin{itemize} \item[(i)] $M$ has an element $e$ such that $M\backslash e$ or $M/e$ is $3$-connected; \item[(ii)] $M$ has rank at least three and is a whirl or the cycle matroid of a wheel; or \item[(iii)] $M$ is a pure $2$-polymatroid having a prickly $3$-separating set. Indeed, every minimal $3$-separating set $Z$ with at least two elements is prickly, and $M\downarrow z$ is $3$-connected and pure for all $z$ in $Z$. \end{itemize} \end{theorem} In \cite{oswww}, we gave a number of examples to show the need for the third part of the above theorem. 
It is worth noting here, since it contrasts with what we have already mentioned and what will feature in the main result of this paper, the operation of deletion used in the last theorem does not incorporate compactification. In the main result of this paper, we will incorporate compactification as part of deletion but we will no longer need to allow arbitrary prickly compressions, only those that arise from a $2$-element prickly $3$-separator. Let $Z$ be such a set in a $2$-polymatroid $M$. For $z$ in $Z$, we will call $M\downarrow z$ a {\it series compression} of $M$. For a compact $2$-polymatroid $M_1$, we call $M_2$ an {\it s-minor} of $M_1$ if $M_2$ can be obtained from $M_1$ by a sequence of contractions, deletions followed by compactifications, and series compressions. The next result is the main theorem of the paper. It concerns s-minors of 3-connected 2-polymatroids. Such a 2-polymatroid is compact provided it has at least three elements. \begin{theorem} \label{mainone} Let $M$ be a $3$-connected $2$-polymatroid and $N$ be a $3$-connected proper s-minor of $M$ having at least four elements. Then one of the following holds. \begin{itemize} \item[(i)] $M$ has an element $e$ such that $M/ e$ is $3$-connected having an s-minor isomorphic to $N$; or \item[(ii)] $M$ has an element $e$ such that $(M\backslash e)^{\flat} $ is $3$-connected having an s-minor isomorphic to $N$; or \item[(iii)] $M$ has a two-element prickly $3$-separating set $Z$ such that, for each $z$ in $Z$, the series compression $M\downarrow z$ is $3$-connected having an s-minor isomorphic to $N$; or \item[(iv)] $r(M) \ge 3$ and $M$ is a whirl or the cycle matroid of a wheel. \end{itemize} \end{theorem} For compact $2$-polymatroids $M_1$ and $M_2$, we call $M_2$ a {\it c-minor} of $M_1$ if $M_2$ can be obtained from $M_1$ by a sequence of operations each consisting of a contraction or of a deletion followed by a compactification. 
As we shall show in Section~\ref{redc}, the last theorem can be proved by establishing the following result.
3,890
142,722
en
train
0.91.4
Instead of treating arbitrary minors, much of graph theory restricts attention to topological minors in which the only allowed contractions involve edges that meet vertices of degree two. When $e$ and $f$ are the only edges in a $2$-connected graph $G$ meeting a vertex $v$, and $G$ has at least four vertices, $\{e,f\}$ is a $3$-separating set in $M_2(G)$. This $3$-separating set is an example of a special type of $3$-separating set that we introduced in \cite{oswww}. In a $2$-polymatroid $M$, a 3-separating set $Z$ is {\it prickly} if it obeys the following conditions: \begin{itemize} \item[(i)] Each element of $Z$ is a line; \item[(ii)] $|Z| \ge 2$ and $\lambda(Z) = 2$; \item[(iii)] $r((E -Z) \cup Z') = r(E - Z) + |Z'|$ for all proper subsets $Z'$ of $Z$; and \item[(iv)] if $Z'$ is a non-empty subset of $Z$, then \begin{equation*} r(Z') = \begin{cases} 2 & \text{if $|Z'| = 1$};\\ |Z'| + 2 & \text{if $1 < |Z'| < |Z|$; and}\\ |Z| + 1 & \text{if $|Z'| = |Z|.$} \end{cases} \end{equation*} \end{itemize} A prickly $3$-separating set of $M$ will also be called a {\it prickly $3$-separator} of $M$. Observe that, when $Z$ is a prickly $3$-separating set, for all distinct $z$ and $z'$ in $Z$, the $2$-polymatroid $M\backslash z$ has $(\{z'\}, E - \{z,z'\})$ as a $2$-separation. We are now able to formally state the main result of \cite{oswww}. Recall that a $2$-polymatroid is {\it pure} if every individual element has rank $2$. It is {\it non-empty} if its ground set is non-empty. \begin{theorem} \label{lastmainone} Let $M$ be a $3$-connected non-empty $2$-polymatroid. Then one of the following holds. \begin{itemize} \item[(i)] $M$ has an element $e$ such that $M\backslash e$ or $M/e$ is $3$-connected; \item[(ii)] $M$ has rank at least three and is a whirl or the cycle matroid of a wheel; or \item[(iii)] $M$ is a pure $2$-polymatroid having a prickly $3$-separating set. 
Indeed, every minimal $3$-separating set $Z$ with at least two elements is prickly, and $M\downarrow z$ is $3$-connected and pure for all $z$ in $Z$. \end{itemize} \end{theorem} In \cite{oswww}, we gave a number of examples to show the need for the third part of the above theorem. It is worth noting here, since it contrasts with what we have already mentioned and what will feature in the main result of this paper, the operation of deletion used in the last theorem does not incorporate compactification. In the main result of this paper, we will incorporate compactification as part of deletion but we will no longer need to allow arbitrary prickly compressions, only those that arise from a $2$-element prickly $3$-separator. Let $Z$ be such a set in a $2$-polymatroid $M$. For $z$ in $Z$, we will call $M\downarrow z$ a {\it series compression} of $M$. For a compact $2$-polymatroid $M_1$, we call $M_2$ an {\it s-minor} of $M_1$ if $M_2$ can be obtained from $M_1$ by a sequence of contractions, deletions followed by compactifications, and series compressions. The next result is the main theorem of the paper. It concerns s-minors of 3-connected 2-polymatroids. Such a 2-polymatroid is compact provided it has at least three elements. \begin{theorem} \label{mainone} Let $M$ be a $3$-connected $2$-polymatroid and $N$ be a $3$-connected proper s-minor of $M$ having at least four elements. Then one of the following holds. \begin{itemize} \item[(i)] $M$ has an element $e$ such that $M/ e$ is $3$-connected having an s-minor isomorphic to $N$; or \item[(ii)] $M$ has an element $e$ such that $(M\backslash e)^{\flat} $ is $3$-connected having an s-minor isomorphic to $N$; or \item[(iii)] $M$ has a two-element prickly $3$-separating set $Z$ such that, for each $z$ in $Z$, the series compression $M\downarrow z$ is $3$-connected having an s-minor isomorphic to $N$; or \item[(iv)] $r(M) \ge 3$ and $M$ is a whirl or the cycle matroid of a wheel. 
\end{itemize} \end{theorem} For compact $2$-polymatroids $M_1$ and $M_2$, we call $M_2$ a {\it c-minor} of $M_1$ if $M_2$ can be obtained from $M_1$ by a sequence of operations each consisting of a contraction or of a deletion followed by a compactification. As we shall show in Section~\ref{redc}, the last theorem can be proved by establishing the following result. \begin{theorem} \label{modc0} Let $M$ be a $3$-connected $2$-polymatroid and $N$ be a $3$-connected proper c-minor of $M$ having at least four elements. Then one of the following holds. \begin{itemize} \item[(i)] $M$ has an element $e$ such that $M/ e$ is $3$-connected having a c-minor isomorphic to $N$; or \item[(ii)] $M$ has an element $e$ such that $(M\backslash e)^{\flat} $ is $3$-connected having a c-minor isomorphic to $N$; or \item[(iii)] $M$ has a prickly $3$-separator $\{y,z\}$ such that $M\downarrow y$ is $3$-connected having a c-minor isomorphic to $N$; or \item[(iv)] $r(M) \ge 3$ and $M$ is a whirl or the cycle matroid of a wheel. \end{itemize} \end{theorem} The paper is structured as follows. The next section includes some basic preliminaries. In Sections~\ref{clc} and \ref{pc2s}, we develop a number of results relating to connectivity and local connectivity, and to parallel connection and 2-sums. In Section~\ref{strat}, we describe the strategy for proving Theorem~\ref{mainone}. That section serves as a guide to the remaining sections of the paper, with the purpose of each of these sections being to complete an identified step in the proof. Section~\ref{redc} plays an important role in this proof by showing that the main theorem can be proved by adding the assumption that all series compressions are performed last in the production of an s-minor of $M$ isomorphic to $N$. That result is helpful but it cannot obscure the fact that the proof of Theorem~\ref{mainone} is complex with some subtleties in the logic that need to be carefully negotiated.
1,922
142,722
en
train
0.91.5
\section{Preliminaries} \label{prelim} Much of the terminology for matroids carries over to $2$-polymatroids. For example, suppose $x$ and $y$ are distinct points of a $2$-polymatroid $M$, that is, $r(\{x\}) = 1 = r(\{y\})$. If $r(\{x,y\}) = 1$, then $x$ and $y$ are {\it parallel points} of $M$. On the other hand, if $r(E - \{x,y\}) = r(E) -1 < r(E - x) = r(E-y)$, then $\{x,y\}$ is a {\it series pair of points} of $M$. Evidently, if $\{x,y\}$ is a parallel or series pair of points, then $\lambda_M(\{x,y\}) \le 1$. If $x$ and $y$ are distinct lines of $M$ and $r(\{x,y\}) = 2$, then $x$ and $y$ are {\it parallel lines} of $M$. One tool that is used repeatedly in our earlier work is the submodularity of the connectivity function. Once again, this will play a vital role here. Partitions $(X_1,X_2)$ and $(Y_1,Y_2)$ of a set $E$ are said to {\it cross} if all four of the sets \linebreak $X_1\cap Y_1,$ $X_1 \cap Y_2, X_2 \cap Y_1$, and $X_2 \cap Y_2$ are non-empty. We shall frequently encounter crossing partitions of the ground set of a $2$-polymatroid. We shall use the term {\it by uncrossing} to refer to an application of the submodularity of the connectivity function. In this paper, we shall frequently switch between considering the deletion $M\backslash X$ of a set $X$ of elements of a $2$-polymatroid $M$ and the compactification $(M\backslash X)^{\flat}$ of this deletion, which we shall sometimes call the {\it compactified deletion of $X$}. We shall often use the following abbreviated notation for the latter: $$(M\backslash X)^{\flat} = M\backslashba X.$$ We shall often encounter the situation when we have a $2$-polymatroid $M$ such that $M^{\flat}$ is $3$-connected although $M$ itself is not. This occurs when $M$ has a line $\ell$ such that $(\{\ell\},E - \ell)$ is a $2$-separation. We call such a $2$-separation of $M$ {\it trivial}. 
Thus, in general, a partition $(X,Y)$ of $E$ is a {\it non-trivial $2$-separation} of $M$ if $\lambda_M(X) \le 1$ and $\min\{|X|,|Y|\} \ge 2$. For a $2$-polymatroid $M$, we recall that a minor of $M$ is any $2$-polymatroid that can be obtained from $M$ by a sequence of contractions and deletions where, here, deletions are not automatically accompanied by compactifications. When $M$ and $N$ are compact, we defined $N$ to be a c-minor of $M$ if it can be obtained from $M$ by a sequence of contractions and deletions followed by compactifications. In the proof of Theorem~\ref{modc0}, it is convenient to be able to separate the compactifications from the deletions. Thus we define a {\it c-minor} of an arbitrary 2-polymatroid $M$ to be any $2$-polymatroid that can be obtained from $M$ by a sequence of contractions, deletions, and compactifications. As we shall show in Corollary~\ref{complast2}, this extension of the definition is consistent with our original definition. For a $2$-polymatroid $N$, a {\it special $N$-minor} of $M$ is any c-minor of $M$ that is either equal to $N$ or differs from $N$ by having a single point relabelled. \begin{lemma} \label{complast} Let $P$ and $Q$ be $2$-polymatroids such that $Q$ can be obtained from $P$ by a sequence of deletions, contractions, and compactifications with the last move being a compactification. Then $Q$ can be obtained from $P$ by the same sequence of deletions and contractions with none of the compactifications being done except for the last move. \end{lemma} To prove this lemma, we shall require a preliminary result. \begin{lemma} \label{complast1} Let $P$ be the $2$-polymatroid $(E,r)$. For $A \subseteq E$, \begin{itemize} \item[(i)] $(P^{\flat}\backslash A)^{\flat} = (P\backslash A)^{\flat}$; and \item[(ii)] $(P^{\flat}/ A)^{\flat} = (P/ A)^{\flat}$. \end{itemize} \end{lemma} \begin{proof} Let $P_1$ be a $2$-polymatroid with ground set $E$ and rank function $r_1$. 
Then, for $X \subseteq E - A$, we have \begin{eqnarray} \label{xae} r_{(P_1 \backslash A)^{\flat}}(X) & = &r_{P_1 \backslash A}(X) + \sum_{x \in X} [\lambda_{P_1 \backslash A}(\{x\}) - r_{P_1\backslash A}(\{x\})] \nonumber \\ &=& r_1(X) + \sum_{x \in X} [r_1(E - A - x) - r_1(E - A)]. \end{eqnarray} Thus \begin{equation} \label{xae0} r_{(P \backslash A)^{\flat}}(X) = r(X) + \sum_{x \in X} [r(E - A - x) - r(E - A)]. \end{equation} Next we observe that, for $x$ in $X$, \begin{align} \label{xae1} r_{P^{\flat}}(E-A - x) - r_{P^{\flat}}(E-A) & = r(E - A - x) + \sum_{y \in E-A-x} [\lambda(\{y\}) - r(\{y\})] \nonumber \\ & \hspace*{0.75in} - r(E - A) - \sum_{y \in E-A} [\lambda(\{y\}) - r(\{y\})] \nonumber \\ & = r(E - A - x) - r(E - A) - \lambda(\{x\}) + r(\{x\}). \end{align} Thus, by (\ref{xae}), (\ref{xae1}), and (\ref{xae0}), \begin{align*} r_{(P^{\flat} \backslash A)^{\flat}}(X) &= r_{P^{\flat}}(X) + \sum_{x \in X} [r_{P^{\flat}}(E - A - x) - r_{P^{\flat}}(E - A)]\\ & = r(X) + \sum_{x \in X} [\lambda(\{x\}) - r(\{x\}) +r(E - A - x) - r(E - A) \\ & \hspace*{2.5in} - \lambda(\{x\}) + r(\{x\})]\\ & = r(X) + \sum_{x \in X} [r(E - A - x) - r(E - A)]\\ & = r_{(P \backslash A)^{\flat}}(X). \end{align*} We conclude that (i) holds. Again, for $X \subseteq E - A$, we have \begin{eqnarray} \label{xae2} r_{(P_1 / A)^{\flat}}(X) & = &r_{P_1 / A}(X) + \sum_{x \in X} [\lambda_{P_1 / A}(\{x\}) - r_{P_1/ A}(\{x\})] \nonumber \\ &=& r_1(X \cup A) -r_1(A) + \sum_{x \in X} [r_{P_1 / A}(E - A - x) - r_{P_1 / A}(E - A)] \nonumber \\ & = & r_1(X \cup A) -r_1(A) + \sum_{x \in X} [r_{1}(E - x) - r_{1}(E)]. \end{eqnarray} Thus \begin{equation} \label{xae3} r_{(P / A)^{\flat}}(X) = r(X \cup A) -r(A) + \sum_{x \in X} [r(E - x) - r(E)]. 
\end{equation} Therefore, by (\ref{xae2}), (\ref{xae1}), and (\ref{xae3}) \begin{align*} r_{(P^{\flat} /A)^{\flat}}(X) &= r_{P^{\flat}}(X \cup A) -r_{P^{\flat}}(A) + \sum_{x \in X} [r_{P^{\flat}}(E - x) - r_{P^{\flat}}(E)]\\ & = r(X \cup A) -r (A) + \sum_{x \in X} [ \lambda(\{x\}) - r(\{x\}) +r(E - x) - r(E)\\ & \hspace*{2.6in} - \lambda(\{x\}) + r(\{x\})]\\ & = r(X \cup A) -r (A) + \sum_{x \in X} [r(E - x) - r(E)]\\ & = r_{(P / A)^{\flat}}(X). \end{align*} Hence (ii) holds. \end{proof} \begin{proof}[Proof of Lemma~\ref{complast}.] We may assume that there are disjoint subsets $A_1,A_2,\ldots,A_n$ of $E$ such that, in forming $Q$ from $P$, these sets are removed in order via deletion or contraction with the possibility that, after each such move, a compactification is performed. To prove the lemma, we argue by induction on $n$. It follows immediately from Lemma~\ref{complast1} that the lemma holds if $n= 1$. Assume the result holds for $n < m$ and let $n = m\ge 2$. Then there is a $2$-polymatroid $R$ such that $Q$ is $(R \backslash A_n)^{\flat}$ or $(R / A_n)^{\flat}$, so, by Lemma~\ref{complast1}, $Q$ is $(R^{\flat} \backslash A_n)^{\flat}$ or $(R^{\flat} / A_n)^{\flat}$, respectively. In forming $R$, a certain sequence of deletions, contractions, and compactifications is performed. Let $R_0$ be the $2$-polymatroid that is obtained from $P$ by performing the same sequence of operations except for the compactifications. Then, by the induction assumption, $R^{\flat} = R_0^{\flat}$. Since $(R^{\flat} \backslash A_n)^{\flat} = (R_0^{\flat} \backslash A_n)^{\flat} = (R_0 \backslash A_n)^{\flat}$ and $(R^{\flat} / A_n)^{\flat} =(R_0^{\flat} / A_n)^{\flat} = (R_0 / A_n)^{\flat}$, the lemma follows by induction. \end{proof} The following are straightforward consequences of Lemma~\ref{complast}. We prove only the second of these. \begin{corollary} \label{complast3} Let $P$ and $Q$ be $2$-polymatroids such that $Q$ is compact. 
Then $Q$ is a c-minor of $P$ if and only if $Q$ can be obtained from $P$ by a sequence of deletions and contractions followed by a single compactification. \end{corollary} \begin{corollary} \label{complast2} Let $P$ and $Q$ be compact $2$-polymatroids. Then $Q$ is a c-minor of $P$ if and only if $Q$ can be obtained from $P$ by a sequence of operations each of which consists of either a contraction or a deletion followed by a compactification. \end{corollary} \begin{proof} We need to show that if $Q$ is a c-minor of $P$, then $Q$ can be obtained as described. Now $Q^{\flat} = Q$. Thus, by Lemma~\ref{complast}, $Q$ can be obtained from $P$ by a sequence of deletions and contractions with one compactification being done as the final move. By Lemma~\ref{complast1}, we can perform a compactification after each deletion and still obtain $Q$ at the end of the process. Since $P$ is compact and each contraction of a compact $2$-polymatroid is compact, we retain compactness throughout this sequence of moves, so the result holds. \end{proof} \begin{lemma} \label{compact2} Let $M$ be a polymatroid. Then $$(M^{\flat})^* = M^* = (M^*)^{\flat}.$$ \end{lemma} \begin{proof} By Lemma~\ref{compact0}(i), $M^*$ is compact, so $M^* = (M^*)^{\flat}.$ Also, by Lemma~\ref{compact0}(ii), $(M^{\flat})^* = ((M^*)^*)^* = (M^*)^{\flat}$. \end{proof}
3,501
142,722
en
train
0.91.6
\begin{proof}[Proof of Lemma~\ref{complast}.] We may assume that there are disjoint subsets $A_1,A_2,\ldots,A_n$ of $E$ such that, in forming $Q$ from $P$, these sets are removed in order via deletion or contraction with the possibility that, after each such move, a compactification is performed. To prove the lemma, we argue by induction on $n$. It follows immediately from Lemma~\ref{complast1} that the lemma holds if $n= 1$. Assume the result holds for $n < m$ and let $n = m\ge 2$. Then there is a $2$-polymatroid $R$ such that $Q$ is $(R \backslash A_n)^{\flat}$ or $(R / A_n)^{\flat}$, so, by Lemma~\ref{complast1}, $Q$ is $(R^{\flat} \backslash A_n)^{\flat}$ or $(R^{\flat} / A_n)^{\flat}$, respectively. In forming $R$, a certain sequence of deletions, contractions, and compactifications is performed. Let $R_0$ be the $2$-polymatroid that is obtained from $P$ by performing the same sequence of operations except for the compactifications. Then, by the induction assumption, $R^{\flat} = R_0^{\flat}$. Since $(R^{\flat} \backslash A_n)^{\flat} = (R_0^{\flat} \backslash A_n)^{\flat} = (R_0 \backslash A_n)^{\flat}$ and $(R^{\flat} / A_n)^{\flat} =(R_0^{\flat} / A_n)^{\flat} = (R_0 / A_n)^{\flat}$, the lemma follows by induction. \end{proof} The following are straightforward consequences of Lemma~\ref{complast}. We prove only the second of these. \begin{corollary} \label{complast3} Let $P$ and $Q$ be $2$-polymatroids such that $Q$ is compact. Then $Q$ is a c-minor of $P$ if and only if $Q$ can be obtained from $P$ by a sequence of deletions and contractions followed by a single compactification. \end{corollary} \begin{corollary} \label{complast2} Let $P$ and $Q$ be compact $2$-polymatroids. Then $Q$ is a c-minor of $P$ if and only if $Q$ can be obtained from $P$ by a sequence of operations each of which consists of either a contraction or a deletion followed by a compactification. 
\end{corollary} \begin{proof} We need to show that if $Q$ is a c-minor of $P$, then $Q$ can be obtained as described. Now $Q^{\flat} = Q$. Thus, by Lemma~\ref{complast}, $Q$ can be obtained from $P$ by a sequence of deletions and contractions with one compactification being done as the final move. By Lemma~\ref{complast1}, we can perform a compactification after each deletion and still obtain $Q$ at the end of the process. Since $P$ is compact and each contraction of a compact $2$-polymatroid is compact, we retain compactness throughout this sequence of moves, so the result holds. \end{proof} \begin{lemma} \label{compact2} Let $M$ be a polymatroid. Then $$(M^{\flat})^* = M^* = (M^*)^{\flat}.$$ \end{lemma} \begin{proof} By Lemma~\ref{compact0}(i), $M^*$ is compact, so $M^* = (M^*)^{\flat}.$ Also, by Lemma~\ref{compact0}(ii), $(M^{\flat})^* = ((M^*)^*)^* = (M^*)^{\flat}$. \end{proof} \begin{lemma} \label{csm} Let $P$ and $Q$ be $2$-polymatroids, where $Q$ is compact. Then $P$ has a c-minor isomorphic to $Q$ if and only if $P^*$ has a c-minor isomorphic to $Q^*$. \end{lemma} \begin{proof} Suppose $P$ has a c-minor isomorphic to $Q$. By Corollary~\ref{complast3}, $Q$ can be obtained from $P$ by a sequence of deletions and contractions with one compactification being done as the final move. By Lemma~\ref{complast1}, we can perform a compactification after each deletion and after each contraction and still obtain $Q$ at the end of the process. Indeed, since $(P^{\flat}\backslash A)^{\flat} = (P\backslash A)^{\flat}$ and $(P^{\flat}/ A)^{\flat} = (P/ A)^{\flat}$, we see that $P^{\flat}$ has a c-minor isomorphic to $Q$. Thus we may assume that, in forming $Q$ from $P^{\flat}$, we remove, in order, disjoint sets $A_1, A_2,\ldots, A_n$ where each such removal is followed by a compactification. To prove that $P^*$ has a c-minor isomorphic to $Q^*$, we shall argue by induction on $n$. Suppose $n = 1$. 
Then $Q$ is $(P^{\flat}\backslash A_1)^{\flat}$ or $(P^{\flat}/ A_1)^{\flat}$. Then, by Lemmas~\ref{compact0} and \ref{compact2}, $$((P^{\flat}\backslash A_1)^{\flat})^* = (((P^*)^*\backslash A_1)^{\flat})^*= ((P^*/A_1)^*)^* = (P^*/A_1)^{\flat} = P^*/A_1$$ and $$((P^{\flat}/ A_1)^{\flat})^* = (P^{\flat}/ A_1)^* = ((P^{\flat})^*\backslash A_1)^{\flat} = (P^*\backslash A_1)^{\flat}.$$ Since $Q$ is compact, we deduce that the result holds for $n = 1$. Assume it holds for $n< k$ and let $n = k \ge 2$. Then there is a compact $2$-polymatroid $R$ that is a c-minor of $P$ such that $Q$ is $(R\backslash A_n)^{\flat}$ or $(R/A_n)^{\flat}$. By the induction assumption, $R^*$ is a c-minor of $P^*$, and $Q^*$ is a c-minor of $R^*$. Hence $Q^*$ is a c-minor of $P^*$. For the converse, we note that, by what we have just proved, if $Q^*$ is a c-minor of $P^*$, then $(Q^*)^*$ is a c-minor of $(P^*)^*$, that is, $Q^{\flat}$ is a c-minor of $P^{\flat}$. But $Q$ is compact so $Q$ is a c-minor of $P^{\flat}$. Hence $Q$ is a c-minor of $P$. \end{proof} When we compactify a $2$-polymatroid, loosely speaking what we are doing is dealing simultaneously with a number of $2$-separations. It will be helpful to be able to treat these $2$-separations one at a time. In the introduction, we defined the compression $M\downarrow x$ for an element $x$ of a $2$-polymatroid $M$. Ultimately, that operation removes $x$. Let $M\underline{\downarrow} x$ be the $2$-polymatroid that is obtained from $M$ by freely adding an element $x'$ on $x$ and then contracting $x'$. Thus $M\underline{\downarrow} x$ has ground set $E$ and rank function given, for all subsets $X$ of $E$, by \begin{equation} \label{getdown4 } r_{M\underline{\downarrow} x}(X) = \begin{cases} r(X), & \text{if $r(x) = 0$, or $r(X \cup x) > r(X)$; and}\\ r(X) - 1, & \text{otherwise.} \end{cases} \end{equation} We shall say that $M\underline{\downarrow} x$ has been obtained by {\it compactifying} $x$. 
Evidently $$M\downarrow x = (M{\underline{\downarrow}\,} x) \backslash x.$$ \begin{lemma} \label{compel} Let $M$ be a $2$-connected $2$-polymatroid that is not compact. Let $Z$ be the set of lines $z$ of $M$ such that $\lambda(\{z\}) = 1$. Then $$M^{\flat} = ((\ldots((M\underline{\downarrow} z_1)\underline{\downarrow} z_2)\ldots)\underline{{\downarrow}} z_n)$$ where $Z = \{z_1,z_2,\ldots,z_n\}$. \end{lemma} \begin{proof} We argue by induction on $n$. Suppose $n = 1$. Let $X \subseteq E(M)$. Then \begin{equation*} \label{getdown2} r_{M^{\flat}}(X) = \begin{cases} r(X), & \text{if $z_1 \not \in X$; and}\\ r(X) - 1, & \text{otherwise.} \end{cases} \end{equation*} On the other hand, \begin{equation*} \label{getdown3} r_{M\underline{\downarrow} z_1}(X) = \begin{cases} r(X), & \text{if $r(X \cup z_1) > r(X)$; and}\\ r(X) - 1, & \text{otherwise.} \end{cases} \end{equation*} The result is easily checked in this case. Now assume that $n \ge 2$ and that the lemma holds if $|Z| \le n-1$. Let $M_1 = M\underline{\downarrow} z_1$. Then $M_1$ is easily shown to be $2$-connected having $\{z_2,z_3,\ldots,z_n\}$ as its set of lines $z$ for which $\lambda_{M_1}(\{z\}) = 1$. Thus, by the induction assumption, $$M_1^{\flat} = ((\ldots((M_1\underline{\downarrow} z_2)\underline{\downarrow} z_3)\ldots)\underline{{\downarrow}} z_n).$$ Since $M_1 = M\underline{\downarrow} z_1$, it suffices to show that $M_1^{\flat} = M^{\flat}.$ Suppose $X \subseteq E$. 
Then $$r^{\flat}(X) = r(X) + \sum_{x \in X}(\lambda(\{x\}) - r(\{x\})).$$ Now \begin{equation*} \label{getdown5.1} r_{M_1}(X) = \begin{cases} r(X), & \text{if $r(X \cup z_1) > r(X)$; and}\\ r(X) - 1, & \text{otherwise.} \end{cases} \end{equation*} Thus \begin{equation*} \label{getdown6} r_{M_1}(X) = \begin{cases} r(X), & \text{if $z_1 \not \in X$; and}\\ r(X) - 1, & \text{otherwise.} \end{cases} \end{equation*} Hence \begin{eqnarray*} r_{M_1^{\flat}}(X) & = & r_{M_1}(X) + \sum_{x \in X}(\lambda_{M_1}(\{x\}) - r_{M_1}(\{x\}))\\ & = & r_{M_1}(X) + \sum_{x \in X \cap (Z - z_1)}(\lambda_{M}(\{x\}) - r_{M}(\{x\}))\\ & = & r_{M}(X) + \sum_{x \in X \cap Z}(\lambda_{M}(\{x\}) - r_{M}(\{x\}))\\ & = & r^{\flat}(X). \end{eqnarray*} We conclude, by induction, that the lemma holds. \end{proof} We will need some elementary properties of deletion, contraction, and series compression. \begin{lemma} \label{elemprop} Let $A$ and $B$ be disjoint subsets of the ground set $E$ of a $2$-polymatroid $P$. Then \begin{itemize} \item[(i)] $P/A/B = P/ (A \cup B) = P/B/A$; \item[(ii)] $P\backslashba A\backslashba B = P\backslashba (A \cup B) = P\backslashba B \backslashba A$; and \item[(iii)] $P/ A\backslashba B = P\backslashba B / A$. \end{itemize} \end{lemma} \begin{proof} Because the proofs of all three parts are routine, we only include a proof of (iii). Suppose $X \subseteq E- (A \cup B)$. Then \begin{align*} r_{P/A\backslashba B}(X) &= r_{((P/A)\backslash B)^{\flat}}(X)\\ &= r_{P/A}(X) + \sum_{x \in X}[\lambda_{P/A\backslash B}(\{x\}) - r_{P/A\backslash B}(\{x\})]\\ & = r_{P/A}(X) + \sum_{x \in X}[r_{P/A}(E-A-B-x) - r_{P/A}(E-A-B)]\\ & = r(X\cup A) - r(A) + \sum_{x \in X}[r(E-B-x) - r(E-B)]\\ & = r(X\cup A) - r(A) + \sum_{x \in X}[\lambda_{P\backslash B}(\{x\}) - r_{P\backslash B}(\{x\})]\\ & = r_{P\backslashba B}(X\cup A) - r_{P\backslashba B}(A)\\ & = r_{P\backslashba B/A}(X). \end{align*} We conclude that (iii) holds. 
\end{proof} The remainder of this section presents a number of basic properties of 2-element prickly 3-separators and of the compression operation. \begin{lemma} \label{atlast} Let $P$ be a $2$-polymatroid having $j$ and $k$ as lines and with $r(\{j,k\}) = 3$. Suppose $X \subseteq E(P) - k$ and $j \in X$. Then $r_{P\downarrow k}(X) = r(X \cup k) - 1$. \end{lemma} \begin {proof} By definition, \begin{equation*} r_{P\downarrow k}(X) = \begin{cases} r(X), & \text{if $r(X \cup k) > r(X)$;}\\ r(X) - 1, & \text{otherwise.} \end{cases} \end{equation*} As $j \in X$ and $\sqcap(j,k) = 1$, it follows that $r(X \cup k)$ is $r(X)$ or $r(X) + 1$. It follows that $r_{P\downarrow k}(X) = r(X \cup k) - 1$. \end{proof}
3,930
142,722
en
train
0.91.7
We will need some elementary properties of deletion, contraction, and series compression. \begin{lemma} \label{elemprop} Let $A$ and $B$ be disjoint subsets of the ground set $E$ of a $2$-polymatroid $P$. Then \begin{itemize} \item[(i)] $P/A/B = P/ (A \cup B) = P/B/A$; \item[(ii)] $P\backslashba A\backslashba B = P\backslashba (A \cup B) = P\backslashba B \backslashba A$; and \item[(iii)] $P/ A\backslashba B = P\backslashba B / A$. \end{itemize} \end{lemma} \begin{proof} Because the proofs of all three parts are routine, we only include a proof of (iii). Suppose $X \subseteq E- (A \cup B)$. Then \begin{align*} r_{P/A\backslashba B}(X) &= r_{((P/A)\backslash B)^{\flat}}(X)\\ &= r_{P/A}(X) + \sum_{x \in X}[\lambda_{P/A\backslash B}(\{x\}) - r_{P/A\backslash B}(\{x\})]\\ & = r_{P/A}(X) + \sum_{x \in X}[r_{P/A}(E-A-B-x) - r_{P/A}(E-A-B)]\\ & = r(X\cup A) - r(A) + \sum_{x \in X}[r(E-B-x) - r(E-B)]\\ & = r(X\cup A) - r(A) + \sum_{x \in X}[\lambda_{P\backslash B}(\{x\}) - r_{P\backslash B}(\{x\})]\\ & = r_{P\backslashba B}(X\cup A) - r_{P\backslashba B}(A)\\ & = r_{P\backslashba B/A}(X). \end{align*} We conclude that (iii) holds. \end{proof} The remainder of this section presents a number of basic properties of 2-element prickly 3-separators and of the compression operation. \begin{lemma} \label{atlast} Let $P$ be a $2$-polymatroid having $j$ and $k$ as lines and with $r(\{j,k\}) = 3$. Suppose $X \subseteq E(P) - k$ and $j \in X$. Then $r_{P\downarrow k}(X) = r(X \cup k) - 1$. \end{lemma} \begin {proof} By definition, \begin{equation*} r_{P\downarrow k}(X) = \begin{cases} r(X), & \text{if $r(X \cup k) > r(X)$;}\\ r(X) - 1, & \text{otherwise.} \end{cases} \end{equation*} As $j \in X$ and $\sqcap(j,k) = 1$, it follows that $r(X \cup k)$ is $r(X)$ or $r(X) + 1$. It follows that $r_{P\downarrow k}(X) = r(X \cup k) - 1$. \end{proof} \begin{lemma} \label{atlast2} Let $P$ be a $2$-polymatroid having $j$ and $k$ as lines and with $r(\{j,k\}) = 3$. 
Suppose $\ell$ is a line of $P$ that is not in $\{j,k\}$ and is not parallel to $k$. Then $\{\ell\}$ is $2$-separating in $P$ if and only if it is $2$-separating in $P\downarrow k$. \end{lemma} \begin {proof} Clearly $\{\ell\}$ is $2$-separating in $P$ if and only if $r(E- \ell) \le r(E) - 1$. Since $\ell$ is not parallel to $k$, we see that $r_{P\downarrow k}(\ell) = r(\ell) = 2$. Now $\{\ell\}$ is $2$-separating in $P\downarrow k$ if and only if $r_{P\downarrow k}(E- \{k,\ell\}) \le r_{P\downarrow k}(E- k) - 1$. By Lemma~\ref{atlast}, the last inequality holds if and only if $r(E - \ell) - 1 \le r(E) - 1 - 1.$ We conclude that the lemma holds. \end{proof} \begin{lemma} \label{symjk} Let $\{j,k\}$ be a prickly $3$-separator in a $2$-polymatroid $P$. Then $P\downarrow j$ can be obtained from $P\downarrow k$ by relabelling $j$ as $k$. \end{lemma} \begin{proof} Suppose $X \subseteq E - \{j,k\}$. Then, since both $r(X \cup j)$ and $r(X \cup k)$ exceed $r(X)$, $$r_{P\downarrow j}(X) = r_P(X) = r_{P\downarrow k}(X).$$ Now, as $\sqcap(j,k) = 1$, it follows that either $r(X \cup j \cup k) = r(X \cup j) + 1$, or $r(X \cup j \cup k) = r(X \cup j)$. Thus $r_{P\downarrow k}(X \cup j) = r(X \cup j \cup k) - 1$. By symmetry, $r_{P\downarrow j}(X \cup k) = r(X \cup j \cup k) - 1$, and the lemma follows. \end{proof} The first part of the next lemma was proved by Jowett, Mo, and Whittle~\cite[Lemma 3.6]{jmw}. \begin{lemma} \label{elemprop23} Let $P$ be a compact polymatroid $(E,r)$. For $A\subseteq E$, \begin{itemize} \item[(i)] $P/A$ is compact; and \item[(ii)] if $P$ is a $2$-polymatroid and $\{j,k\}$ is a prickly $3$-separator of $P$, then $P\downarrow k$ is compact. \end{itemize} \end{lemma} \begin{proof} We prove (ii). It suffices to show that $r_{P\downarrow k}(E - k - y) = r_{P\downarrow k}(E-k)$ for all $y$ in $E - k$. Since $P$ is compact, $r(E- k) = r(E)$, so $r_{P\downarrow k}(E-k) = r(E) - 1$. 
Now \begin{equation*} \label{getdown5} r_{P\downarrow k}(E - k - y) = \begin{cases} r(E-k-y), & \text{if $r(E-y) -1 \ge r(E-y - k)$; and}\\ r(E-k-y) - 1, & \text{otherwise.} \end{cases} \end{equation*} It follows that $r_{P\downarrow k}(E - k - y) = r(E) - 1 = r_{P\downarrow k}(E-k)$ unless $r(E- k - y) = r(E-y) - 2$. Consider the exceptional case. Evidently $y \neq j$ as $r(E- k - j) = r(E-j) - 1$. Thus $j \in E- k -y$. Since $\sqcap(j,k) = 1$, it follows that $r(E- y) \le r(E-k-y) + 1$. This contradiction completes the proof of (ii). \end{proof} \begin{lemma} \label{pricklytime0} In a $2$-polymatroid $P$, let $k$ and $y$ be distinct elements. Then \begin{itemize} \item[(i)] $P\downarrow k \backslash y = P\backslash y \downarrow k$; and \item[(ii)] $P\downarrow k /y = P/y \downarrow k.$ \end{itemize} \end{lemma} \begin{proof} Part (i) is essentially immediate. We now prove (ii). If $r(\{k\}) \le 1$, then $P\downarrow k = P/k$, so $$P\downarrow k /y = P/k/y = P/y/k = P/y \downarrow k.$$ Thus we may assume that $r(\{k\}) = 2$. Suppose $y$ is a line such that $r(\{y,k\}) = 2$. Then $$P/y\downarrow k = P/y/k = P/k/y = P\downarrow k/y$$ where the last equality follows by considering how $P\downarrow k$ is constructed. Thus we may assume that $y$ is not a line that is parallel to $k$. Hence $$r(\{y,k\}) > r(\{y\}).$$ Let $X$ be a subset of $E - k - y$. Then $$r_{P\downarrow k /y}(X) = r_{P\downarrow k}(X \cup y) - r_{P\downarrow k}(\{y\}) = r_{P\downarrow k}(X \cup y) - r(\{y\})$$ where the second equality follows because $r(\{y,k\}) > r(\{y\}).$ We deduce that \begin{equation*} r_{P\downarrow k /y}(X) = \begin{cases} r(X\cup y) - r(\{y\}), & \text{if $r(X \cup y \cup k) > r(X \cup y)$};\\ r(X\cup y) - r(\{y\}) - 1, & \text{otherwise}. 
\end{cases} \end{equation*} On the other hand, since $r_{P/y}(X \cup k) = r(X \cup k \cup y) - r(\{y\})$ and $r_{P/y}(X) = r(X \cup y) - r(\{y\})$, we see that \begin{equation*} r_{P/y\downarrow k }(X) = \begin{cases} r(X\cup y) - r(\{y\}), & \text{if $r(X \cup y \cup k) > r(X \cup y)$};\\ r(X\cup y) - r(\{y\}) - 1, & \text{otherwise}. \end{cases} \end{equation*} Thus $$r_{P\downarrow k /y}(X) = r_{P/y\downarrow k }(X)$$ so the lemma holds. \end{proof} \begin{lemma} \label{elemprop24} Let $P$ be a compact $2$-polymatroid $(E,r)$ having $\{j,k\}$ as a prickly $3$-separator. Suppose $y \in E - \{j,k\}$. If $\{j,k\}$ is not a prickly $3$-separator of $P/y$, then \begin{itemize} \item[(i)] $r(\{j,k,y\}) = 3$ and $P/y$ has $\{j,k\}$ as a $1$-separating set; or \item[(ii)] $P\downarrow k/y = P/y\backslashba k$; or \item[(iii)] $P\downarrow k/y = P/y/k$; or \item[(iv)] $P\downarrow k/y$ can be obtained from $P/y\backslashba j$ by relabelling $k$ as $j$. \end{itemize} \end{lemma} \begin{proof} Suppose first that $r_{P/y}(\{j,k\}) = 1$. Then $y$ is a line of $P$ that is in the closure of $\{j,k\}$. Thus $\lambda_{P/y}(\{j,k\}) = 0$ and (i) holds. Next assume that $r_{P/y}(\{j,k\}) = 2$. Then $\lambda_{P/y}(\{j,k\}) = 1$. Thus $P/y$ can be written as the 2-sum, with basepoint $p$ of two polymatroids, one of which, $P_1$, has ground set $\{j,k,p\}$ and has rank $2$. As $P$ is compact, so is $P/y$. There are four choices for $P_1$: \begin{itemize} \item[(a)] $j$ and $k$ are parallel lines and $p$ is a point lying on them both; \item[(b)] $P_1$ is isomorphic to the matroid $U_{2,3}$; \item[(c)] $P_1$ has $k$ as a line and has $j$ and $p$ as distinct points on this line; or \item[(d)] $P_1$ has $j$ as a line and has $k$ and $p$ as distinct points on this line; \end{itemize} By Lemma~\ref{pricklytime0}, $P\downarrow k/y = P/y \downarrow k$. 
If $P_1$ is one of the $2$-polymatroids in (b) or (d), then, as $k$ is a point of $P/y$, it follows that $P/y \downarrow k = P/y / k$, so (iii) holds. Next suppose that $P_1$ is the $2$-polymatroid in (a). Then, as $P/y$ is compact, it follows that $P/y \downarrow k = P/y \backslashba k$, so (ii) holds. Finally, suppose that $P_1$ is the $2$-polymatroid in (c). Then $P\downarrow j/y = P/y \downarrow j = P/y \backslashba j$. By Lemma~\ref{symjk}, $P\downarrow j$ can be obtained from $P\downarrow k$ by relabelling $j$ as $k$. Thus $P\downarrow j/y$ can be obtained from $P/y \backslashba j$ by relabelling $k$ as $j$, that is, (iv) holds. We may now assume that $r_{P/y}(\{j,k\}) = 3$. Then $\sqcap(y, \{j,k\}) = 0$ and one easily checks that $\{j,k\}$ is a prickly $3$-separator of $P/y$; a contradiction. \end{proof}
3,738
142,722
en
train
0.91.8
\begin{lemma} \label{elemprop24} Let $P$ be a compact $2$-polymatroid $(E,r)$ having $\{j,k\}$ as a prickly $3$-separator. Suppose $y \in E - \{j,k\}$. If $\{j,k\}$ is not a prickly $3$-separator of $P/y$, then \begin{itemize} \item[(i)] $r(\{j,k,y\}) = 3$ and $P/y$ has $\{j,k\}$ as a $1$-separating set; or \item[(ii)] $P\downarrow k/y = P/y\backslashba k$; or \item[(iii)] $P\downarrow k/y = P/y/k$; or \item[(iv)] $P\downarrow k/y$ can be obtained from $P/y\backslashba j$ by relabelling $k$ as $j$. \end{itemize} \end{lemma} \begin{proof} Suppose first that $r_{P/y}(\{j,k\}) = 1$. Then $y$ is a line of $P$ that is in the closure of $\{j,k\}$. Thus $\lambda_{P/y}(\{j,k\}) = 0$ and (i) holds. Next assume that $r_{P/y}(\{j,k\}) = 2$. Then $\lambda_{P/y}(\{j,k\}) = 1$. Thus $P/y$ can be written as the 2-sum, with basepoint $p$ of two polymatroids, one of which, $P_1$, has ground set $\{j,k,p\}$ and has rank $2$. As $P$ is compact, so is $P/y$. There are four choices for $P_1$: \begin{itemize} \item[(a)] $j$ and $k$ are parallel lines and $p$ is a point lying on them both; \item[(b)] $P_1$ is isomorphic to the matroid $U_{2,3}$; \item[(c)] $P_1$ has $k$ as a line and has $j$ and $p$ as distinct points on this line; or \item[(d)] $P_1$ has $j$ as a line and has $k$ and $p$ as distinct points on this line; \end{itemize} By Lemma~\ref{pricklytime0}, $P\downarrow k/y = P/y \downarrow k$. If $P_1$ is one of the $2$-polymatroids in (b) or (d), then, as $k$ is a point of $P/y$, it follows that $P/y \downarrow k = P/y / k$, so (iii) holds. Next suppose that $P_1$ is the $2$-polymatroid in (a). Then, as $P/y$ is compact, it follows that $P/y \downarrow k = P/y \backslashba k$, so (ii) holds. Finally, suppose that $P_1$ is the $2$-polymatroid in (c). Then $P\downarrow j/y = P/y \downarrow j = P/y \backslashba j$. By Lemma~\ref{symjk}, $P\downarrow j$ can be obtained from $P\downarrow k$ by relabelling $j$ as $k$. 
Thus $P\downarrow j/y$ can be obtained from $P/y \backslashba j$ by relabelling $k$ as $j$, that is, (iv) holds. We may now assume that $r_{P/y}(\{j,k\}) = 3$. Then $\sqcap(y, \{j,k\}) = 0$ and one easily checks that $\{j,k\}$ is a prickly $3$-separator of $P/y$; a contradiction. \end{proof} \begin{lemma} \label{elemprop25} Let $\{j,k\}$ be a prickly $3$-separator in a $2$-polymatroid $P$. If $P\downarrow k$ is $3$-connected, then so is $P$. \end{lemma} \begin{proof} Let $(X,Y)$ be an exact $m$-separation of $P$ for some $m$ in $\{1,2\}$ where $k \in X$. Then $r(X) + r(Y) - r(P) = m-1$. Now $r(P\downarrow k) = r(P) -1$. Consider $r_{P\downarrow k}(X-k) + r_{P\downarrow k}(Y)$. Suppose first that $j \in X - k$. Then, by Lemma~\ref{atlast}, $r_{P\downarrow k}(X - k) = r(X) - 1$ and $r_{P\downarrow k}(Y) = r(Y)$. Hence $$r_{P\downarrow k}(X - k) + r_{P\downarrow k}(Y) - r(P\downarrow k) = m-1.$$ As $P\downarrow k$ is $2$-connected, we cannot have $m = 1$ since both $X - k$ and $Y$ are non-empty. Thus $m = 2$. Now $\max\{|X|,r(X)\} \ge 2$ and $\max\{|Y|,r(Y)\} \ge 2$. Thus $\max\{|Y|,r_{P\downarrow k}(Y)\} \ge 2$. If $X = \{j,k\}$, then $r(X) + r(Y) - r(P) = 1$; a contradiction to the fact that $\{j,k\}$ is a $3$-separator of $P$. We deduce that $|X - k| \ge 2$, so $(X-k,Y)$ is a $2$-separation of $P\downarrow k$; a contradiction. We may now assume that $j \in Y$. Then $$r_{P\downarrow k}(X - k) + r_{P\downarrow k}(Y) - r(P\downarrow k) \le r(X) - 1 + r(Y) - 1 -r(P) + 1 = m-2.$$ As $P\downarrow k$ is $2$-connected, it follows that $X - k$ is empty. Then $r(\{k\}) + r(E-k) - r(E) = 1$; a contradiction. Thus the lemma holds. \end{proof}
1,572
142,722
en
train
0.91.9
\section{Some results for connectivity and local connectivity} \label{clc} This section notes a number of properties of the connectivity and local-connectivity functions that will be used in the proof of the main theorem. First we show that compression is, in most situations, a self-dual operation. We proved this result in \cite[Proposition 3.1]{oswww} for the variant of duality used there. By making the obvious replacements in that proof, it is straightforward to check that the result holds with the modified definition of duality used here. We omit the details. \begin{proposition} \label{compdual} Let $e$ be a line of a $2$-polymatroid $M$ and suppose that $M$ contains no line parallel to $e$. Then $$M^*\downarrow e = (M\downarrow e)^*.$$ \end{proposition} The next result implies that the main theorem is a self-dual result. \begin{proposition} \label{sminordual} Let $P$ and $Q$ be compact $2$-polymatroids. Then $Q$ is an s-minor of $P$ if and only if $Q^*$ is an s-minor of $P^*$. \end{proposition} \begin{proof} By Lemma~\ref{compact0}, both $P^*$ and $Q^*$ are compact. Moreover, $(P^*)^* = P$ and $(Q^*)^* = Q$. Assume $Q$ is an s-minor of $P$. To prove the lemma, it suffices to show that $Q^*$ is an s-minor of $P^*$. By Lemma~\ref{compact0} again, for an element $\ell$ of $P$, we have that $(P\backslashba \ell)^* = P^*/ \ell$ and $(P/ \ell)^* = P^*\backslashba \ell$. Moreover, if $\{j,k\}$ is a prickly 3-separator of $P$, then one easily checks that it is a prickly 3-separator of $P^*$. By Lemma~\ref{elemprop23}, $P\downarrow k$ is compact and, by Proposition~\ref{compdual}, $(P\downarrow k)^* = P^*\downarrow k$. Because the dual of each allowable move on $P$ produces a 2-polymatroid that is obtained from $P^*$ by an allowable move, the lemma follows by a straightforward induction argument. \end{proof} There is an attractive link between the connectivity of a $2$-polymatroid $M$ and the connectivity of the natural matroid associated with $M$. 
\begin{lemma} \label{missinglink} Let $M$ be a $2$-polymatroid with at least two elements and let $M'$ be the natural matroid derived from $M$. Then \begin{itemize} \item[(i)] $M$ is $2$-connected if and only if $M'$ is $2$-connected; and \item[(ii)] $M$ is $3$-connected if and only if $M'$ is $3$-connected. \end{itemize} \end{lemma} \begin{proof} The result is immediate if $M$ is a matroid or has a loop, so we may assume that $M$ is loopless and has at least one line. Let $L$ be the set of lines of $M$ and let $M^+$ be the matroid that is obtained from $M$ by freely adding two points on each line in $L$. Then $M' = M^+\backslash L$. Suppose that $M$ has a $k$-separation $(X,Y)$ for some $k$ in $\{1,2\}$. Replacing each line in each of $X$ and $Y$ by two points freely placed on the line gives sets $X'$ and $Y'$ that partition $E(M')$ such that $r(X') = r(X)$ and $r(Y') = r(Y)$. Hence $(X',Y')$ is a $k$-separation of $M'$. Now suppose that $M'$ has a $k$-separation for some $k$ in $\{1,2\}$. Choose such a $k$-separation $(X',Y')$ to minimize the number $m$ of lines of $M$ that have exactly one of the corresponding points of $M'$ in $X'$. If $m = 0$, then there is a $k$-separation of $M$ that corresponds naturally to $(X',Y')$. Thus we may assume that $M$ has a line $\ell$ whose corresponding points, $s_{\ell}$ and $t_{\ell}$, are in $X'$ and $Y'$, respectively. Now \begin{equation} \label{kay} r(X') + r(Y') - r(M') = k-1. \end{equation} Suppose $|E(M')| = 3$. Then $M$ consists of a point and a line. For each $n$ in $\{2,3\}$, both $M$ and $M'$ are $n$-connected if and only if the point lies on the line. Thus the result holds if $|E(M')| = 3$. Now assume that $|E(M')| = 4$. Then $M$ consists of either two lines, or a line and two points. Again the result is easily checked. Thus we may assume that $|E(M')| \ge 5$. We may also assume that $|X'| \ge |Y'|$. Then $|X'|\ge 3$. 
Now $r(X'- s_{\ell}) + r(Y' \cup s_{\ell}) - r(M') \ge k$, otherwise the choice of $(X',Y')$ is contradicted. Thus $r(X' - s_{\ell}) = r(X')$ and $r(Y' \cup s_{\ell}) = r(Y') + 1$. Hence, in $M^+$, as $s_{\ell}$ and $t_{\ell}$ are freely placed on $\ell$, we see that $r((X' - s_{\ell}) \cup \ell) = r(X' - s_{\ell})$, so $$r(X' - s_{\ell}) = r((X' - s_{\ell}) \cup t_{\ell}) = r(X' \cup t_{\ell}).$$ Hence $(X' \cup t_{\ell},Y'-t_{\ell})$ violates the choice of $(X',Y')$ unless either $k=1$ and $Y' = \{t_{\ell}\}$, or $k =2$ and $Y'$ consists of two points. In the first case, $r(X') = r(M')$, so $r(X') + r(Y') - r(M') = 1,$ a contradiction to (\ref{kay}). In the second case, since one of the points in $Y'$ is $t_{\ell}$, the points are not parallel so $r(Y') = 2$ and $r(X') = r(M') - 1$. Thus $r(X' \cup t_{\ell}) = r(M') - 1$ and $r(Y'-t_{\ell}) = 1$; a contradiction to (\ref{kay}). \end{proof} Let $M$ be a polymatroid $(E,r)$. If $X$ and $Y$ are subsets of $E$, the {\it local connectivity} $\sqcap(X,Y)$ between $X$ and $Y$ is defined by $\sqcap(X,Y) = r(X) + r(Y) - r(X \cup Y)$. Sometimes we will write $\sqcap_M$ for $\sqcap$, and $\sqcap^*$ for $\sqcap_{M^*}$. It is straightforward to prove the following. Again this holds for both the version of duality used here and the variant used in \cite{oswww}. \begin{lemma} \label{sqcapdual} Let $M$ be a polymatroid $(E,r)$. For disjoint subsets $X$ and $Y$ of $E$, $$\sqcap_{M^*}(X,Y) = \sqcap_{M/(E - (X \cup Y))}(X,Y).$$ \end{lemma} The next lemma will be used repeatedly, often without explicit reference. Two sets $X$ and $Y$ in a polymatroid $M$ are {\it skew} if $\sqcap(X,Y) = 0$. \begin{lemma} \label{skewer} Let $M$ be a $2$-polymatroid and $z$ be an element of $M$ such that $(A,B)$ is a $2$-separation of $M/z$. Suppose $z$ is skew to $A$. Then $(A,B\cup z)$ is a $2$-separation of $M$. Moreover, if $M$ is $3$-connected, then $A$ is not a single line in $M/z$. 
\end{lemma} \begin{proof} Clearly $r(A \cup z) - r(z) = r(A)$, so $(A,B \cup z)$ is a $2$-separation\ of $M$. If $M$ is $3$-connected\ and $A$ consists of a single line $a$ of $M/z$, then $a$ is a line of $M$, so $a$ and $z$ are skew, and we obtain the contradiction\ that $M$ has a $2$-separation. \end{proof}
2,269
142,722
en
train
0.91.10
Let $M$ be a polymatroid $(E,r)$. If $X$ and $Y$ are subsets of $E$, the {\it local connectivity} $\sqcap(X,Y)$ between $X$ and $Y$ is defined by $\sqcap(X,Y) = r(X) + r(Y) - r(X \cup Y)$. Sometimes we will write $\sqcap_M$ for $\sqcap$, and $\sqcap^*$ for $\sqcap_{M^*}$. It is straightforward to prove the following. Again this holds for both the version of duality used here and the variant used in \cite{oswww}. \begin{lemma} \label{sqcapdual} Let $M$ be a polymatroid $(E,r)$. For disjoint subsets $X$ and $Y$ of $E$, $$\sqcap_{M^*}(X,Y) = \sqcap_{M/(E - (X \cup Y))}(X,Y).$$ \end{lemma} The next lemma will be used repeatedly, often without explicit reference. Two sets $X$ and $Y$ in a polymatroid $M$ are {\it skew} if $\sqcap(X,Y) = 0$. \begin{lemma} \label{skewer} Let $M$ be a $2$-polymatroid and $z$ be an element of $M$ such that $(A,B)$ is a $2$-separation of $M/z$. Suppose $z$ is skew to $A$. Then $(A,B\cup z)$ is a $2$-separation of $M$. Moreover, if $M$ is $3$-connected, then $A$ is not a single line in $M/z$. \end{lemma} \begin{proof} Clearly $r(A \cup z) - r(z) = r(A)$, so $(A,B \cup z)$ is a $2$-separation\ of $M$. If $M$ is $3$-connected\ and $A$ consists of a single line $a$ of $M/z$, then $a$ is a line of $M$, so $a$ and $z$ are skew, and we obtain the contradiction\ that $M$ has a $2$-separation. \end{proof} Numerous properties of the connectivity function of a matroid are proved simply by applying properties of the rank function; they do not rely on the requirement that $r(\{e\}) \le 1$ for all elements $e$. Evidently, such properties also hold for the connectivity function of a polymatroid. The next few lemmas note some of these properties. The first two are proved in \cite[Lemmas 8.2.3 and 8.2.4]{oxbook}. \begin{lemma} \label{8.2.3} Let $(E,r)$ be a polymatroid and let $X_1, X_2, Y_1$, and $Y_2$ be subsets of $E$ with $Y_1 \subseteq X_1$ and $Y_2 \subseteq X_2$. 
Then $$\sqcap(Y_1,Y_2) \le \sqcap(X_1,X_2).$$ \end{lemma} \begin{lemma} \label{8.2.4} Let $(E,r)$ be a polymatroid $M$ and let $X,C$, and $D$ be disjoint subsets of $E$. Then $$\lambda_{M\backslash D/C}(X) \le \lambda_M(X).$$ Moreover, equality holds if and only if $$r(X \cup C) = r(X) + r(C)$$ and $$r(E - X) + r(E-D) = r(E) + r(E- (X \cup D)).$$ \end{lemma} The following \cite[Corollary 8.7.6]{oxbook} is a straightforward consequence of the last lemma. \begin{corollary} \label{8.7.6} Let $X$ and $D$ be disjoint subsets of the ground set $E$ of a polymatroid $M$. Suppose that $r(M\backslash D) = r(M)$. Then \begin{itemize} \item[(i)] $\lambda_{M\backslash D}(X) = \lambda_M(X)$ if and only if $D \subseteq {\rm cl}_M(E - (X \cup D))$; and \item[(ii)] $\lambda_{M\backslash D}(X) = \lambda_M(X\cup D)$ if and only if $D \subseteq {\rm cl}_M(X)$. \end{itemize} \end{corollary} It is well known that, when $M$ is a matroid, for all subsets $X$ of $E(M)$, $$\lambda_M(X) = r_M(X) + r_{M^*}(X) - |X|.$$ It is easy to check that the following variant on this holds for polymatroids. Recall that $||X|| = \sum_{x \in X} r(\{x\}).$ \begin{lemma} \label{rr*} In a polymatroid $M$, for all subsets $X$ of $E(M)$, $$\lambda_M(X) = r_M(X) + r_{M^*}(X) - ||X||.$$ In particular, if every element of $X$ has rank one, then $$\lambda_M(X) = r_M(X) + r_{M^*}(X) - |X|.$$ \end{lemma} The next lemma contains another useful equation whose proof is straightforward. \begin{lemma} \label{obs1} Let $(X,Y)$ be a partition of the ground set of a polymatroid $M$. Suppose $z \in Y$. Then $$\sqcap(X,\{z\}) + \sqcap_{M/z}(X,Y-z) = \sqcap(X,\{z\}) + \lambda_{M/z}(X) = \lambda_M(X).$$ \end{lemma} The next two lemmas are natural generalizations of matroid results that appear in \cite{osw}. \begin{lemma} \label{univ} Let $(E,r)$ be a polymatroid and let $X$ and $Y$ be disjoint subsets of $E$. 
Then $$\lambda(X \cup Y) = \lambda(X) + \lambda(Y) - \sqcap(X,Y) - \sqcap^*(X,Y).$$ \end{lemma} \begin{lemma} \label{oswrules} Let $A, B, C$, and $D$ be subsets of the ground set of a polymatroid. Then \begin{itemize} \item[(i)] $\sqcap(A \cup B, C \cup D) + \sqcap(A,B) + \sqcap(C,D) = \sqcap(A \cup C, B \cup D) + \sqcap(A,C) + \sqcap(B,D)$; and \item[(ii)] $\sqcap(A \cup B, C) + \sqcap(A,B) = \sqcap(A \cup C, B) + \sqcap(A,C).$ \end{itemize} \end{lemma} \begin{lemma} \label{general} Let $M$ be a polymatroid and $(A,B,Z)$ be a partition of its ground set into possibly empty subsets. Then $$\lambda_{M/Z}(A) = \lambda_{M\backslash Z}(A) - \sqcap_M(A,Z) - \sqcap_M(B,Z) + \lambda_M(Z).$$ \end{lemma} \begin{proof} Since $(A,B,Z)$ is a partition of the ground set, the complement of $A$ in $M/Z$ is $B$. Then \begin{align*} \lambda_{M/Z}(A) &= r_{M/Z}(A) + r_{M/Z}(B) - r(M/Z)\\ & = r(A \cup Z) - r(Z) + r(B\cup Z) - r(Z) -r(M) + r(Z)\\ & = r(A \cup Z) + r(B \cup Z) - r(M) - r(Z). \end{align*} As $\lambda_{M\backslash Z}(A) = r(A) + r(B) - r(M\backslash Z)$, the required result holds if and only if $$\sqcap_M(A,Z) + \sqcap_M(B,Z) - \lambda_M(Z) = r(A) + r(B) - r(A \cup Z) - r(B \cup Z) + r(M) + r(Z) - r(M\backslash Z).$$ Now \begin{align*} \sqcap(A,Z) + \sqcap(B,Z) - \lambda_M(Z) &= r(A) + r(Z) - r(A \cup Z) + r(B) + r(Z) - r(B\cup Z)\\ & \hspace*{1.7in} - r(Z) - r(M\backslash Z) + r(M)\\ & = r(A) + r(B) - r(A \cup Z) - r(B \cup Z)\\ & \hspace*{1.7in} + r(Z) + r(M) - r(M\backslash Z), \end{align*} as required. \end{proof} \begin{corollary} \label{general2} Let $M$ be a polymatroid and $(A,B,Z)$ be a partition of its ground set into possibly empty subsets. Suppose $r(M\backslash Z) = r(M)$. Then $$\lambda_{M/Z}(A) = \lambda_{M\backslash Z}(A) - \sqcap_M(A,Z) - \sqcap_M(B,Z) + r(Z).$$ \end{corollary} \begin{proof} As $\lambda_M(Z) = r(Z) + r(M\backslash Z) - r(M)$ and $r(M\backslash Z) = r(M)$, the result is an immediate consequence of the last lemma. 
\end{proof} \begin{lemma} \label{general3} Let $M$ be a polymatroid and $(A,B,C)$ be a partition of its ground set into possibly empty subsets. Suppose $\lambda(A) = 1 = \lambda(C)$ and $\lambda(B) = 2$. Then $\sqcap(A,B) = 1$. \end{lemma} \begin{proof} We have $$2= r(B) + r(A\cup C) -r(M)$$ and \begin{align*} r(M) &= r(A\cup B) + r(C) - 1\\ & = r(A) + r(B) - \sqcap(A,B) + r(C) - 1\\ & = r(A) + r(B) + r(C) - 1 - \sqcap (A,B). \end{align*} Thus $$2 = r(B) + r(A \cup C) - r(A) - r(B) - r(C) + 1 + \sqcap(A,B)$$ so $\sqcap(A,B) = 1 + \sqcap(A,C) \ge 1$. By Lemma~\ref{8.2.3}, $\sqcap(A,B) \le \sqcap(A,B\cup C) = 1$, so $\sqcap(A,B) = 1$. \end{proof} \begin{lemma} \label{general4} Let $M$ be a polymatroid and $(A,B,C)$ be a partition of its ground set into possibly empty subsets. Then $\sqcap^*(A,B) = \lambda_{M/C}(A)$. \end{lemma} \begin{proof} By making repeated use of Lemma~\ref{compact0}, we have \begin{align*} \sqcap^*(A,B) & = \sqcap_{M^*}(A,B)\\ & = \lambda_{M^*\backslash C}(A)\\ & = \lambda_{(M^*\backslash C)^{\flat}}(A)\\ & = \lambda_{(M/C)^*}(A)\\ & = \lambda_{M/C}(A). \end{align*} \end{proof} The following is a consequence of a result of Oxley and Whittle~\cite[Lemma 3.1]{owconn}. \begin{lemma} \label{Tutte2} Let $M$ be a $2$-connected $2$-polymatroid with $|E(M)| \ge 2$. If $e$ is a point of $M$, then $M\backslash e$ or $M/e$ is $2$-connected. \end{lemma} The next result is another straightforward extension of a matroid result. \begin{lemma} \label{matroidef} Let $M$ be a $2$-connected $2$-polymatroid having $e$ and $f$ as points. Then \begin{itemize} \item[(i)] $\lambda_{M/ f}(\{e\}) = 0$ if and only if $e$ and $f$ are parallel points; and \item[(ii)] $\lambda_{M\backslash f}(\{e\}) = 0$ if and only if $e$ and $f$ form a series pair. \end{itemize} \end{lemma} \begin{proof} We prove (i) omitting the similar proof of (ii). If $e$ and $f$ are parallel points of $M$, then $\lambda_{M/ f}(\{e\}) = 0$. Now assume that $\lambda_{M/ f}(\{e\}) = 0$. 
Let $M'$ be the natural matroid derived from $M$. Then $M'/f$ has $\{e\}$ as a component. Hence $\{e,f\}$ is a series or parallel pair in $M'$. But if $\{e,f\}$ is a series pair, then $M'/f$ is $2$-connected; a contradiction. We conclude that $\{e,f\}$ is a parallel pair of points in $M$, so (i) holds. \end{proof}
3,589
142,722
en
train
0.91.11
\begin{corollary} \label{general2} Let $M$ be a polymatroid and $(A,B,Z)$ be a partition of its ground set into possibly empty subsets. Suppose $r(M\backslash Z) = r(M)$. Then $$\lambda_{M/Z}(A) = \lambda_{M\backslash Z}(A) - \sqcap_M(A,Z) - \sqcap_M(B,Z) + r(Z).$$ \end{corollary} \begin{proof} As $\lambda_M(Z) = r(Z) + r(M\backslash Z) - r(M)$ and $r(M\backslash Z) = r(M)$, the result is an immediate consequence of the last lemma. \end{proof} \begin{lemma} \label{general3} Let $M$ be a polymatroid and $(A,B,C)$ be a partition of its ground set into possibly empty subsets. Suppose $\lambda(A) = 1 = \lambda(C)$ and $\lambda(B) = 2$. Then $\sqcap(A,B) = 1$. \end{lemma} \begin{proof} We have $$2= r(B) + r(A\cup C) -r(M)$$ and \begin{align*} r(M) &= r(A\cup B) + r(C) - 1\\ & = r(A) + r(B) - \sqcap(A,B) + r(C) - 1\\ & = r(A) + r(B) + r(C) - 1 - \sqcap (A,B). \end{align*} Thus $$2 = r(B) + r(A \cup C) - r(A) - r(B) - r(C) + 1 + \sqcap(A,B)$$ so $\sqcap(A,B) = 1 + \sqcap(A,C) \ge 1$. By Lemma~\ref{8.2.3}, $\sqcap(A,B) \le \sqcap(A,B\cup C) = 1$, so $\sqcap(A,B) = 1$. \end{proof} \begin{lemma} \label{general4} Let $M$ be a polymatroid and $(A,B,C)$ be a partition of its ground set into possibly empty subsets. Then $\sqcap^*(A,B) = \lambda_{M/C}(A)$. \end{lemma} \begin{proof} By making repeated use of Lemma~\ref{compact0}, we have \begin{align*} \sqcap^*(A,B) & = \sqcap_{M^*}(A,B)\\ & = \lambda_{M^*\backslash C}(A)\\ & = \lambda_{(M^*\backslash C)^{\flat}}(A)\\ & = \lambda_{(M/C)^*}(A)\\ & = \lambda_{M/C}(A). \end{align*} \end{proof} The following is a consequence of a result of Oxley and Whittle~\cite[Lemma 3.1]{owconn}. \begin{lemma} \label{Tutte2} Let $M$ be a $2$-connected $2$-polymatroid with $|E(M)| \ge 2$. If $e$ is a point of $M$, then $M\backslash e$ or $M/e$ is $2$-connected. \end{lemma} The next result is another straightforward extension of a matroid result. \begin{lemma} \label{matroidef} Let $M$ be a $2$-connected $2$-polymatroid having $e$ and $f$ as points. 
Then \begin{itemize} \item[(i)] $\lambda_{M/ f}(\{e\}) = 0$ if and only if $e$ and $f$ are parallel points; and \item[(ii)] $\lambda_{M\backslash f}(\{e\}) = 0$ if and only if $e$ and $f$ form a series pair. \end{itemize} \end{lemma} \begin{proof} We prove (i) omitting the similar proof of (ii). If $e$ and $f$ are parallel points of $M$, then $\lambda_{M/ f}(\{e\}) = 0$. Now assume that $\lambda_{M/ f}(\{e\}) = 0$. Let $M'$ be the natural matroid derived from $M$. Then $M'/f$ has $\{e\}$ as a component. Hence $\{e,f\}$ is a series or parallel pair in $M'$. But if $\{e,f\}$ is a series pair, then $M'/f$ is $2$-connected; a contradiction. We conclude that $\{e,f\}$ is a parallel pair of points in $M$, so (i) holds. \end{proof} The next result is a generalization of a lemma of Bixby \cite{bixby} (see also \cite[Lemma 8.7.3]{oxbook}) that is widely used when dealing with $3$-connected matroids. \begin{lemma} \label{newbix} Let $M$ be a $3$-connected $2$-polymatroid and $z$ be a point of $M$. Then either \begin{itemize} \item[(i)] $M/z$ is $2$-connected having one side of every $2$-separation being a pair of points of $M$ that are parallel in $M/z$; or \item[(ii)] $M\backslash z$ is $2$-connected having one side of every $2$-separation being either a single line of $M$, or a pair of points of $M$ that form a series pair in $M\backslash z$. \end{itemize} \end{lemma} \begin{proof} If $z$ lies on a line in $M$, then $M\backslash z$ is $3$-connected. Thus we may assume that $z$ does not lie on a line in $M$. Take the matroid $M'$ that is naturally derived from $M$. Then, by Bixby's Lemma, either $M'/z$ is $2$-connected having one side of every $2$-separation being a pair of parallel points of $M'$, or $M'\backslash z$ is $2$-connected having one side of every $2$-separation being a series pair of points of $M'$. In the first case, if $\{a,b\}$ is a parallel pair of points of $M'/z$, then $\{a,b,z\}$ is a circuit of $M'$. 
Because the points added to $M$ to form $M'$ are freely placed on lines, we cannot have a circuit containing just one of them. Since $z$ is not on a line of $M$, we deduce that $a$ and $b$ are points of $M$. We conclude that, in the first case, (i) holds. Now suppose that $M'\backslash z$ is not $3$-connected\ and has $\{u,v\}$ as a series pair. Then either $u$ and $v$ are both matroid points of $M$, or $M$ has a line on which the points $u$ and $v$ are freely placed in the formation of $M'$. We deduce that (ii) holds. \end{proof} We recall from \cite{oswww} that, when $\{a,b,c\}$ is a set of three points in a $2$-polymatroid $Q$, we call $\{a,b,c\}$ a {\it triangle} if every subset of $\{a,b,c\}$ of size at least two has rank two. If, instead, $r(E - \{a,b,c\}) = r(Q) - 1$ but $r(X) = r(Q)$ for all proper supersets $X$ of $E - \{a,b,c\}$, then we call $\{a,b,c\}$ a {\it triad} of $Q$. When $Q$ is $3$-connected, $\{a,b,c\}$ is a triad of $Q$ if and only if $\{a,b,c\}$ is a triangle of $Q^*$. It is straightforward to check that a triangle and a triad of $Q$ cannot have exactly one common element. Just as for matroids, we call a sequence $x_1,x_2,\dots,x_k$ of distinct points of a $2$-polymatroid $Q$ a {\it fan} of {\it length} $k$ if $k \ge 3$ and the sets $\{x_1,x_2,x_3\}, \{x_2,x_3,x_4\},\dots,\{x_{k-2},x_{k-1},x_k\}$ are alternately triangles and triads beginning with either a triangle or a triad. The following lemma will be helpful in proving our main result when fans arise in the argument. \begin{lemma} \label{fantan} Let $M$ and $N$ be $3$-connected $2$-polymatroids where $|E(N)| \ge 4$ and $M$ is not a whirl or the cycle matroid of a wheel. Suppose $M$ has a fan $ x_1,x_2,x_3,x_4$ where $\{x_1,x_2,x_3\}$ is a triangle and $M/x_2$ has a c-minor isomorphic to $N$. Then $M$ has a point $z$ such that either $M\backslash z$ or $M/z$ is $3$-connected having a c-minor isomorphic to $N$, or both $M\backslash z$ and $M/z$ have c-minors isomorphic to $N$. 
\end{lemma} \begin{proof} Assume that the lemma fails. Extend $x_1,x_2,x_3,x_4$ to a maximal fan $x_1,x_2,\dots,x_n$. Since $M/x_2$ has a c-minor isomorphic to $N$ and has $x_1$ and $x_3$ as a parallel pair of points, it follows that each of $M/x_2\backslash x_1$ and $M/x_2\backslash x_3$ has a c-minor isomorphic to $N$. Thus each of $M\backslash x_1$ and $M\backslash x_3$ has a c-minor isomorphic to $N$. Hence $M/x_4$ has a c-minor isomorphic to $N$. A straightforward induction argument establishes that $M/x_i$ has a c-minor isomorphic to $N$ for all even $i$, while $M\backslash x_i$ has a c-minor isomorphic to $N$ for all odd $i$. Then $M/x_i$ is not $3$-connected\ when $i$ is even, while $M\backslash x_i$ is not $3$-connected\ when $i$ is odd. Next we show that \begin{sublemma} \label{whereare} $M$ has no triangle that contains more than one element $x_i$ with $i$ even; and $M$ has no triad that contains more than one element $x_i$ with $i$ odd. \end{sublemma} Suppose $M$ has a triangle that contains $x_i$ and $x_j$ where $i$ and $j$ are distinct even integers. Since $M/x_i$ has $x_j$ in a parallel pair of points, $M/x_i\backslash x_j$, and hence $M\backslash x_j$, has a c-minor isomorphic to $N$. As $M/ x_j$ also has a c-minor isomorphic to $N$, we have a contradiction. Thus the first part of \ref{whereare} holds. A similar argument proves the second part. Suppose $n$ is odd. Then, since neither $M\backslash x_n$ nor $M\backslash x_{n-2}$ is $3$-connected, by \cite[Lemma 4.2]{oswww}, $M$ has a triad $T^*$ containing $x_n$ and exactly one of $x_{n-1}$ and $x_{n-2}$. By \ref{whereare}, $T^*$ contains $x_{n-1}$. Then, by the maximality of the fan, the third element of $T^*$ lies in $\{x_1,x_2,\dots,x_{n-2}\}$. But, as each of the points in the last set is in a triangle that is contained in that set, we obtain the contradiction\ that $M$ has a triangle having a single element in common with the triad $T^*$. We may now assume that $n$ is even. 
As neither $M/x_n$ nor $M/x_{n-2}$ is $3$-connected, by \cite[Lemma 4.2]{oswww}, $M$ has a triangle $T$ that contains $x_n$ and exactly one of $x_{n-1}$ and $x_{n-2}$. By \ref{whereare}, $x_{n-1} \in T$. The maximality of the fan again implies that the third element of $T$ is in $\{x_1,x_2,x_3,\dots,x_{n-2}\}$. As every element of the last set, except $x_1$, is in a triad that is contained in the set, to avoid having $T$ meet such a triad in a single element, we must have that $T = \{x_n,x_{n-1},x_1\}$. If $n= 4$, then $M|\{x_1,x_2,x_3,x_4\} \cong U_{2,4}$ so $\{x_2,x_3,x_4\}$ is a triangle; a contradiction\ to \ref{whereare}. We deduce that $n> 4$. Now neither $M\backslash x_1$ nor $M\backslash x_{n-1}$ is $3$-connected. Thus, by \cite[Lemma 4.2]{oswww}, $M$ has a triad $T^*_2$ containing $x_1$ and exactly one of $x_n$ and $x_{n-1}$. By \ref{whereare}, $x_n \in T^*_2$. The triangles $\{x_1,x_2,x_3\}$ and $\{x_3,x_4,x_5\}$ imply that $x_2 \in T^*_2$. Let $X = \{x_1,x_2,\dots, x_n \}$. Then, using the triangles we know, including $\{x_n,x_{n-1},x_1\}$, we deduce that $r(X) \le \tfrac{n}{2}$. Similarly, the triads in $M$, which are triangles in $M^*$, imply that $r^*(X) \le \tfrac{n}{2}$. Thus, by Lemma~\ref{rr*}, $\lambda(X) = 0$. Hence $X = E(M)$. As every element of $M$ is a point, $M$ is a matroid. Since every point of $M$ is in both a triangle and a triad, by Tutte's Wheels-and-Whirls-Theorem \cite{wtt}, we obtain the contradiction\ that $M$ is a whirl or the cycle matroid of a wheel. \end{proof} \begin{lemma} \label{hath} Let $M$ be a $3$-connected $2$-polymatroid having $a$ and $\ell$ as distinct elements. Then $(E(M) - \{a,\ell\},\{\ell\})$ is not a $2$-separation of $M/a$. \end{lemma} \begin{proof} Assume the contrary. Then $\ell$ is a line in $M/a$, so $\sqcap(a,\ell) = 0$. We have $$r_{M/a}(E(M) - \{a,\ell\}) + r_{M/ a}(\ell) = r(M/a) + 1.$$ As $\sqcap(a,\ell) = 0$, it follows that $(E(M) - \ell,\{\ell\})$ is a $2$-separation of $M$; a contradiction. 
\end{proof}
3,959
142,722
en
train
0.91.12
\section{Parallel connection and $2$-sum} \label{pc2s} In this section, we follow Mat\'{u}\v{s}~\cite{fm} and Hall~\cite{hall} in defining the parallel connection and $2$-sum of polymatroids. For a positive integer $k$, let $M_1$ and $M_2$ be $k$-polymatroids $(E_1,r_1)$ and $(E_2,r_2)$. Suppose first that $E_1 \cap E_2 = \emptyset$. The {\it direct sum} $M_1 \oplus M_2$ of $M_1$ and $M_2$ is the $k$-polymatroid $(E_1 \cup E_2,r)$ where, for all subsets $A$ of $E_1 \cup E_2$, we have $r(A) = r(A\cap E_1) + r(A \cap E_2)$. The following result is easily checked. \begin{lemma} \label{dualsum} For $k$-polymatroids $M_1$ and $M_2$ on disjoint sets, $$(M_1 \oplus M_2)^* = M_1^* \oplus M_2^*.$$ \end{lemma} Clearly a $2$-polymatroid is $2$-connected if and only if it cannot be written as the direct sum of two non-empty $2$-polymatroids. Now suppose that $E_1 \cap E_2 = \{p\}$ and $r_1(\{p\}) = r_2(\{p\})$. Let $P(M_1,M_2)$ be $(E_1 \cup E_2, r)$ where $r$ is defined for all subsets $A$ of $E_1 \cup E_2$ by $$r(A) = \min\{r_1(A \cap E_1) + r_2(A\cap E_2), r_1((A \cap E_1)\cup p) + r_2((A \cap E_2)\cup p) - r_1(\{p\})\}.$$ As Hall notes, it is routine to check that $P(M_1,M_2)$ is a $k$-polymatroid. We call it the {\it parallel connection} of $M_1$ and $M_2$ with respect to the {\it basepoint} $p$. When $M_1$ and $M_2$ are both matroids, this definition coincides with the usual definition of the parallel connection of matroids. Hall extends the definition of parallel connection to deal with the case when $r_1(\{p\}) \neq r_2(\{p\})$ but we shall not do that here. Now suppose that $M_1$ and $M_2$ are $2$-polymatroids having at least two elements, that $E(M_1) \cap E(M_2) = \{p\}$, that neither $\lambda_{M_1}(\{p\})$ nor $\lambda_{M_2}(\{p\})$ is $0$, and that $r_1(\{p\}) = r_2(\{p\}) = 1$. We define the {\it $2$-sum}, $M_1 \oplus_2 M_2$, of $M_1$ and $M_2$ to be $P(M_1,M_2)\backslash p$. 
We remark that this extends Hall's definition since, to ensure that $M_1 \oplus_2 M_2$ has more elements than each of $M_1$ and $M_2$, he requires that they each have at least three elements. He imposes the same requirement in his Proposition 3.6. The next result is that result with this restriction omitted. Hall's proof \cite{hall} remains valid. \begin{proposition} \label{dennis3.6} Let $M$ be a $2$-polymatroid $(E,r)$ having a partition $(X_1,X_2)$ such that $r(X_1) + r(X_2) = r(E) + 1$. Then there are $2$-polymatroids $M_1$ and $M_2$ with ground sets $X_1 \cup p$ and $X_2 \cup p$, where $p$ is a new element not in $E$, such that $M = P(M_1,M_2)\backslash p$. In particular, for all $A \subseteq X_1 \cup p$, \begin{equation*} r_1(A) = \begin{cases} r(A), & \text{if $p \not\in A$;}\\ r((A-p) \cup X_2) - r(X_2) + 1, & \text{if $p \in A$.} \end{cases} \end{equation*} \end{proposition} \begin{lemma} \label{dennisplus} Let $(X,Y)$ be a partition of the ground set of a $2$-polymatroid $M$ such that $\lambda(X) = 1$. Then, for some element $p$ not in $E(M)$, there are $2$-polymatroids $M_X$ and $M_Y$ on $X \cup p$ and $Y \cup p$, respectively, such that $M = M_X \oplus_2 M_Y$. 
Moreover, for $y \in Y$, \begin{itemize} \item[(i)] $\lambda_{M_Y \backslash y}(\{p\}) = \sqcap(X,Y-y)$; \item[(ii)] $\lambda_{M_Y/y}(\{p\}) + \sqcap(X,\{y\}) = \lambda(X) = 1$; \item[(iii)] if $\sqcap(X,Y-y) = 1$, then $M\backslash y = M_X \oplus_2 (M_Y\backslash y)$; \item[(iv)] if $\sqcap(X,\{y\}) = 0$, then $M/ y = M_X \oplus_2 (M_Y/ y)$; and \item[(v)] if $r(\{y\}) \le 1$, then \begin{equation*} M\downarrow y = \begin{cases} (M_X/p) \oplus (M_Y\backslash y /p), & \text{if $\sqcap(X,\{y\}) = 1$;}\\ M_X \oplus_2 (M_Y\downarrow y), & \text{if $\sqcap(X,\{y\}) = 0$.} \end{cases} \end{equation*} \item[(vi)] if $y$ is a line, then \begin{equation*} M\downarrow y = \begin{cases} (M_X\backslash p) \oplus (M_Y\downarrow y \backslash p), & \text{if $r(Y) = r(Y - y) +2$;}\\ M_X \oplus_2 (M_Y \downarrow y) & \text{if $r(Y) \le r(Y - y) +1$.} \end{cases} \end{equation*} In particular, $M\downarrow y = M_X \oplus_2 (M_Y \downarrow y)$ when $\sqcap_{M\downarrow y}(X,Y-y) = 1.$ \end{itemize} \end{lemma} \begin{proof} The existence of $M_X$ and $M_Y$ such that $M = P(M_X,M_Y)\backslash p$ is an immediate consequence of Proposition~\ref{dennis3.6}. To see that $P(M_X,M_Y)\backslash p = M_X \oplus_2 M_Y$, one needs only to check that $r_{M_X}(\{p\}) = 1 = r_{M_Y}(\{p\})$ and $\lambda_{M_X}(\{p\}) = 1 = \lambda_{M_Y}(\{p\})$. The proof of (i) follows by a straightforward application of the rank formula in Proposition~\ref{dennis3.6}. We omit the details. To see that (ii) holds, note that \begin{align*} \lambda_{M_Y/y}(\{p\}) & = r_{M_Y}(\{p,y\}) - r(\{y\}) + r_{M_Y}(Y) - r_{M_Y}(Y\cup p)\\ & = r(y \cup X) - r(X) + 1 - r(\{y\}) + r(Y) - r(X \cup Y) + r(X) - 1\\ & = r(y \cup X) - r(\{y\}) + r(Y) - r(X \cup Y) \\ & = r(X) - \sqcap(X,\{y\}) + r(Y) - r(X \cup Y)\\ & = \lambda_M(X) - \sqcap(X,\{y\}). \end{align*} By Hall~\cite[Proposition 3.1]{hall}, $M\backslash y = P(M_X,M_Y\backslash y)\backslash p$. If $\sqcap(X,Y-y) = 1$, then, by (i), $\lambda_{M_Y \backslash y}(\{p\}) = 1$. 
Hence, by Hall~\cite[Proposition 3.1]{hall}, $M\backslash y = M_X \oplus_2(M_Y \backslash y)$; that is, (iii) holds. To prove (iv), assume that $\sqcap(X,\{y\}) = 0$. We could again follow Hall~\cite[Proposition 3.1]{hall} to get that $M/y = P(M_X, M_Y/y)\backslash p$. But since he omits a full proof of this fact, we include it for completeness. By Proposition~\ref{dennis3.6}, $M/y = P(M_1,M_2)\backslash p$ for some $M_1$ and $M_2$. For $A \subseteq X \cup p$, \begin{align*} r_{M_1}(A) &= \begin{cases} r_{M/y}(A), & \text{if $p \not\in A$;}\\ r_{M/y}((A-p) \cup (Y-y)) - r_{M/y}(Y-y) + 1, & \text{if $p \in A$;} \end{cases}\\ &= \begin{cases} r(A\cup y) - r(\{y\}), & \text{if $p \not\in A$;}\\ r((A-p) \cup Y) - r(Y) + 1, & \text{if $p \in A$;} \end{cases}\\ & = r_{M_X}(A). \end{align*} Thus $M_1 = M_X$. Now, for $A \subseteq (Y-y) \cup p$, \begin{align*} r_{M_2}(A) &= \begin{cases} r_{M/y}(A), & \text{if $p \not\in A$;}\\ r_{M/y}((A-p) \cup X) - r_{M/y}(X) + 1, & \text{if $p \in A$;} \end{cases}\\ &= \begin{cases} r(A\cup y) - r(\{y\}), & \text{if $p \not\in A$;}\\ r((A-p) \cup X \cup y) - r(\{y\}) - r(X \cup y) + r(\{y\}) + 1, & \text{if $p \in A$;} \end{cases}\\ & = \begin{cases} r(A\cup y) - r(\{y\}), & \text{if $p \not\in A$;}\\ r((A-p) \cup X \cup y) - r(\{y\}) - r(X) + 1, & \text{if $p \in A$.} \end{cases} \end{align*} But \begin{align*} r_{M_Y/y}(A) &= r_{M_Y}(A \cup y) - r_{M_Y}(\{y\})\\ &= \begin{cases} r(A\cup y) - r(\{y\}), & \text{if $p \not\in A$;}\\ r((A-p) \cup X \cup y) - r(\{y\}) - r(X) + 1, & \text{if $p \in A$;} \end{cases}\\ &= r_{M_2}(A). \end{align*} Thus $M_2 = M_Y/y$, so $M/y = P(M_X,M_Y/y)\backslash p$. As $\sqcap(X,\{y\}) = 0$, we see, by (ii), that $\lambda_{M_Y/y}(\{p\}) = 1$. Hence $M/y = M_X \oplus_2 (M_Y/y)$; that is, (iv) holds. For (v), since $r(\{y\}) \le 1$, we have $M\downarrow y = M/y$. If $\sqcap(X,\{y\}) = 1$, then $y$ is parallel to $p$ in $M_Y$, so, by \cite[Proposition 3.1]{hall}, $M\downarrow y = (M_X/p) \oplus (M_Y/p)$. 
If $\sqcap(X,\{y\}) = 0$, then, as $M_Y \downarrow y = M_Y/y$, it follows by (iv) that $$M\downarrow y = M/y = M_X \oplus_2 (M_Y/y) = M_X \oplus_2 (M_Y \downarrow y).$$ To prove (vi), suppose first that $r(Y) = r(Y- y) +2$. We have $$r_{M_Y}(\{y,p\}) = r(y \cup X) - r(X) + 1 = 3 - \sqcap(X, \{y\}).$$ Assume $\sqcap(X, \{y\}) = 0$. Then $M_Y$ is the 2-sum, with basepoint $q$, say, of two 2-polymatroids, one of which has ground set $\{q,y,p\}$ and consists of two points and the line $y$ freely placed in the plane. Clearly, $M\downarrow y = (M_X \backslash p) \oplus (M_Y \backslash y \backslash p)$. Now assume that $\sqcap(X, \{y\}) = 1$. Then $M_Y$ is the direct sum of two 2-polymatroids, one of which has rank $2$ and consists of the line $y$ with the point $p$ on it. Once again, we see that $M\downarrow y = (M_X \backslash p) \oplus (M_Y \backslash y \backslash p)$. We may now assume that $r(Y) \le r(Y-y) + 1$. Hence $r_{M\downarrow y}(Y-y) = r(Y) - 1$. Clearly $r(X \cup y) > r(X)$. Thus \begin{equation} \label{numbth} \sqcap_{M\downarrow y}(X,Y-y) = 1. \end{equation} By Proposition~\ref{dennis3.6}, $M\downarrow y = P(M_1,M_2)\backslash p$ for some $2$-polymatroids $M_1$ and $M_2$ with ground sets $X \cup p$ and $(Y-y) \cup p$, respectively. We shall show that $M_1 = M_X$ and $M_2 = M_Y\downarrow y$. First observe that, for $A \subseteq X$, we have
3,949
142,722
en
train
0.91.13
Now, for $A \subseteq (Y-y) \cup p$, \begin{align*} r_{M_2}(A) &= \begin{cases} r_{M/y}(A), & \text{if $p \not\in A$;}\\ r_{M/y}((A-p) \cup X) - r_{M/y}(X) + 1, & \text{if $p \in A$;} \end{cases}\\ &= \begin{cases} r(A\cup y) - r(\{y\}), & \text{if $p \not\in A$;}\\ r((A-p) \cup X \cup y) - r(\{y\}) - r(X \cup y) + r(\{y\}) + 1, & \text{if $p \in A$;} \end{cases}\\ & = \begin{cases} r(A\cup y) - r(\{y\}), & \text{if $p \not\in A$;}\\ r((A-p) \cup X \cup y) - r(\{y\}) - r(X) + 1, & \text{if $p \in A$.} \end{cases} \end{align*} But \begin{align*} r_{M_Y/y}(A) &= r_{M_Y}(A \cup y) - r_{M_Y}(\{y\})\\ &= \begin{cases} r(A\cup y) - r(\{y\}), & \text{if $p \not\in A$;}\\ r((A-p) \cup X \cup y) - r(\{y\}) - r(X) + 1, & \text{if $p \in A$;} \end{cases}\\ &= r_{M_2}(A). \end{align*} Thus $M_2 = M_Y/y$, so $M/y = P(M_X,M_Y/y)\backslash p$. As $\sqcap(X,\{y\}) = 0$, we see, by (ii), that $\lambda_{M_Y/y}(\{p\}) = 1$. Hence $M/y = M_X \oplus_2 (M_Y/y)$; that is, (iv) holds. For (v), since $r(\{y\}) \le 1$, we have $M\downarrow y = M/y$. If $\sqcap(X,\{y\}) = 1$, then $y$ is parallel to $p$ in $M_Y$, so, by \cite[Proposition 3.1]{hall}, $M\downarrow y = (M_X/p) \oplus (M_Y/p)$. If $\sqcap(X,\{y\}) = 0$, then, as $M_Y \downarrow y = M_Y/y$, it follows by (iv) that $$M\downarrow y = M/y = M_X \oplus_2 (M_Y/y) = M_X \oplus_2 (M_Y \downarrow y).$$ To prove (vi), suppose first that $r(Y) = r(Y- y) +2$. We have $$r_{M_Y}(\{y,p\}) = r(y \cup X) - r(X) + 1 = 3 - \sqcap(X, \{y\}).$$ Assume $\sqcap(X, \{y\}) = 0$. Then $M_Y$ is the 2-sum, with basepoint $q$, say, of two 2-polymatroids, one of which has ground set $\{q,y,p\}$ and consists of two points and the line $y$ freely placed in the plane. Clearly, $M\downarrow y = (M_X \backslash p) \oplus (M_Y \backslash y \backslash p)$. Now assume that $\sqcap(X, \{y\}) = 1$. Then $M_Y$ is the direct sum of two 2-polymatroids, one of which has rank $2$ and consists of the line $y$ with the point $p$ on it. 
Once again, we see that $M\downarrow y = (M_X \backslash p) \oplus (M_Y \backslash y \backslash p)$. We may now assume that $r(Y) \le r(Y-y) + 1$. Hence $r_{M\downarrow y}(Y-y) = r(Y) - 1$. Clearly $r(X \cup y) > r(X)$. Thus \begin{equation} \label{numbth} \sqcap_{M\downarrow y}(X,Y-y) = 1. \end{equation} By Proposition~\ref{dennis3.6}, $M\downarrow y = P(M_1,M_2)\backslash p$ for some $2$-polymatroids $M_1$ and $M_2$ with ground sets $X \cup p$ and $(Y-y) \cup p$, respectively. We shall show that $M_1 = M_X$ and $M_2 = M_Y\downarrow y$. First observe that, for $A \subseteq X$, we have \begin{equation*} r_{M_1}(A) = \begin{cases} r_{M\downarrow y}(A), & \text{if $p \not\in A$;}\\ r_{M\downarrow y}((A-p) \cup (Y- y)) - r_{M\downarrow y}(Y-y) + 1, & \text{if $p \in A$.} \end{cases} \end{equation*} Since $r(X \cup y) > r(X)$, we see that if $p \not\in A$, then $r_{M_1}(A) = r_{M\downarrow y}(A) = r_M(A) = r_{M_X}(A)$. Now suppose $p \in A$. Assume $r((A-p) \cup (Y-y)) = r((A-p) \cup Y)$. Then $$r_{M\downarrow y}((A-p) \cup (Y-y)) = r((A-p) \cup (Y-y)) - 1 = r((A-p) \cup Y) - 1.$$ Moreover, $r_{M\downarrow y}(Y-y) = r(Y) -1.$ Hence $$r_{M_1}(A) = r_M((A-p) \cup Y) - r_M(Y) + 1 = r_{M_X}(A).$$ To show that $M_1 = M_X$, it remains to consider when $p \in A$ and $r((A-p) \cup (Y-y)) < r((A-p) \cup Y)$. Then, as $r(Y-y) \ge r(Y) - 1$, we deduce that $r((A-p) \cup (Y-y)) = r((A-p) \cup Y) - 1$, so $r(Y -y) = r(Y) - 1$. Thus we have \begin{eqnarray*} r_{M_1}(A) & = & r_{M\downarrow y}((A-p) \cup (Y-y)) - r_{M\downarrow y}(Y-y) + 1\\ & = & r((A-p) \cup (Y-y)) - r(Y-y) + 1\\ & = & r((A-p) \cup Y) - 1 - r(Y) + 1 + 1\\ & = & r_{M_X}(A). \end{eqnarray*} We conclude that $M_1 = M_X$. To show that $M_2 = M_Y\downarrow y$, suppose that $A \subseteq (Y-y) \cup p$. 
Now \begin{equation*} r_{M_2}(A) = \begin{cases} r_{M\downarrow y}(A), & \text{if $p \not\in A$;}\\ r_{M\downarrow y}((A-p) \cup X) - r_{M\downarrow y}(X) + 1, & \text{if $p \in A$.} \end{cases} \end{equation*} Suppose $p \not\in A$. Then \begin{align*} r_{M_2}(A) &= \begin{cases} r(A), & \text{if $r(A \cup y) > r(A)$;}\\ r(A) - 1, & \text{otherwise;} \end{cases}\\ &=r_{M_Y\downarrow y}(A). \end{align*} Now assume that $p \in A$. Then $r_{M\downarrow y}(X) = r(X)$. Thus \begin{equation*} r_{M_2}(A) = \begin{cases} r((A- p) \cup X) - r(X) + 1, & \text{if $r((A - p) \cup X \cup y) > r((A-p) \cup X)$;}\\ r((A- p) \cup X) - 1 - r(X) + 1, & \text{otherwise.} \end{cases} \end{equation*} Moreover, \begin{equation*} r_{M_Y\downarrow y}(A) = \begin{cases} r_{M_Y}(A), & \text{if $r_{M_Y}(A\cup y) > r_{M_Y}(A)$;}\\ r_{M_Y}(A) - 1, & \text{otherwise.} \end{cases} \end{equation*} Now $r_{M_Y}(A) = r((A-p) \cup X) - r(X) + 1$. Thus \begin{align*} r_{M_Y}(A\cup y) - r_{M_Y}(A) &= r((A-p) \cup y \cup X) - r(X) + 1 - r((A-p) \cup X) \\ & \hspace*{2.5in}+r(X) -1\\ & = r((A-p) \cup y \cup X) - r((A-p) \cup X). \end{align*} We conclude that, when $p \in A$, we have $r_{M_Y\downarrow y}(A) = r_{M_2}(A)$. Thus $M_Y\downarrow y = M_2.$ Hence $M\downarrow y = P(M_X, M_Y\downarrow y) \backslash p$. Using (\ref{numbth}), it is straightforward to show that $\lambda_{M_Y\downarrow y}(\{p\}) = 1$. It follows that $M\downarrow y = M_X \oplus_2 (M_Y \downarrow y)$. \end{proof}
2,684
142,722
en
train
0.91.14
The following was shown by Hall~\cite[Corollary 3.5]{hall}. \begin{proposition} \label{connconn} Let $M_1$ and $M_2$ be $2$-polymatroids $(E_1,r_1)$ and $(E_2,r_2)$ where $E_1 \cap E_2 = \{p\}$ and $r_1(\{p\}) = r_2(\{p\}) = 1$ and each of $M_1$ and $M_2$ has at least two elements. Then the following are equivalent. \begin{itemize} \item[(i)] $M_1$ and $M_2$ are both $2$-connected; \item[(ii)] $M_1 \oplus_2 M_2$ is $2$-connected; \item[(iii)] $P(M_1,M_2)$ is $2$-connected. \end{itemize} \end{proposition} One situation that will often occur will be when we have a certain $3$-connected $2$-polymatroid $N$ arising as a c-minor of a $2$-polymatroid $M$ that has a $2$-separation. Recall that a special $N$-minor of $M$ is a c-minor of $M$ that either equals $N$ or differs from $N$ by having a single point relabelled. \begin{lemma} \label{p49} Let $M$ be a $2$-polymatroid that can be written as the $2$-sum $M_X \oplus_2 M_Y$ of $2$-polymatroids $M_X$ and $M_Y$ with ground sets $X \cup p$ and $Y \cup p$, respectively. Let $N$ be a $3$-connected $2$-polymatroid with $|E(N)| \ge 4$ and $E(N) \subseteq E(M)$. If $M_X$ has a special $N$-minor, then $M$ has a special $N$-minor. \end{lemma} \begin{proof} Since $M_X\backslash p = M\backslash Y$ and $M_X/p = M/Y$, we may assume that the special $N$-minor of $M_X$ uses $p$. Hence every other element of the special $N$-minor of $M_X$ is in $E(N)$. For $y$ in $Y$, we will denote by $M_X(y)$ the $2$-polymatroid that is obtained from $M_X$ by relabelling $p$ by $y$. We argue by induction on $|Y|$. Suppose $|Y| = 1$ and let $y$ be the element of $Y$. If $y$ is a point, then the result is immediate since $M = M_X(y)$. If $y$ is a line, then compactifying this line gives $M_X(y)$ and again the result holds. Now suppose that $|Y| > 1$ and choose $y$ in $Y$. Suppose $\sqcap(\{y\},X) = 1$, which is certainly true if $|Y| = 1$. Then $M|(X \cup y) = M_X(y)$ if $y$ is a point. 
If $y$ is a line, then compactifying $y$ in $M|(X \cup y)$ gives $M_X(y)$. In each case, the result holds. We may now assume that $|Y| > 1$ and $\sqcap(\{y\},X) = 0$. Then, by Lemma~\ref{dennisplus}(iv), $M/y = M_X \oplus_2 (M_Y/y)$ so the result follows by induction. \end{proof} \begin{lemma} \label{p69} Let $M$ be a $2$-polymatroid that can be written as the $2$-sum $M_X \oplus_2 M_Y$ of $2$-polymatroids $M_X$ and $M_Y$ with ground sets $X \cup p$ and $Y \cup p$, respectively. Let $N$ be a $3$-connected $2$-polymatroid with $|E(N)| \ge 4$ such that $N$ is a c-minor of $M$. If $|E(N) \cap X| \ge |E(N)| - 1$, then $M_X$ has a special $N$-minor that uses $E(N) \cap X$. \end{lemma} \begin{proof} As $N$ is a c-minor of $M$, it follows by Corollary~\ref{complast3} that $N$ can be obtained from $M$ by a sequence of deletions and contractions followed by one compactification at the end. Let $N_1$ be the $2$-polymatroid that is obtained prior to the last compactification. We know that we can shuffle these deletions and contractions at will. In producing $N_1$ from $M$, let $C_Y$ and $D_Y$ be the sets of elements of $Y$ that are contracted and deleted, respectively. Suppose $\sqcap(X,C_Y) = 1$. Now $M = P(M_X,M_Y) \backslash p$. Consider $P(M_X,M_Y) /C_Y\backslash D_Y$. This has $p$ as a loop, so $P(M_X,M_Y) \backslash p /C_Y\backslash D_Y = P(M_X,M_Y)/ p /C_Y\backslash D_Y$. Since $P(M_X,M_Y)/ p= (M_X/p) \oplus (M_Y/p)$, we deduce that $Y = D_Y \cup C_Y$, so $N_1$ is a c-minor of $M_X/p$. Thus $N$ is a c-minor of $(M_X)^{\flat}$ and hence of $M_X$. We may now assume that $\sqcap(X,C_Y) = 0$. Suppose $Y \cap E(N) = \emptyset$. Then $M\backslash D_Y/C_Y = M\backslash Y = M_X\backslash p$. Hence $N_1$ is a c-minor of $M_X$. As we can perform a compactification whenever we want, we deduce that $N$ is a c-minor of $(M_X)^{\flat}$. It remains to consider the case when $Y \cap E(N)$ consists of a single element, $y$. 
In $M/C_Y\backslash D_Y$, we must have $\sqcap(X,\{y\}) = 1$, otherwise $\sqcap(X,\{y\}) = 0$ and $\{y\}$ is 1-separating in $N_1$ and hence in $N$; a contradiction. We deduce that, in $M_Y/C_Y\backslash D_Y$, the element $y$ is either a point parallel to the basepoint $p$ or a line through $p$. In the latter case, $(M/C_Y\backslash D_Y)^{\flat}$ is $(M_X(y))^{\flat}$ where $M_X(y)$ is obtained from $M_X$ by relabelling $p$ by $y$. In both cases, $(M_X(y))^{\flat}$ has $N$ as a c-minor so $(M_X)^{\flat}$ and hence $M_X$ has a special $N$-minor. \end{proof} \begin{lemma} \label{useful} Let $p$ be a point in a $2$-polymatroid $P$ having ground set $E$. If $\sqcap(p,E-p) = 1$, then $P$ has as a minor a $2$-element $2$-connected $2$-polymatroid using $p$. \end{lemma} \begin{proof} We argue by induction on $|E-p|$. The result is certainly true if $|E-p| = 1$. Assume it true for $|E-p| < n$ and let $|E-p| = n$. If $E-p$ contains an element $z$ such that $\sqcap(p,z) = 1$, then the result is immediate. Thus $E- p$ contains an element $z$ such that $\sqcap (p,z) = 0$. Then $\sqcap_{P/z}(p,E - \{p,z\}) = r(p) + r(E-p) - r(P) = 1.$ Thus, by the induction assumption, $P/z$ and hence $P$ has, as a minor, a $2$-element $2$-connected $2$-polymatroid using $p$. \end{proof} \begin{lemma} \label{claim1} Let $(X,Y)$ be an exact $2$-separation of a $2$-polymatroid $M$ and let $N$ be a $3$-connected $2$-polymatroid that is a c-minor of $M$. Suppose that $|X - E(N)| \le 1$ and $y \in Y$. \begin{itemize} \item[(i)] If $\sqcap_{M\backslash y}(X,Y-y) = 1$, then $M\backslash y$ has a special $N$-minor. \item[(ii)] If $\sqcap_{M/ y}(X,Y-y) = 1$, then $M/y$ has a special $N$-minor. \item[(iii)] If $\sqcap_{M\downarrow y}(X,Y-y) = 1$, then $M\downarrow y$ has a special $N$-minor. \end{itemize} \end{lemma} \begin{proof} By Lemma~\ref{dennisplus}, $M = M_X \oplus_2 M_Y$ where $M_X$ and $M_Y$ have ground sets $X \cup p$ and $Y\cup p$, respectively. 
By Lemma~\ref{p69}, $M_X$ has a special $N$-minor using $E(N) \cap X$. Suppose $\sqcap_{M\backslash y}(X,Y-y) = 1$. Then $\sqcap_{M_Y}(\{p\},Y-y) = 1$. Thus, by Lemma~\ref{useful}, $M_Y\backslash y$ has as a minor a $2$-polymatroid with ground set $\{p,z\}$ for some $z$ in $Y-y$ where either $p$ and $z$ are parallel points, or $z$ is a line and $p$ is a point on this line. It follows that $(M \backslash y)^{\flat}$ has as a c-minor the $2$-polymatroid that is obtained from $(M_X)^{\flat}$ by relabelling $p$ by $z$. Hence $M\backslash y$ has a special $N$-minor and (i) holds. Now suppose that $\sqcap_{M/ y}(X,Y-y) = 1$. Then, by Lemma~\ref{obs1}, $\sqcap(X,\{y\}) = 0$. Thus, by Lemma~\ref{dennisplus}(iv), $M/y = M_X \oplus_2 (M_Y/y)$. Then, by replacing $M_Y\backslash y$ by $M_Y/ y$ in the argument in the previous paragraph, we deduce that (ii) holds. Finally, suppose that $\sqcap_{M\downarrow y}(X,Y-y) = 1$. Assume first that $r(\{y\}) \le 1$. Then $M\downarrow y = M/y$, so $\sqcap_{M/ y}(X,Y-y) = 1$, and the result follows by (ii). Now let $y$ be a line of $M$. Then, by Lemma~\ref{dennisplus}(vi), $M\downarrow y = M_X \oplus_2 (M_Y \downarrow y)$. Again, by replacing $M_Y \backslash y$ by $M_Y \downarrow y$ in the argument in the first paragraph, we get that (iii) holds. \end{proof} \begin{lemma} \label{switch} Let $Q$ be a $2$-polymatroid having $k$ and $\ell$ as distinct elements and suppose that $\ell$ is a $2$-separating line. Then $$Q {\underline{\downarrow}\,} \ell \downarrow k = Q \downarrow k {\underline{\downarrow}\,} \ell.$$ \end{lemma} \begin{proof} The result is easily checked if $\lambda(\ell) = 0$, so assume that $\lambda(\ell) = 1$. Then, by Lemma~\ref{dennisplus}, $Q = P(Q_1,Q_2)\backslash p$ for some $2$-polymatroids $Q_1$ and $Q_2$ with ground sets $(E(Q) - \ell) \cup p$ and $\{\ell, p\}$ where $Q_2$ consists of the line $\ell$ with the point freely placed on it. 
Moreover, either \begin{itemize} \item[(i)] $k$ is a point that is parallel to $p$ in $Q_1$; or \item[(ii)] $Q \downarrow k = P(Q_1 \downarrow k,Q_2)\backslash p$. \end{itemize} Consider the first case. Then $Q\downarrow k = Q/k$ and $Q\downarrow k {\underline{\downarrow}\,} \ell$ can be obtained from $Q_1/p$ by adjoining $\ell$ as a loop. On the other hand, $Q {\underline{\downarrow}\,} \ell$ can be obtained from $Q_1$ by relabelling $p$ as $\ell$. Thus $Q {\underline{\downarrow}\,} \ell \downarrow k$, which equals $Q {\underline{\downarrow}\,} \ell/k$, can be obtained from $Q_1/p$ by adjoining $\ell$ as a loop. Hence the result holds in case (i). Now suppose (ii) holds. Then $Q \downarrow k {\underline{\downarrow}\,} \ell$ can be obtained from $Q_1 \downarrow k$ by relabelling $p$ as $\ell$. On the other hand, $Q{\underline{\downarrow}\,} \ell$ can be obtained from $Q_1$ by relabelling $p$ as $\ell$. Hence $Q{\underline{\downarrow}\,} \ell \downarrow k$ can be obtained from $Q_1\downarrow k$ by relabelling $p$ as $\ell$. Thus the lemma holds. \end{proof} We end this section with three lemmas concerning $2$-element prickly $3$-separators. \begin{lemma} \label{portia} Let $\{j,k\}$ be a prickly $3$-separator in a $3$-connected $2$-polymatroid $M$. Then $M\downarrow j$ and $M\downarrow k$ are $3$-connected. \end{lemma} \begin{proof} It suffices to show that $M\downarrow j$ is $3$-connected. We form $M\downarrow j$ by freely adding a point $j'$ to $j$, deleting $j$, and contracting $j'$. As $M$ is $3$-connected, so is the $2$-polymatroid $M'$ we get by adding $j'$. Now $M\downarrow j = M'\backslash j/j'$. Assume this $2$-polymatroid is not $3$-connected, letting $(U,V)$ be an $m$-separation of it for some $m$ in $\{1,2\}$. Then $$r_{M'/j'}(U) + r_{M'/j'}(V) = r(M'/j') + m-1.$$ Thus $$r_{M'}(U\cup j') + r_{M'}(V\cup j') = r(M') + m.$$ Without loss of generality, $k \in V$. Then $r_{M'}(V\cup j') = r_M(V \cup j)$ and $r_{M'}(U\cup j') = r_M(U)+ 1$. 
Therefore $$r_{M}(U) + r_{M}(V\cup j) = r(M) + m- 1.$$ As $M$ is $3$-connected, we deduce that $m = 2$. Then $\max\{|U|, r_{M'/j'}(U)\} \ge 2$. Hence $(U, V \cup j)$ is a $2$-separation of $M$; a contradiction. \end{proof}
4,029
142,722
en
train
0.91.15
\begin{lemma} \label{switch} Let $Q$ be a $2$-polymatroid having $k$ and $\ell$ as distinct elements and suppose that $\ell$ is a $2$-separating line. Then $$Q {\underline{\downarrow}\,} \ell \downarrow k = Q \downarrow k {\underline{\downarrow}\,} \ell.$$ \end{lemma} \begin{proof} The result is easily checked if $\lambda(\ell) = 0$, so assume that $\lambda(\ell) = 1$. Then, by Lemma~\ref{dennisplus}, $Q = P(Q_1,Q_2)\backslash p$ for some $2$-polymatroids $Q_1$ and $Q_2$ with ground sets $(E(Q) - \ell) \cup p$ and $\{\ell, p\}$ where $Q_2$ consists of the line $\ell$ with the point freely placed on it. Moreover, either \begin{itemize} \item[(i)] $k$ is a point that is parallel to $p$ in $Q_1$; or \item[(ii)] $Q \downarrow k = P(Q_1 \downarrow k,Q_2)\backslash p$. \end{itemize} Consider the first case. Then $Q\downarrow k = Q/k$ and $Q\downarrow k {\underline{\downarrow}\,} \ell$ can be obtained from $Q_1/p$ by adjoining $\ell$ as a loop. On the other hand, $Q {\underline{\downarrow}\,} \ell$ can be obtained from $Q_1$ by relabelling $p$ as $\ell$. Thus $Q {\underline{\downarrow}\,} \ell \downarrow k$, which equals $Q {\underline{\downarrow}\,} \ell/k$, can be obtained from $Q_1/p$ by adjoining $\ell$ as a loop. Hence the result holds in case (i). Now suppose (ii) holds. Then $Q \downarrow k {\underline{\downarrow}\,} \ell$ can be obtained from $Q_1 \downarrow k$ by relabelling $p$ as $\ell$. On the other hand, $Q{\underline{\downarrow}\,} \ell$ can be obtained from $Q_1$ by relabelling $p$ as $\ell$. Hence $Q{\underline{\downarrow}\,} \ell \downarrow k$ can be obtained from $Q_1\downarrow k$ by relabelling $p$ as $\ell$. Thus the lemma holds. \end{proof} We end this section with three lemmas concerning $2$-element prickly $3$-separators. \begin{lemma} \label{portia} Let $\{j,k\}$ be a prickly $3$-separator in a $3$-connected $2$-polymatroid $M$. Then $M\downarrow j$ and $M\downarrow k$ are $3$-connected. 
\end{lemma} \begin{proof} It suffices to show that $M\downarrow j$ is $3$-connected. We form $M\downarrow j$ by freely adding a point $j'$ to $j$, deleting $j$, and contracting $j'$. As $M$ is $3$-connected, so is the $2$-polymatroid $M'$ we get by adding $j'$. Now $M\downarrow j = M'\backslash j/j'$. Assume this $2$-polymatroid is not $3$-connected, letting $(U,V)$ be an $m$-separation of it for some $m$ in $\{1,2\}$. Then $$r_{M'/j'}(U) + r_{M'/j'}(V) = r(M'/j') + m-1.$$ Thus $$r_{M'}(U\cup j') + r_{M'}(V\cup j') = r(M') + m.$$ Without loss of generality, $k \in V$. Then $r_{M'}(V\cup j') = r_M(V \cup j)$ and $r_{M'}(U\cup j') = r_M(U)+ 1$. Therefore $$r_{M}(U) + r_{M}(V\cup j) = r(M) + m- 1.$$ As $M$ is $3$-connected, we deduce that $m = 2$. Then $\max\{|U|, r_{M'/j'}(U)\} \ge 2$. Hence $(U, V \cup j)$ is a $2$-separation of $M$; a contradiction. \end{proof} \begin{lemma} \label{pricklytime} The set $\{j,k\}$ is a prickly $3$-separator of the $2$-polymatroid $M$ if and only if it is a prickly $3$-separator in $M^*$. \end{lemma} \begin{proof} Suppose $\{j,k\}$ is a prickly $3$-separator of $M$. By Lemma~\ref{compact0}, $\lambda_{M^*}(\{j,k\}) = \lambda_{M}(\{j,k\}) = 2$. Moreover, it is straightforward to check that $r_{M^*}(\{j\}) = 2 = r_{M^*}(\{k\})$, that $r_{M^*}(\{j,k\}) = 3$, and that $\sqcap_{M^*}(\{j\},E-\{j,k\}) = 1 = \sqcap_{M^*}(k,E-\{j,k\})$. Hence $\{j,k\}$ is a prickly $3$-separator of $M^*$. Conversely, suppose that $\{j,k\}$ is a prickly $3$-separator of $M^*$. Then, by what we have just shown, $\{j,k\}$ is a prickly $3$-separator of $(M^*)^*$, that is, of $M^{\flat}$. Now $2 = \lambda_{M^{\flat}}(\{j,k\}) = \lambda_{M}(\{j,k\})$. Moreover, since $r_{M^{\flat}}(\{j\}) = 2$, it follows that $\lambda(\{j\}) = 2$, so $r(\{j\}) = 2$ and $r(E-j) = r(E)$. Similarly, $\lambda(\{k\}) = 2 = r(\{k\})$ and $r(E-k) = r(E)$. It follows, since $r_{M^{\flat}}(\{j,k\}) = 3$, that $r(\{j,k\}) = 3$. 
By using the fact that $\sqcap_{M^{\flat}}(\{j\},E-\{j,k\}) = 1 = \sqcap_{M^{\flat}}(\{k\},E-\{j,k\})$, it is not difficult to check that $\sqcap(\{j\},E-\{j,k\}) = 1 = \sqcap(\{k\},E-\{j,k\})$. We conclude that $\{j,k\}$ is a prickly $3$-separator of $M$, so the lemma holds. \end{proof} \begin{lemma} \label{ess3} Let $\{j,k\}$ be a prickly $3$-separator in a $2$-polymatroid $P$. Then \begin{itemize} \item[(i)] $P\downarrow k \backslash j = P\backslash k,j$; and \item[(ii)] $P\downarrow k / j = P/ k,j$. \end{itemize} \end{lemma} \begin{proof} Suppose $X \subseteq E(P) - \{j,k\}$. Then $r_{P\downarrow k}(X) = r_P(X)$ as $r(X \cup k) > r(X)$. Thus (i) holds. To see (ii), observe that $r_{P\downarrow k /j}(X) = r_{P\downarrow k}(X \cup j) - r(\{j\})$ since $r(\{j,k\}) > r(\{j\})$. Now, as $\sqcap(\{j\},\{k\}) = 1$, we deduce that $r(X \cup j) \le r(X \cup j \cup k) \le r(X \cup j) +1$. Thus \begin{equation*} r_{P\downarrow k}(X \cup j) = \begin{cases} r(X \cup j), & \text{if $r(X \cup j \cup k) = r(X \cup j) + 1$;}\\ r(X \cup j) - 1, & \text{if $r(X \cup j \cup k) = r(X \cup j)$.} \end{cases} \end{equation*} Hence $r_{P\downarrow k}(X \cup j) = r(X \cup j \cup k) - 1.$ Thus $r_{P\downarrow k /j}(X) = r(X \cup j \cup k) - 3 = r_{P/k,j}(X)$, so (ii) holds. \end{proof}
2,195
142,722
en
train
0.91.16
\section{The strategy of the proof} \label{strat} The proof of Theorem~\ref{mainone} is long and will occupy the rest of the paper. In this section, we outline the steps in the proof. We shall assume that the theorem fails for $M$. Hence $|E(M)| \ge |E(N)| + 2$. As $|E(N)| \ge 4$, we deduce that $|E(M)| \ge 6$. We know that $M$ has $N$ as an s-minor. This means, of course, that $N$ can be obtained from $M$ by a sequence of contractions, deletions accompanied by compactifications, and series compressions. Our first goal will be to prove the following. \begin{lemma} \label{endtime} The $2$-polymatroid $M$ has an s-minor that is isomorphic to $N$ such that, in the production of this s-minor, all of the series compressions are done last in the process. \end{lemma} Next we focus on the c-minor $N_0$ of $M$ that is obtained in the above process after all of the contractions and compactified deletions are done but before doing any of the series compressions. By Lemma~\ref{elemprop25}, $N_0$ is $3$-connected. In view of this, we see that, to prove Theorem~\ref{mainone}, it suffices to prove Theorem~\ref{modc0}, which we restate here for the reader's convenience. \begin{theorem} \label{modc} Let $M$ and $N$ be distinct $3$-connected $2$-polymatroids such that $N$ is a c-minor of $M$ and $|E(N)| \ge 4$. Then \begin{itemize} \item[(i)] $r(M) \ge 3$ and $M$ is a whirl or the cycle matroid of a wheel; or \item[(ii)] $M$ has an element $\ell$ such that $M\backslashba \ell$ or $M/\ell$ is $3$-connected having a c-minor isomorphic to $N$; or \item[(iii)] $M$ has a prickly $3$-separator $\{y,z\}$ such that $M\downarrow y$ is $3$-connected having a c-minor isomorphic to $N$. \end{itemize} \end{theorem} Our focus then becomes proving Theorem~\ref{modc}. For the rest of this section, we assume that the pair $(M,N)$ is a counterexample to that theorem. The first two steps in the argument, whose proofs appear in Section~\ref{edlp}, are as follows. 
\begin{lemma} \label{Step0} $M$ has no point $z$ such that both $M\backslashba z$ and $M/z$ have c-minors isomorphic to $N$. \end{lemma} \begin{lemma} \label{Step1} $M$ has no element $\ell$ such that $M\backslash \ell$ or $M/ \ell$ is disconnected having a c-minor isomorphic to $N$. \end{lemma} Note that the use of $\ell$ above, and in what follows, does not imply that $\ell$ is a line, although most of our attention will be focused on that case. Now $N$ occurs as a c-minor of $M$. Although we will often work with c-minors of $M$ that are isomorphic to $N$, at a certain point in the argument, we will settle on a particular labelled c-minor of $M$ that is isomorphic to $N$. When $M$ has $N$ as a c-minor and has a 2-separation $(X,Y)$, either $X$ or $Y$, say $X$, contains at least $|E(N)| - 1$ elements of $N$. We call $X$ the {\it $N$-side} of the 2-separation and $Y$ the {\it non-$N$-side}. Suppose $M\backslashba \ell$ has $N$ as a c-minor. Because the theorem fails, $M\backslashba \ell$ is not $3$-connected. Now, by Lemma~\ref{compact0}(iii), $\lambda_{M\backslashba \ell} = \lambda_{M\backslash \ell}$. Thus a partition $(X,Y)$ of $E - \ell$ with $\min\{|X|,|Y|\} \ge 2$ is a 2-separation of $M\backslashba \ell$ if and only if it is a 2-separation of $M\backslash \ell$. It follows that we can label the $N$- and non-$N$-sides of a non-trivial $2$-separation of $M\backslash \ell$ based on their labels in the corresponding $2$-separation of $M\backslashba \ell$. Among all $2$-separations of $M\backslashba \ell$, let the maximum cardinality of the non-$N$-side be $\mu(\ell)$. Similarly, if $M/\ell$ has $N$ as a c-minor, let $\mu^*(\ell)$ be the maximum cardinality of the non-$N$-side of a $2$-separation of $M/\ell$. We observe that $\mu(\ell)$ and $\mu^*(\ell)$ are not defined unless $M\backslashba \ell$ and $M/ \ell$, respectively, have $N$ as a c-minor. The next step in the argument establishes the following. 
\begin{noname} \label{Step2.2} $M$ has no element $\ell$ for which $\mu(\ell) = 2$ or $\mu^*(\ell)= 2$. \end{noname} The argument for (\ref{Step2.2}) is quite long since it involves a detailed analysis of the various structures that can arise on the non-$N$-side when $\mu(\ell) = 2$. We then use duality to eliminate the cases when $\mu^*(\ell)= 2$. These arguments appear in Section~\ref{alltwos}. Recall that a special $N$-minor of $M$ is any c-minor of $M$ that is either equal to $N$ or differs from $N$ by having a single point relabelled. The next major step in the argument, which is dealt with in Lemma~\ref{bubbly}, proves the following. \begin{noname} \label{Step3} If $(X,Y)$ is a $2$-separation of $M\backslash \ell$ where $X$ is the $N$-side and $|Y| = \mu(\ell)$, then $Y$ contains an element $y$ such that both $M\backslashba y$ and $M/ y$ have special $N$-minors. \end{noname} In Lemma~\ref{nonN}, we use the element found in the last step to prove the following. \begin{noname} \label{Step4} There is a c-minor $N'$ of $M$ that is isomorphic to $N$ such that $M$ has a $3$-separator $(X,Y)$ with $|E(N') \cap Y| \le 1$ such that if $|Y| = 2$, then both elements of $Y$ are lines. \end{noname} The particular c-minor $N'$ whose existence is proved in (\ref{Step4}) is the one used throughout the rest of the argument. From that point on in the argument, we use $N$ to denote $N'$. An exactly 3-separating set $Y$ is called a {\it non-$N$-$3$-separator} if $|E(N) \cap Y| \le 1$ and, when $|Y| = 2$, both elements of $Y$ are lines. By (\ref{Step4}), a non-$N$-$3$-separator exists. Hence there is a minimal such set. At the beginning of Section~\ref{bigtime}, we prove that \begin{noname} \label{Step5} $M$ has a minimal non-$N$-$3$-separator with at least three elements. \end{noname} The rest of Section~\ref{bigtime} is devoted to showing the following. \begin{noname} \label{Step5.5} A minimal non-$N$-$3$-separator of $M$ with exactly three elements consists of three lines. 
\end{noname} The purpose of Section~\ref{threeel} is to prove that \begin{noname} \label{Step6} $M$ has a minimal non-$N$-$3$-separator with at least four elements. \end{noname} The argument to show (\ref{Step6}) is quite long since it involves treating all non-$N$-$3$-separators that consist of exactly three lines. We say that an element $\ell$ of $M$ is {\it doubly labelled} if both $M\backslash \ell$ and $M/\ell$ have special $N$-minors. The next step, which is shown in Section~\ref{fourel}, establishes the following. \begin{noname} \label{Step7} If $Y_1$ is a minimal non-$N$-$3$-separator of $M$ with at least four elements, then $Y_1$ contains a doubly labelled element. \end{noname} Next we take the doubly labelled element $\ell$ identified in the last step. We then take non-trivial $2$-separations $(D_1,D_2)$ and $(C_1,C_2)$ of $M\backslash \ell$ and $M/ \ell$, respectively, having $D_1$ and $C_1$ as their $N$-sides. We show that these 2-separations can be chosen so that each of $D_2$ and $C_2$ is contained in $Y_1 - \ell$, and neither contains any points of $M$. We then show that each of $D_1\cap C_2, D_2 \cap C_1$, and $D_2 \cap C_2$ consists of a single line of $M$, that the union of these lines spans $\ell$, and these four lines together make up $Y_1$. The final contradiction is obtained by showing that $M/\ell_{22}$ is $3$-connected\ having a c-minor isomorphic to $N$, where $\ell_{22}$ is the unique element in $D_2 \cap C_2$.
2,553
142,722
en
train
0.91.17
\section{The reduction to c-minors} \label{redc} \setcounter{theorem}{1} The goal of this section is to prove Lemma~\ref{endtime} and thereby show that Theorem~\ref{mainone} can be proved by verifying Theorem~\ref{modc}. \begin{proof}[Proof of Lemma~\ref{endtime}.] Consider the s-minors of $M$ that are isomorphic to $N$ and are obtained using the minimum number of series compressions. Suppose $N_1$ is such an s-minor and let the number of series compressions used in its production be $m$. If $m = 0$, then $N_1$ is an s-minor of $M$ satisfying the requirements of the lemma. Hence we may assume that $m > 0$. Let $n_1$ be the number of elements that are removed after the last series compression has been completed. For $2 \le i \le m$, let $n_i$ be the number of elements that are removed via deletion or contraction between the $(m-i+1)$st and the $(m-i+2)$nd series compressions. Consider the sequence $(n_1,n_2,\dots,n_m)$ and let $N_0$ be a choice for $N_1$ for which the corresponding sequence is lexicographically minimal. If each $n_i$ is zero, then we have found, as desired, an s-minor of $M$ in which all of the series compressions are performed after all of the contractions and compactified deletions. Assume then that $n_i$ is the first non-zero $n_j$. Let $P$ be the $2$-polymatroid that we have immediately prior to the $(m-i+1)$st series compression, with this series compression involving compressing the line $k$ from the prickly $3$-separator $\{j,k\}$ of $P$. Let $Q$ be the 2-polymatroid we have immediately prior to the $(m - i +2)$nd series compression. By Lemma~\ref{ess3}, we may assume that $j$ is neither deleted nor contracted in producing $N_0$; otherwise we can replace the compression of $k$ by a deletion followed by a compactification or by a contraction. 
By Lemma~\ref{elemprop}, we may assume that either \begin{itemize} \item[(a)] all of the elements removed in producing $Q$ from $P\downarrow k$ are done so by deletion followed by compactification; or \item[(b)] the next move in the production of $Q$ is the contraction of an element, say $y$. \end{itemize} Assume that (b) holds. By Lemma~\ref{pricklytime0}, $P\downarrow k /y = P/y \downarrow k.$ Assume that $\{j,k\}$ is not a prickly $3$-separator of $P/y$. We now apply Lemma~\ref{elemprop24}. If $r(\{y,j,k\}) = 3$, then $j$ is a loop of $P\downarrow k/y$ so $j$ must be deleted or contracted to produce $N_0$; a contradiction. If $P\downarrow k/y$ is $P/y\backslashba k$ or $P/y/k$, then we do not need to compress $k$ in the production of $N_0$, so the choice of $N_0$ is contradicted. We are left with the possibility that $P\downarrow k/y$ can be obtained from $P/y\backslashba j$ by relabelling $k$ as $j$. Again we obtain the contradiction\ that we can reduce the number of series compressions where, if $j \in E(N_0)$, we replace $N_0$ by the 2-polymatroid in which $j$ is relabelled by $k$. We conclude that $\{j,k\}$ is a prickly $3$-separator of $P/y$. In that case, interchanging the compression of $k$ and the contraction of $y$ in $P$ produces a $2$-polymatroid in which $n_i$ is reduced and so the choice of $N_0$ is contradicted. We deduce that (b) does not hold, so (a) holds. In the construction of $N_0$, let $y$ be the first element that is deleted following the compression of $k$. Now, by Lemma~\ref{pricklytime0}, $P\downarrow k \backslashba y = (P\downarrow k \backslash y)^{\flat} = (P\backslash y \downarrow k)^{\flat}.$ As $P$ is compact, $r(E - y) = r(E)$. If $r(E - \{y,j,k\}) = r(E) - 3$, then $P\backslash y$ has $\{j,k\}$ as a $1$-separating set. This is a contradiction as $j$ cannot be deleted or contracted in the production of $N_0$ from $P$. Hence \begin{equation} \label{2down} r(E - \{y,j,k\}) \ge r(E) - 2. 
\end{equation} Let $S$ be the set of $2$-separating lines in $P\backslash y$. Clearly no member of $S - k$ is parallel to $k$. We show next that \begin{sublemma} \label{newjk} $S \cap \{j,k\} \neq \emptyset.$ \end{sublemma} Suppose, instead, that neither $j$ nor $k$ is in $S$. Then, by Lemma~\ref{atlast2}, $S$ is the set of $2$-separating lines of $P\backslash y \downarrow k$. Let $S = \{\ell_1,\ell_2,\dots,\ell_t\}$. Then $$P\backslashba y \downarrow k = P\backslash y {\underline{\downarrow}\,} \ell_1 {\underline{\downarrow}\,} \ell_2{\underline{\downarrow}\,} \dots{\underline{\downarrow}\,} \ell_t \downarrow k.$$ Thus, by repeated application of Lemma~\ref{switch} and using Lemma~\ref{pricklytime0}, we see that \begin{align*} P\backslashba y \downarrow k & = P\backslash y \downarrow k {\underline{\downarrow}\,} \ell_1 {\underline{\downarrow}\,} \ell_2{\underline{\downarrow}\,} \dots{\underline{\downarrow}\,} \ell_t \\ & = P\downarrow k \backslash y {\underline{\downarrow}\,} \ell_1 {\underline{\downarrow}\,} \ell_2{\underline{\downarrow}\,} \dots{\underline{\downarrow}\,} \ell_t \\ & = P\downarrow k \backslashba y. \end{align*} To prevent us from being able to reduce $n_i$, we must have that \begin{sublemma} \label{newjk2} $\{j,k\}$ is not a prickly $3$-separator of $P\backslashba y$. \end{sublemma} Continuing with the proof of \ref{newjk}, suppose $\lambda_{P\backslash y}(\{j,k\}) = 1$. Then $P\backslash y$ is the $2$-sum with basepoint $p$ of two $2$-polymatroids $P_1$ and $P_2$ having ground sets $(E - \{y,j,k\}) \cup p$ and $\{j,k,p\}$, respectively. Since neither $j$ nor $k$ is 2-separating in $P\backslash y$, it follows that, in the rank-$3$ $2$-polymatroid $P_2$, the point $p$ does not lie on either of the lines $j$ or $k$. By Lemma~\ref{dennisplus}(iv), $P\backslash y \downarrow k = P_1 \oplus (P_2 \downarrow k)$. Now $P_2 \downarrow k$ consists of the line $j$ with the point $p$ lying on it. 
Hence $j$ is a $2$-separating line of $P\backslash y \downarrow k$, so $S \cup j$ is the set of $2$-separating lines of $P\backslash y \downarrow k$. Since $P_2 \downarrow k{\underline{\downarrow}\,} j = P_2 /k$, we deduce that $P\backslash y \downarrow k{\underline{\downarrow}\,} j = P\backslash y/k$. It follows that $S$ is the set of 2-separating lines of $P\backslash y/k$. Thus \begin{align*} P\downarrow k \backslashba y & = (P\backslash y \downarrow k)^{\flat}\\ & = P\backslash y \downarrow k {\underline{\downarrow}\,} j {\underline{\downarrow}\,} \ell_1 {\underline{\downarrow}\,} \ell_2{\underline{\downarrow}\,} \dots{\underline{\downarrow}\,} \ell_t \\ & = (P\backslash y \downarrow k {\underline{\downarrow}\,} j) {\underline{\downarrow}\,} \ell_1 {\underline{\downarrow}\,} \ell_2{\underline{\downarrow}\,} \dots{\underline{\downarrow}\,} \ell_t \\ & = P \backslash y /k {\underline{\downarrow}\,} \ell_1 {\underline{\downarrow}\,} \ell_2{\underline{\downarrow}\,} \dots{\underline{\downarrow}\,} \ell_t\\ & = P/k \backslash y {\underline{\downarrow}\,} \ell_1 {\underline{\downarrow}\,} \ell_2{\underline{\downarrow}\,} \dots{\underline{\downarrow}\,} \ell_t\\ & = P/k\backslashba y. \end{align*} We conclude that, instead of compressing $k$, we can contract it, which contradicts that choice of $N_0$. We conclude that $\lambda_{P\backslash y}(\{j,k\}) = 2$. Moreover, $\sqcap(j, E-\{y,j,k\}) = 1 = \sqcap(k, E-\{y,j,k\})$ since neither $j$ nor $k$ is in $S$. Thus $\{j,k\}$ is a prickly $3$-separator of $P\backslash y$. It follows without difficulty that $\{j,k\}$ is a prickly $3$-separator of $P\backslashba y$; a contradiction to \ref{newjk2}. We conclude that \ref{newjk} holds. We now know that $j$ or $k$ is in $S$. Suppose next that both $j$ and $k$ are in $S$. Thus $r(E - \{y,j\}) = r(E) - 1 = r(E - \{y,k\})$. By submodularity and (\ref{2down}), we deduce that $r(E - \{y,j,k\}) = r(E) - 2$. 
Hence $P\backslash y$ is the $2$-sum with basepoint $p$ of two $2$-polymatroids $P_1$ and $P_2$ having ground sets $(E - \{y,j,k\}) \cup p$ and $\{j,k,p\}$, respectively. Moreover, in $P_2$, the point $p$ lies on both $j$ and $k$. Now $P\backslashba y = P\backslash y {\underline{\downarrow}\,} \ell_1 {\underline{\downarrow}\,} \ell_2{\underline{\downarrow}\,} \dots{\underline{\downarrow}\,} \ell_t {\underline{\downarrow}\,} j {\underline{\downarrow}\,} k$. Hence $P\backslashba y$ has $j$ and $k$ as parallel points. Thus \begin{align*} P\downarrow k \backslashba y & = (P\backslash y \downarrow k)^{\flat}\\ & = P\backslash y \downarrow k {\underline{\downarrow}\,} \ell_1 {\underline{\downarrow}\,} \ell_2{\underline{\downarrow}\,} \dots{\underline{\downarrow}\,} \ell_t {\underline{\downarrow}\,} j \\ & = P \backslash y {\underline{\downarrow}\,} \ell_1 {\underline{\downarrow}\,} \ell_2{\underline{\downarrow}\,} \dots{\underline{\downarrow}\,} \ell_t {\underline{\downarrow}\,} j \downarrow k ~\text{~~~~by Lemma~\ref{switch};} \\ & = P \backslash y {\underline{\downarrow}\,} \ell_1 {\underline{\downarrow}\,} \ell_2{\underline{\downarrow}\,} \dots{\underline{\downarrow}\,} \ell_t {\underline{\downarrow}\,} j {\underline{\downarrow}\,} k \backslash k ~\text{~~~~as $Q\downarrow x = Q {\underline{\downarrow}\,} x \backslash x$ when $r(\{x\}) = 2$;} \\ & = P\backslashba y\backslash k\\ & = P\backslashba y\backslashba k, \end{align*} where the last step follows because $P\downarrow k \backslashba y$ is compact and so $P\backslashba y\backslash k$ is compact. Again we have a contradiction since we have managed to remove $k$ via deletion rather than by series compression. Now assume that $k$ is in $S$ but $j$ is not. 
Then \begin{align*} P\downarrow k \backslashba y & = (P\backslash y \downarrow k)^{\flat}\\ & = P\backslash y \ {\underline{\downarrow}\,} \ell_1 {\underline{\downarrow}\,} \ell_2{\underline{\downarrow}\,} \dots{\underline{\downarrow}\,} \ell_t \downarrow k \\ & = P \backslash y {\underline{\downarrow}\,} \ell_1 {\underline{\downarrow}\,} \ell_2{\underline{\downarrow}\,} \dots{\underline{\downarrow}\,} \ell_t {\underline{\downarrow}\,} k \backslash k\\ & = P\backslashba y \backslash k\\ & = P\backslashba y \backslashba k. \end{align*} Once again we have managed to avoid the need to perform a series compression on $k$; a contradiction. Finally, suppose $j$ is in $S$ but $k$ is not. Then we use the fact that $P\downarrow j$ is $P\downarrow k$ with $j$ relabelled as $k$. The argument in the last paragraph yields a contradiction\ where, when $j \in E(N_0)$, we replace $N_0$ by the 2-polymatroid in which $j$ is labelled as $k$. \end{proof}
3,668
142,722
en
train
0.91.18
\section{Eliminating doubly labelled points} \label{edlp} In this section, we prove that, when $(M,N)$ is a counterexample to Theorem~\ref{modc}, $M$ has no doubly labelled point and has no element whose deletion or contraction is disconnected having a c-minor isomorphic to $N$. The following elementary lemmas will be helpful. \begin{lemma} \label{helpful} Let $T$ be a set of three points in a $2$-polymatroid $Q$ and suppose $x \in T$. \begin{itemize} \item[(i)] If $T$ is a triangle of $Q$, then $\lambda_{Q/ x}(T-x) \le 1$. \item[(ii)] If $T$ is a triad of $Q$, then $\lambda_{Q\backslash x}(T-x) \le 1$. \end{itemize} \end{lemma} \begin{lemma} \label{tryto} Let $T_1$ and $T_2$ be distinct triads in a $2$-polymatroid $Q$. Then $r(E(Q) - (T_1 \cup T_2)) \le r(Q) - 2.$ \end{lemma} \begin{proof} We know that $r(E(Q) - T_i) = r(Q) - 1$ for each $i$. The lemma follows easily by applying the submodularity of the rank function. \end{proof} \begin{proof}[Proof of Lemma~\ref{Step0}.] Suppose $M$ has a point $z$ such that both $M\backslashba z$ and $M/z$ have c-minors isomorphic to $N$. Then neither $M\backslash z$ nor $M/z$ is $3$-connected. We may also assume that $M$ is neither a whirl nor the cycle matroid of a wheel. By \cite[Lemma 4.1]{oswww}, $M$ has points $s$ and $t$ such that $\{z,s,t\}$ is a triangle or a triad of $M$. By replacing $M$ by $M^*$ if necessary, we may assume that $\{z,s,t\}$ is a triangle of $M$. Then $M/z$ has $s$ and $t$ as a pair of parallel points. Thus both $M/z\backslash s$ and $M/z\backslash t$ have c-minors isomorphic to $N$. As the theorem fails, neither $M\backslash s$ nor $M\backslash t$ is $3$-connected. Thus, by \cite[Lemma 4.2(i)]{oswww}, $M$ has a triad that contains $z$ and exactly one of $s$ and $t$. We may assume that the triad is $\{z,s,u\}$. Then $t,z,s,u$ is a fan in $M$. Now take a fan $x_1,x_2,\dots,x_k$ in $M$ of maximal length such that both $M\backslash x_2$ and $M/x_2$ have c-minors isomorphic to $N$. Then $k \ge 4$. 
A straightforward induction argument, whose details we omit, gives the following. \begin{sublemma} \label{fant} For all $i$ in $\{2,3,\dots,k-1\}$, both $M\backslash x_i$ and $M/x_i$ have c-minors isomorphic to $N$. \end{sublemma} Now consider $\{x_{k-2},x_{k-1},x_k\}$. Suppose first that it is a triangle. As $M/x_{k-1}$ has a c-minor isomorphic to $N$, so do $M/x_{k-1}\backslash x_k$ and hence $M\backslash x_k$. As $M\backslash x_{k-1}$ also has a c-minor isomorphic to $N$, neither $M\backslash x_k$ nor $M\backslash x_{k-1}$ is $3$-connected. Thus, by \cite[Lemma 4.2]{oswww}, it follows that $M$ has a triad $T^*$ containing $x_k$ and exactly one of $x_{k-2}$ and $x_{k-1}$. Let its third element be $x_{k+1}$. By the choice of $k$, it follows that $x_{k+1} \in \{x_1,x_2,\dots,x_{k-3}\}$. Suppose $k = 4$. Then $x_1 \in T^*$. Then $\{x_1,x_2,x_3,x_4\}$ contains two distinct triads so, by Lemma~\ref{tryto}, $r(E - \{x_1,x_2,x_3,x_4\}) \le r(M) - 2$. Thus $\lambda(\{x_1,x_2,x_3,x_4\}) \le 1$; a contradiction since $|E| \ge 6$. We deduce that $k \ge 5$. As $M$ cannot have a triangle and a triad that meet in a single element, either \begin{itemize} \item[(i)] $x_{k+1} = x_1$ and $\{x_1,x_2,x_3\}$ is a triad; or \item[(ii)] $T^*$ contains $\{x_k,x_{k-2}\}$, and $x_{k+1} \in \{x_{k-3},x_{k-4}\}$. \end{itemize} In the latter case, let $X = \{x_{k-4},x_{k-3},x_{k-2},x_{k-1},x_k\}$. Then, by Lemma~\ref{tryto}, $$r(X) + r(E-X) - r(M) \le 3 + r(M) - 2 - r(M) = 1.$$ Since $M$ is $3$-connected, we obtain a contradiction\ unless $E-X$ is empty or contains a single element, which must be a point. In the exceptional case, $M$ is a $3$-connected matroid having 5 or 6 elements and containing a 5-element subset that contains two triangles and two triads. But there is no $3$-connected\ matroid with these properties. We deduce that (ii) does not hold. We now know that (i) holds and that $T^*$ contains $\{x_k,x_{k-1}\}$. Then $k$ is even. Let $X = \{x_1,x_2,\dots,x_k\}$. 
As $M \backslash x_2$ has a c-minor isomorphic to $N$ and has $\{x_1,x_3\}$ as a series pair of points, it follows that $M \backslash x_2/x_1$, and hence, $M/x_1$ has a c-minor isomorphic to $N$. Thus, by \cite[Lemma 4.2]{oswww}, $M$ has a triangle containing $x_1$ and exactly one of $x_2$ and $x_3$. This triangle must also contain $x_k$ or $x_{k-1}$. Hence $r(X) \le r(\{x_2,x_4,x_6,\dots,x_k\}) \le \tfrac{k}{2}.$ Also $r^*(X) \le r(\{x_1,x_3,x_5,\dots,x_{k-1}\})\le \tfrac{k}{2}.$ Thus, by Lemma~\ref{rr*}, $\lambda(X) = 0$, so $X = E(M)$. Hence $M$ is a $3$-connected\ matroid in which every element is in both a triangle and a triad, so $M$ is a whirl or the cycle matroid of a wheel; a contradiction. We still need to consider the case when $\{x_{k-2},x_{k-1},x_k\}$ is a triad of $M$. Then it is a triangle of $M^*$ and the result follows by replacing $M$ by $M^*$ in the argument above. \end{proof} \begin{proof}[Proof of Lemma~\ref{Step1}.] Suppose $M\backslash \ell$ is disconnected having a c-minor isomorphic to $N$. Then $E(M\backslash \ell)$ has a non-empty proper subset $X$ such that $\lambda_{M\backslash \ell}(X) = 0$ and $M\backslash \ell \backslash X$ has $N$ as a c-minor. Then, by Lemma~\ref{Step0}, every element of $X$ must be a line. Let $Y = E(M\backslash \ell) - X$. Since $r(M\backslash \ell) = r(M),$ we deduce that \begin{equation} \label{xym} r(X) + r(Y) = r(M). \end{equation} As $r(X) \ge 2$ and $(X, Y \cup \ell)$ is not a $2$-separation\ of $M$, we deduce that $r(Y \cup \ell) = r(Y) + 2$. It follows, since $Y$ and $\ell$ are skew and $M\backslash X$ has $N$ as a c-minor, that $M/ \ell$ has $N$ as a c-minor. Since $(M,N)$ is a counterexample to the theorem, $M/ \ell$ is not $3$-connected. 
Thus there is a partition $(C_1,C_2)$ of $E(M) - \ell$ such that, for some $k$ in $\{1,2\}$, \begin{equation} \label{eqk} r_{M/ \ell}(C_1) + r_{M/ \ell}(C_2) \le r(M/ \ell) + k-1 \end{equation} where, if $k = 2$, we may assume that $\min\{|C_1|, r_{M/ \ell}(C_1),|C_2|, r_{M/ \ell}(C_2)\} \ge 2$. Hence \begin{equation} \label{c12} r(C_1 \cup \ell) + r(C_2\cup \ell) \le r(M) + 3. \end{equation} By (\ref{xym}), (\ref{c12}), and submodularity, $r(X \cup C_1 \cup \ell) + r(X \cap C_1) + r(Y \cup C_2 \cup \ell) + r(Y \cap C_2) \le 2r(M) + 3$. Then $$r(X \cup C_1 \cup \ell) + r(Y \cap C_2) \le r(M) + 1 \text{~~ or}$$ $$r(Y \cup C_2 \cup \ell) + r(X \cap C_1) \le r(M) + 1,$$ so $$r(Y \cap C_2) \le 1 \text{~~or~~} r(X \cap C_1) \le 1.$$ By symmetry, $$r(Y \cap C_1) \le 1 \text{~~or~~} r(X \cap C_2) \le 1.$$ Since $X$ does not contain any points, either $r(Y \cap C_2) \le 1$ and $r(Y \cap C_1) \le 1$; or, for some $i$ in $\{1,2\}$, $$X \cap C_i = \emptyset \text{~~ and ~~} r(Y \cap C_i) \le 1.$$ In the former case, $ |Y| \le 2$; a contradiction\ since $Y$ contains $E(N)$. In the latter case, we may assume that $C_1$ consists of a single point $p$. Then we deduce that $k = 1$ in (\ref{eqk}). Thus $p$ is a point of $M$ and $\{p\}$ is a component of $M/ \ell$. Hence both $M\backslash p$ and $M/p$ have $N$ as c-minors; a contradiction\ to Lemma~\ref{Step0}. We conclude that if $M\backslash \ell$ has a c-minor isomorphic to $N$, then $M\backslash \ell$ is $2$-connected. Now suppose that $M/ \ell$ is disconnected having a c-minor isomorphic to $N$. By Lemma~\ref{compact0}, $$\lambda_{M/ \ell} = \lambda_{(M/ \ell)^*} = \lambda_{(M^*\backslash \ell)^{\flat}} = \lambda_{M^*\backslash \ell}.$$ Thus, by replacing $M$ by $M^*$ in the argument above, we deduce that if $M/ \ell$ has a c-minor isomorphic to $N$, then $M/ \ell$ is $2$-connected. \end{proof}
3,195
142,722
en
train
0.91.19
\section{If all 2-separations have a side with at most two elements} \label{alltwos} The purpose of this section is to treat (\ref{Step2.2}). The argument here is long as it involves analyzing numerous cases. The setup is that $M$ and $N$ are $3$-connected\ $2$-polymatroids such that $|E(N)| \ge 4$. The pair $(M,N)$ is a counterexample to Theorem~\ref{modc} and $M$ has an element $\ell$ such that $M\backslash \ell$ has $N$ as a c-minor. Thus $M\backslashba \ell$ is not $3$-connected. We assume that the non-$N$-side of every non-trivial $2$-separation of $M\backslash \ell$ has exactly two elements. Thus $\mu(\ell) = 2$. Let $(X,Y)$ be a non-trivial 2-separation of $M\backslash \ell$ in which $Y$ is the non-$N$-side. Now $M\backslash \ell$ can be written as the 2-sum, with basepoint $p$, of $2$-polymatroids $M_X$ and $M_Y$ having ground sets $X \cup p$ and $Y \cup p$. The first lemma identifies the various possibilities for $M_Y$. \begin{lemma} \label{old2} Let $P$ be a $2$-connected $2$-polymatroid with three elements and rank at least two. Suppose $P$ has a distinguished point $p$. Then $P$ is one of the nine $2$-polymatroids, $P_1, P_2,\dots,P_9$, depicted in Figure~\ref{9lives}. \end{lemma} \begin{figure} \caption{The nine possible $3$-element $2$-polymatroids in Lemma~\ref{old2}} \label{9lives} \end{figure} \begin{proof} As $P$ is $2$-connected having rank at least $2$, we see that $2 \le r(P) \le 4$. If $r(P) = 2$, then $P$ is one of $P_1,P_2,P_3,$ or $P_4$; if $r(P) = 3$, then $P$ is one of $P_5,P_6,P_7,$ or $P_8$; if $r(P) = 4$, then $P$ is $P_9$. \end{proof} We shall systematically eliminate the various possibilities for $M_Y$. In each case, we will label the two elements of $M_Y$ other than $p$ by $a$ and $b$. \begin{lemma} \label{not23} $M_Y$ is not isomorphic to $P_2$ or $P_3$. \end{lemma} \begin{proof} Assume the contrary. Then $M_Y$ and hence $M$ has a point $q$ on a line $y$ where $q\neq p$. Thus $M\backslash q$ is $3$-connected. 
As $M\backslashba \ell$ has $N$ as a c-minor, it follows that $M\backslashba \ell\backslash q$, and hence $M\backslash q$, has a c-minor isomorphic to $N$; a contradiction. \end{proof} \begin{lemma} \label{fourmost} $M_Y$ is not isomorphic to $P_4$. \end{lemma} \begin{proof} Assume the contrary. Let the two parallel lines in $M_Y$ be $y$ and $y'$ where we may assume that $y \not\in E(N)$. Now $M\backslash y$ is $3$-connected, so $M\backslash y$ does not have $N$ as a c-minor. Thus $M/y$ has $N$ as a c-minor. But $y'$ is a loop of $M/y$, so $y' \not\in E(N)$ and $M\backslash y'$ has $N$ as a c-minor. Since $M\backslash y'$ is $3$-connected, we have a contradiction. \end{proof} The next lemma is designed to facilitate the elimination of the cases when $M_Y$ is one of $P_1$, $P_7$, or $P_9$. \begin{lemma} \label{179} Suppose both $a$ and $b$ are skew to $p$ in $M_Y$, and both $M_Y/a$ and $M_Y/b$ are $2$-connected. Then $M/a$ and $M/b$ have $2$-separations $(X_a,Y_a)$ and $(X_b,Y_b)$ such that $\ell \in Y_a \cap Y_b$. 
Moreover, both $M/a$ and $M/b$ have special $N$-minors, and \begin{itemize} \item[(i)] $b \in X_a$ and $a \in X_b$; \item[(ii)] both $Y_a$ and $Y_b$ properly contain $\{\ell\}$; \item[(iii)] $(X_a,Y_a - \ell)$ and $(X_b,Y_b-\ell)$ are $2$-separating partitions of $M/a\backslash \ell$ and $M/b\backslash \ell$, respectively, and $\ell \in {\rm cl}_{M/a}(Y_a - \ell)$ and $\ell \in {\rm cl}_{M/b}(Y_b - \ell)$; \item[(iv)] $(X_a \cup a,Y_a - \ell)$ and $(X_b \cup b,Y_b-\ell)$ are $2$-separating partitions of $M\backslash \ell$; \item[(v)] for $c$ in $\{a,b\}$, provided $a$ or $b$ is a point, $(X_c,Y_c - \ell)$ is a $2$-separation of $M/c\backslash \ell$ and $(X_c \cup c,Y_c - \ell)$ is a $2$-separation of $M\backslash \ell$; \item[(vi)] either $(Y_a - \ell) \cap (Y_b - \ell) \neq \emptyset$; or each of $X_b \cap (Y_a - \ell)$ and $X_a \cap (Y_b - \ell)$ consists of a single point, both $a$ and $b$ are lines of $M$, and, when $r(\{a,b\}) = 4$, the element $\ell$ is a point of $M$. \end{itemize} \end{lemma} \begin{proof} Since both $M_Y/a$ and $M_Y/b$ are $2$-connected, it follows by Lemma~\ref{claim1} that both $M\backslash \ell/a$ and $M\backslash \ell/b$ have special $N$-minors. Hence so do both $M/a$ and $M/b$. Since the theorem fails, $M/a$ and $M/b$ have $2$-separations $(X_a,Y_a)$ and $(X_b,Y_b)$ such that $\ell \in Y_a \cap Y_b$. To see that (i) holds, it suffices to show that $b \in X_a$. Assume $b\in Y_a$. Then $$r_{M/a}(X_a) + r_{M/a}(Y_a) = r(M/a) + 1,$$ so $r_{M}(X_a \cup a) - r_M(\{a\})+ r_{M}(Y_a \cup a) = r(M) + 1$. As $a$ is skew to $p$ in $M_Y$, it follows that $a$ is skew to $X$ in $M$. Since $X_a \subseteq X$, it follows that $(X_a,Y_a \cup a)$ is a $2$-separation\ of $M$; a contradiction. Hence (i) holds. Part (ii) is an immediate consequence of Lemma~\ref{hath}. To prove (iii), first observe that, by Proposition~\ref{connconn}, $M/a\backslash \ell$ is $2$-connected. We show next that \begin{equation} \label{aell} r(M/a \backslash \ell) = r(M/a). 
\end{equation} Suppose not. Then $r(M/a \backslash \ell) \le r(M/a) - 1.$ Since $M/a$ is $2$-connected, it follows that equality must hold here and $\ell$ is a line of $M/a$. This gives a contradiction\ to Lemma~\ref{hath}. Hence (\ref{aell}) holds. Now \begin{align*} r(M/a\backslash \ell) + 1 & \le r_{M/a\backslash \ell}(X_a) + r_{M/a\backslash \ell}(Y_a - \ell)\\ & \le r_{M/a}(X_a) + r_{M/a}(Y_a)\\ & = r(M/a) +1\\ & = r(M/a \backslash \ell) +1, \end{align*} where the last equality follows from (\ref{aell}). We see that equality must hold throughout the last chain of inequalities. Hence $(X_a, Y_a - \ell)$ is a $2$-separating partition of $M/a\backslash \ell$, and $\ell \in {\rm cl}_{M/a}(Y_a - \ell)$. Using symmetry, we deduce that (iii) holds. Since $b\in X_a$, we see that $Y_a - \ell \subseteq X$, so $a$ is skew to $Y_a - \ell$. It follows by (iii) that $(X_a \cup a,Y_a - \ell)$ is a 2-separating partition of $M\backslash \ell$, and (iv) follows by symmetry. To show (v), observe that, since $Y_c - \ell$ avoids $\{a,b\}$, it follows that $c$ is skew to $Y_c - \ell$. Thus it suffices to show that $(X_c,Y_c - \ell)$ is a $2$-separation of $M/c\backslash \ell$. Assume it is not. Then $Y_c - \ell$ consists of a single point $e$ of $M/c\backslash \ell$. Then $e$ is a point of $M$ and, by (iii), $r_{M/c}(\{e,\ell\}) = r_{M/c}(\{e\}) = 1$, so \begin{equation} \label{cel} r_M(\{c,e,\ell\})= r_M(\{c,e\}) = 1 + r(\{c\}). \end{equation} Suppose $c$ is a point. If $\ell$ is a line, then $c$ and $e$ are on $\ell$, so $M\backslash e$ is $3$-connected. Since $M/c$ has $e$ and $\ell$ as parallel points, $M\backslash e$ is $3$-connected\ having a c-minor isomorphic to $N$; a contradiction. Thus we may assume that $\ell$ is a point. Then $\{e,\ell,c\}$ is a triangle in $M$. Thus, for $\{c,d\} = \{a,b\}$, we see that $(X \cup \ell \cup c,\{d\})$ is a $2$-separation\ of $M$ unless $d$ is a point of $M$. 
In the exceptional case, $M$ has $e,c,\ell,d$ as a fan with $M/c$ having a c-minor isomorphic to $N$. Thus, by Lemmas~\ref{fantan} and \ref{Step0}, we have a contradiction. We may now assume that $c$ is a line. Then $r(\{c,e\}) = 3$, so, by (\ref{cel}), $r(\{c,\ell\}) = 3$. Thus $(X,\{c,\ell\})$ is a $2$-separation\ of $M\backslash d$ where $\{c,d\} = \{a,b\}$. Moreover, by hypothesis, $d$ is a point. Thus, by Lemma~\ref{newbix}, we obtain the contradiction\ that $M/d$ is $3$-connected\ unless $M/d$ has a parallel pair $\{z_1,z_2\}$ of points. In the exceptional case, we deduce that $z_1$, say, is $\ell$. Hence $(X \cup \ell \cup d,\{c\})$ is a $2$-separation\ of $M$; a contradiction. We conclude that (v) holds. To prove (vi), assume that $(Y_a - \ell) \cap (Y_b - \ell) = \emptyset$. Then $Y_b - \ell \subseteq X_a \cup a$. But $\ell \in {\rm cl}((Y_b - \ell) \cup b)$ and $b \in X_a$, so $\ell \in {\rm cl}(X_a \cup a)$. Because $M$ is $3$-connected, it follows that $Y_a - \ell$ consists of a single point $a'$. By symmetry, $Y_b - \ell$ consists of a single point $b'$. Then $(X_a \cup a, Y_a - \ell)$ is not a $2$-separation\ of $M\backslash \ell$, so, by (v), each of $a$ and $b$ is a line of $M$. To finish the proof of (vi), it remains to show that, when $r(\{a,b\}) = 4$, the element $\ell$ is a point of $M$. Assume $\ell$ is a line. Then, in $M/a$, we have $a'$ and $\ell$ as parallel points, so $M/a \backslash a'$, and hence $M\backslash a'$, has a c-minor isomorphic to $N$. As $\ell \in {\rm cl}_{M/a}(\{a'\})$, it follows that $\ell \in {\rm cl}_M(X \cup a)$. By symmetry, $\ell \in {\rm cl}_M(X \cup b)$. Thus \begin{align*} r(X) + 2 + r(X) + 2 & = r(X \cup a) + r(X \cup b)\\ & = r(X \cup a \cup \ell) + r(X \cup b \cup \ell)\\ & \ge r(X \cup \ell) + r(M)\\ & = r(X \cup \ell) + r(X) + 3. 
\end{align*} Thus $$r(X \cup \ell) \le r(X) + 1.$$ Then \begin{align*} 3 + r(X) + 1 & \ge r(\{a',a,\ell\}) + r(X \cup \ell)\\ & \ge r(\{a',\ell\}) + r(X \cup \{a',a,\ell\})\\ & = r(\{a',\ell\}) + r(X \cup a)\\ & = r(\{a',\ell\}) + r(X) + 2. \end{align*} We deduce that $r(\{a',\ell\}) = 2$, so $a'$ is a point on the line $\ell$. Thus $M\backslash a'$ is $3$-connected\ having a c-minor isomorphic to $N$; a contradiction. \end{proof}
3,814
142,722
en
train
0.91.20
Next we eliminate the possibility that $M_Y$ is $P_1$. \begin{lemma} \label{noone} $M_Y$ is not isomorphic to $P_1$. \end{lemma} \begin{proof} Assume $M_Y$ is isomorphic to $P_1$. Since $\{a,b\}$ is a series pair in $M\backslash \ell$, it follows that both $M/a$ and $M/b$ have c-minors isomorphic to $N$. Hence neither $M/a$ nor $M/b$ is $3$-connected. We show next that \begin{sublemma} \label{nonesub0} $\ell$ is a line of $M$. \end{sublemma} Assume $\ell$ is a point. Then $\{\ell,a,b\}$ is a triad of $M$. Since neither $M/a$ nor $M/b$ is $3$-connected, it follows by \cite[Lemma 4.2]{oswww} that $M$ has a triangle containing $a$ and exactly one of $b$ and $\ell$. If $M$ has $\{a,b,c\}$ as a triangle, then $M/a$ has $\{b,c\}$ as a parallel pair of points. Thus $M/a\backslash b$, and hence $M\backslash b$, has a c-minor isomorphic to $N$. Thus $b$ is a doubly labelled point; a contradiction\ to Lemma~\ref{Step0}. We deduce that $M$ has $\{a,\ell\}$ in a triangle with a point $d$, say. Then $M$ has $d,a,\ell,b$ as a fan with $M/a$ having a c-minor isomorphic to $N$. Thus, by Lemmas~\ref{fantan} and \ref{Step0}, we have a contradiction. We conclude that \ref{nonesub0} holds. By Lemma~\ref{179}, $M/a$ and $M/b$ have $2$-separations $(X_a,Y_a)$ and $(X_b,Y_b)$ such that $\ell \in Y_a \cap Y_b$. Moreover, both $M/a$ and $M/b$ have special $N$-minors, and \begin{itemize} \item[(i)] $b \in X_a$ and $a \in X_b$; \item[(ii)] both $Y_a$ and $Y_b$ properly contain $\{\ell\}$; \item[(iii)] $(X_a,Y_a - \ell)$ and $(X_b,Y_b-\ell)$ are $2$-separating partitions of $M/a\backslash \ell$ and $M/b\backslash \ell$, respectively, and $\ell \in {\rm cl}_{M/a}(Y_a - \ell)$ and $\ell \in {\rm cl}_{M/b}(Y_b - \ell)$; \item[(iv)] $(X_a \cup a,Y_a - \ell)$ and $(X_b \cup b,Y_b-\ell)$ are $2$-separating partitions of $M\backslash \ell$; and \item[(v)] $(Y_a - \ell) \cap (Y_b - \ell) \neq \emptyset$. 
\end{itemize} \begin{sublemma} \label{noonesub3} $(Y_a - \ell)\cup (Y_b - \ell) = E - \{a,b,\ell\}$. \end{sublemma} We know that $\lambda_{M\backslash \ell}(Y_a - \ell) = 1 = \lambda_{M\backslash \ell}(Y_b - \ell)$ and $(Y_a - \ell) \cap (Y_b - \ell) \neq \emptyset$, so $\lambda_{M\backslash \ell}((Y_a - \ell)\cap (Y_b - \ell)) \geq 1$. Thus, by applying the submodularity of the connectivity function, we see that \begin{align*} 1 + 1 & = \lambda_{M\backslash \ell}(Y_a - \ell) + \lambda_{M\backslash \ell}(Y_b - \ell)\\ & \ge \lambda_{M\backslash \ell}((Y_a - \ell)\cap (Y_b - \ell)) + \lambda_{M\backslash \ell}((Y_a - \ell)\cup (Y_b - \ell))\\ & \ge 1 + \lambda_{M\backslash \ell}((Y_a - \ell)\cup (Y_b - \ell)). \end{align*} Since $M\backslash \ell$ is $2$-connected, we deduce that $\lambda_{M\backslash \ell}((Y_a - \ell)\cup (Y_b - \ell)) = 1$. This application of the submodularity of the connectivity function is an example of an `uncrossing' argument. For the rest of the paper, we will omit the details of such arguments and will follow our stated practice of using the abbreviation `by uncrossing' to mean `by applying the submodularity of the connectivity function.' Now $(X_a \cup a, Y_a)$ is not a 2-separation of $M$, so, as $\ell \in {\rm cl}_{M/a}(Y_a- \ell)$, we see that $$r(Y_a - \ell) < r(Y_a) \le r(Y_a \cup a) = r((Y_a - \ell) \cup a) \le r(Y_a - \ell) + 1.$$ Hence $$r(Y_a) = r(Y_a \cup a) = r((Y_a - \ell) \cup a) = r(Y_a - \ell) + 1.$$ Thus $r((Y_a - \ell) \cup (Y_b - \ell) \cup \{a,b\}) = r(Y_a \cup Y_b \cup \{a,b\}) = r(Y_a \cup Y_b) \le r((Y_a - \ell)\cup (Y_b - \ell)) + 1.$ Also, as $\{a,b\}$ is a series pair of points in $M\backslash \ell$, we see that $r(X_a \cap X_b) \le r((X_a \cap X_b) \cup \{a,b\}) - 1$. Therefore, $\lambda_{M}(Y_a \cup Y_b \cup \{a,b\}) \leq 1$. Thus, we may assume that $X_a \cap X_b$ consists of a single matroid point, $z$, otherwise $(Y_a - \ell)\cup (Y_b - \ell) = E- \{a,b,\ell\}$ as desired. 
Now $\lambda_{M\backslash \ell}(X_a \cap X_b) = \lambda_{M\backslash \ell}(\{a,b,z\}) = 1$. If $a \notin {\rm cl}(\{b,z\})$, then $\lambda_{M\backslash \ell}((Y_a - \ell) \cup (Y_b - \ell) \cup a) \le 1$, so $\lambda_{M}((Y_a-\ell) \cup (Y_b - \ell) \cup a \cup \ell) = \lambda_{M}(Y_a \cup Y_b \cup a) \le 1$; a contradiction. Thus $a \in {\rm cl}(\{b,z\})$. Hence $\{a,b,z\}$ is a triangle of $M$. It follows that the point $b$ is doubly labelled; a contradiction\ to Lemma~\ref{Step0}. We conclude that \ref{noonesub3} holds. \begin{sublemma} \label{noonesub4} $Y_a - Y_b \neq \emptyset$ and $Y_b - Y_a \neq \emptyset$. \end{sublemma} By symmetry, it suffices to prove the first of these. Assume $Y_a - Y_b = \emptyset$. Then, as $(Y_a - \ell) \cup (Y_b - \ell) = E - \{a,b,\ell\}$, we deduce that $X_b = \{a\}$, so $(X_b,Y_b - \ell)$ is not a $2$-separation\ of $M\backslash \ell/b$; a contradiction. Thus \ref{noonesub4} holds. By \ref{noonesub4} and the fact that $(X_a \cup a) \cap (X_b\cup b)$ contains $\{a,b\}$, we see that each of $X_a\cup a$ and $X_b\cup b$ has at least three elements. It follows by the definition of $\mu(\ell)$ that each of $Y_a- \ell$ and $Y_b- \ell$ has exactly two elements. Since each of $Y_a - Y_b, (Y_a \cap Y_b) - \ell,$ and $Y_b - Y_a$ is non-empty, each of these sets has exactly one element. As the union of these sets is $E - \{\ell,a,b\}$, we deduce that $|E(M)| = 6$ and $|X_a \cup a| = 3$. Since at least one of $a$ and $b$ is not in $E(N)$, we deduce that each of $X_a\cup a$ and $Y_a - \ell$ contains at most two elements of $N$; a contradiction\ as one of these sets must contain at least three elements of $E(N)$. We conclude that Lemma~\ref{noone} holds. \end{proof}
2,374
142,722
en
train
0.91.21
\begin{lemma} \label{no7} $M_Y$ is not isomorphic to $P_7$. \end{lemma} \begin{proof} Assume that $M_Y$ is isomorphic to $P_7$, letting $a$ be the line. Then, by Lemma~\ref{179}, $M/a$ and $M/b$ have $2$-separations $(X_a,Y_a)$ and $(X_b,Y_b)$ such that $\ell \in Y_a \cap Y_b$. Moreover, both $M/a$ and $M/b$ have special $N$-minors, and \begin{itemize} \item[(i)] $b \in X_a$ and $a \in X_b$; \item[(ii)] both $Y_a$ and $Y_b$ properly contain $\{\ell\}$; \item[(iii)] $(X_a,Y_a - \ell)$ and $(X_b,Y_b-\ell)$ are $2$-separating partitions of $M/a\backslash \ell$ and $M/b\backslash \ell$, respectively, and $\ell \in {\rm cl}_{M/a}(Y_a - \ell)$ and $\ell \in {\rm cl}_{M/b}(Y_b - \ell)$; \item[(iv)] $(X_a \cup a,Y_a - \ell)$ and $(X_b \cup b,Y_b-\ell)$ are $2$-separating partitions of $M\backslash \ell$; and \item[(v)] $(Y_a - \ell) \cap (Y_b - \ell) \neq \emptyset$. \end{itemize} We show next that \begin{sublemma} \label{*5} $X_a \cap X_b$ is empty or consists of a single point. \end{sublemma} Suppose $b \not\in {\rm cl}(Y_b)$. Then $r(Y_b \cup b) = r(Y_b) + 1$. Thus $(X_b \cup b, Y_b)$ is a $2$-separation\ of $M$; a contradiction. Hence $r(Y_b \cup b) = r(Y_b)$. Now \begin{align*} r((Y_a - \ell) \cup (Y_b - \ell)) + 2 & \ge r((Y_a - \ell) \cup (Y_b - \ell) \cup a)\\ & = r(Y_a \cup (Y_b - \ell) \cup a)\\ & = r(Y_a \cup Y_b \cup a)\\ & = r(Y_a \cup Y_b \cup a \cup b). \end{align*} Also $r(X_a \cap X_b) \le r((X_a \cup a) \cap (X_b \cup b)) - 2$ since $X_a \cap X_b \subseteq X$ and $\sqcap_M(X,Y) = 1$ while $r_M(\{a,b\}) = 3$. Thus \begin{align*} \lambda_M(X_a \cap X_b) & = r(Y_a \cup Y_b \cup a \cup b) + r(X_a \cap X_b) - r(M)\\ & \le r((Y_a - \ell) \cup (Y_b - \ell)) + 2 + r((X_a \cup a) \cap (X_b \cup b)) - 2 - r(M\backslash \ell)\\ & = \lambda_{M\backslash \ell}((X_a\cup a) \cap (X_b\cup b))\\ & = 1, \end{align*} where the second-last step follows by uncrossing $(X_a \cup a, Y_a - \ell)$ and $(X_b \cup b, Y_b - \ell)$. We deduce that \ref{*5} holds. 
\begin{sublemma} \label{*6} $E(M) - \{\ell,a,b\}$ contains no point $\gamma$ such that $\{a,b,\gamma\}$ is $2$-separating in $M\backslash \ell$. \end{sublemma} To see this, suppose that such a point $\gamma$ exists. Recall that $M\backslash \ell$ has $N$ as a c-minor so at most one element of $\{a,b\}$ is in $E(N)$. Thus at most two elements of $\{a,b,\gamma\}$ are in $E(N)$. But $|E(N)| \ge 4$. Hence $\{a,b,\gamma\}$ is the non-$N$-side of a $2$-separation of $M\backslash \ell$ contradicting the fact that $\mu(\ell) = 2$. We conclude that \ref{*6} holds. An immediate consequence of \ref{*6} is that $X_a \cap X_b$ does not consist of a single point. Hence, by \ref{*5}, $X_a\cap X_b = \emptyset$. As $(X_a,Y_a)$ is a $2$-separation\ of $M/a$, it follows that $X_a$ cannot contain just the element $b$. Thus $(X_a \cup a) \cap (Y_b - \ell) \neq \emptyset$. We show next that \begin{sublemma} \label{*7} $(X_b \cup b) \cap (Y_a - \ell) \neq \emptyset$. \end{sublemma} Suppose $(X_b \cup b) \cap (Y_a - \ell) = \emptyset$. Then $Y_b - \ell = E(M) - \{a,b,\ell\} = X$ so $r(Y_b - \ell) = r(M) - 2$. Hence $r((Y_b - \ell) \cup b) \le r(M) - 1$. But $\ell \in {\rm cl}_{M/b}(Y_b - \ell)$. Thus $r(Y_b \cup b) \le r(M) - 1$, so $\{a\}$ is 2-separating in $M$; a contradiction. We deduce that \ref{*7} holds. By uncrossing, $\lambda_{M\backslash \ell}((X_b\cup b) \cap (Y_a - \ell)) = 1 = \lambda_{M\backslash \ell}((X_a\cup a) \cap (Y_b - \ell))$. As $\ell$ is in both ${\rm cl}((Y_a - \ell) \cup a)$ and ${\rm cl}((Y_b - \ell) \cup b)$, we deduce that each of $(X_a \cup a) \cap (Y_b - \ell)$ and $(X_b \cup b) \cap (Y_a - \ell)$ consists of a single point. Thus we get a contradiction\ to \ref{*6} that completes the proof of Lemma~\ref{no7}. \end{proof}
1,741
142,722
en
train
0.91.22
On combining Lemmas~\ref{not23}, \ref{noone}, and \ref{no7}, we immediately obtain the following. \begin{corollary} \label{pointless} The non-$N$-side of every $2$-separation of $M\backslash \ell$ does not contain any points. \end{corollary} \begin{lemma} \label{no9} $M_Y$ is not isomorphic to $P_9$. \end{lemma} \begin{proof} Assume $M_Y$ is isomorphic to $P_9$. Since each of $M_Y\backslash \ell/a$ and $M_Y\backslash \ell/b$ consists of a line through $p$, it follows that both $M/a$ and $M/b$ have c-minors isomorphic to $N$. Hence neither $M/a$ nor $M/b$ is $3$-connected. Then $M/a$ and $M/b$ have $2$-separations $(X_a,Y_a)$ and $(X_b,Y_b)$ such that $\ell \in Y_a \cap Y_b$. Moreover, by Lemma~\ref{179}, \begin{itemize} \item[(i)] $b \in X_a$ and $a \in X_b$; \item[(ii)] both $Y_a$ and $Y_b$ properly contain $\{\ell\}$; \item[(iii)] $(X_a,Y_a - \ell)$ and $(X_b,Y_b-\ell)$ are $2$-separating partitions of $M/a\backslash \ell$ and $M/b\backslash \ell$, respectively, and $\ell \in {\rm cl}_{M/a}(Y_a - \ell)$ and $\ell \in {\rm cl}_{M/b}(Y_b - \ell)$; \item[(iv)] $(X_a \cup a,Y_a - \ell)$ and $(X_b \cup b,Y_b-\ell)$ are $2$-separating partitions of $M\backslash \ell$; and \item[(v)] either $(Y_a - \ell) \cap (Y_b - \ell) \neq \emptyset$; or each of $X_b \cap (Y_a - \ell)$ and $X_a \cap (Y_b - \ell)$ consists of a single point, both $a$ and $b$ are lines of $M$, and $\ell$ is a point of $M$. \end{itemize} \begin{sublemma} \label{no9.3} $(Y_a - \ell) \cap (Y_b - \ell) \neq \emptyset$. \end{sublemma} Assume the contrary. Then, by (v), $X_b \cap (Y_a - \ell)$ consists of a point, $a'$, say. By (iii), $\ell \in {\rm cl}_{M/a}(\{a'\})$, so $\ell \in {\rm cl}(\{a',a\})$. As $r(M) - 3 = r(X)$, it follows that $r(X \cup a \cup \ell) \le r(M) - 1$. Hence the line $\{b\}$ is 2-separating in $M$; a contradiction. Thus \ref{no9.3} holds. \begin{sublemma} \label{no9.3.5} $|(Y_a - \ell) \cup (Y_b - \ell)| \geq 2$. 
\end{sublemma} Assume $(Y_a - \ell) \cup (Y_b - \ell)$ contains a unique element, $z$. Then, by \ref{no9.3}, $z \in (Y_a - \ell) \cap (Y_b - \ell)$. Now $\ell \in {\rm cl}_{M/a}(\{z\})$, so $\ell \in {\rm cl}_M(\{z,a\})$. Thus $$r(X \cup a \cup \ell) = r(X \cup a) = r(X) + 2 = r(M) -1,$$ so $(X \cup a \cup \ell, \{b\})$ is a $2$-separation\ of $M$; a contradiction. Thus \ref{no9.3.5} holds. By \ref{no9.3} and uncrossing, we see that $\lambda_{M\backslash \ell}((X_a \cup a) \cap (X_b \cup b)) = 1$. Next we show the following. \begin{sublemma} \label{no9.4} $(Y_a - \ell) \cup (Y_b - \ell)$ is the non-$N$-side of a $2$-separation of $M\backslash \ell$ and it is a $2$-element set, both members of which are lines. \end{sublemma} By \ref{no9.3.5}, $((X_a \cup a) \cap (X_b \cup b),(Y_a - \ell) \cup (Y_b - \ell))$ is a $2$-separation\ of $M\backslash \ell$. Suppose $(X_a \cup a) \cap (X_b \cup b)$ is the non-$N$-side of this 2-separation. Then, as $\mu(\ell) = 2$, we deduce that $(X_a \cup a) \cap (X_b \cup b) = \{a,b\}$. Thus, as $\ell \in {\rm cl}((Y_a - \ell) \cup a)$, \begin{align*} r(M) + 1 & = r((Y_a - \ell) \cup (Y_b - \ell)) + r(\{a,b\})\\ & = r((Y_a - \ell) \cup (Y_b - \ell) \cup a) + r(\{b\})\\ & = r(Y_a \cup Y_b \cup a) + r(\{b\}). \end{align*} Hence $\{b\}$ is 2-separating in $M$; a contradiction. Thus $(Y_a - \ell) \cup (Y_b - \ell)$ must be the non-$N$-side of a 2-separation of $M\backslash \ell$, so this set has cardinality two. Moreover, by Corollary~\ref{pointless}, both elements of this set are lines. Thus \ref{no9.4} holds. We deduce from \ref{no9.4} that $Y_a - \ell$ and $Y_b - \ell$ are the non-$N$-sides of 2-separations of $M\backslash \ell$. Thus, by symmetry, we may assume that $Y_b - \ell \subseteq Y_a - \ell$. Hence \begin{equation} \label{unc} (Y_a - \ell) \cup (Y_b - \ell) = Y_a - \ell. \end{equation} \begin{sublemma} \label{no9.5} $(Y_a \cup \{a,b\}, X_a \cap X_b)$ is a $2$-separation of $M$. 
\end{sublemma} Since $Y_a - \ell \supseteq Y_b - \ell$, we have $$r(Y_a \cup a) = r((Y_a - \ell) \cup a) = r(Y_a - \ell) + 2$$ and $$r(Y_a \cup b) = r((Y_a - \ell) \cup b) = r(Y_a - \ell) + 2.$$ Moreover, $$r(Y_a \cup \{a,b\}) = r((Y_a- \ell) \cup \{a,b\}) \ge r(Y_a - \ell) + 3.$$ Thus, by submodularity, \begin{align*} r(Y_a - \ell) + 2 + r(Y_a - \ell) + 2 & = r(Y_a \cup a) + r(Y_a \cup b)\\ & \ge r(Y_a \cup a \cup b) + r(Y_a)\\ & \ge r(Y_a - \ell) + 3 + r(Y_a)\\ & \ge r(Y_a - \ell) + 3 + r(Y_a - \ell) + 1, \end{align*} where the last step follows because $\ell \notin {\rm cl}(Y_a - \ell)$. We see that equality must hold throughout the last chain of inequalities. Hence $r(Y_a) = r(Y_a - \ell) + 1$ and $r(Y_a \cup \{a,b\}) = r(Y_a - \ell) + 3 = r(Y_a) + 2$. As $\lambda_{M\backslash \ell}(Y_a - \ell) = 1$, it follows that $\lambda_{M}(Y_a) = 2$, that is, $$r(Y_a) + r((X_a \cup a) \cap (X_b \cup b)) - r(M) = 2.$$ Hence \begin{align*} r(Y_a \cup \{a,b\}) + r(X_a \cap X_b) - r(M) & \le r(Y_a) + 2 + r((X_a \cup a) \cap (X_b \cup b))\\ & \hspace*{2in} - 3 - r(M)\\ & = 1. \end{align*} Thus $(Y_a \cup \{a,b\}, X_a \cap X_b)$ is a 2-separating partition of $M$. Since $(X_a \cup a) \cap (X_b \cup b)$ is the $N$-side of a 2-separation of $M\backslash \ell$, it follows that $X_a \cap X_b$ contains at least two elements of $E(N)$ as $\{a,b\}$ contains at most one element of $E(N)$. Thus $(Y_a \cup \{a,b\}, X_a \cap X_b)$ is a 2-separation of $M$, that is, \ref{no9.5} holds. But \ref{no9.5} gives a contradiction\ and thereby completes the proof of Lemma~\ref{no9}. \end{proof} We now know that there are only three possibilities for $M_Y$, namely $P_5$, $P_6$, or $P_8$. The next few lemmas will be useful in treating all three cases. \begin{lemma} \label{cactus} Assume $M\backslash \ell$ has $(X,\{a,b\})$ as a $2$-separation where $r(\{a,b\}) = 3$ and each of $a$ and $b$ is a line. Then $r(X \cup \ell) = r(X) + 1$ if and only if $\{a,b\}$ is a prickly $3$-separating set in $M$. 
\end{lemma} \begin{proof} If $\{a,b\}$ is a $3$-separating set in $M$, then $r(X \cup \ell) = r(M) - 1$. But $r(X) = r(M) - 2$, so $r(X \cup \ell) = r(X) + 1$. Conversely, if $r(X \cup \ell) = r(X) + 1$, then $r(X \cup \ell) = r(M) - 1$, so $\{a,b\}$ is a 3-separating set in $M$. Now $r(X \cup \ell \cup a) = r(M)$ otherwise $\{b\}$ is 2-separating in $M$. By symmetry, $r(X \cup \ell \cup b) = r(M)$. Hence $\{a,b\}$ is a prickly $3$-separating set in $M$. \end{proof} \begin{lemma} \label{cactus2} Assume $M$ has $\{a,b\}$ as a prickly $3$-separating set that is $2$-separating in $M\backslash \ell$. Then $M\downarrow a$ and $M\downarrow b$ are $3$-connected having c-minors isomorphic to $N$. \end{lemma} \begin{proof} By Lemma~\ref{portia}, $M\downarrow a$ and $M\downarrow b$ are $3$-connected. Since $M_X$ and $M_Y$ have ground sets $X \cup p$ and $\{a,b,p\}$, we see that $r(M_Y) = 3$. By Lemma~\ref{pricklytime0}, $M\downarrow a \backslash \ell = M\backslash \ell \downarrow a$. But $M\backslash \ell \downarrow a$ equals the 2-sum of $M_X$ and the $2$-polymatroid consisting of a line $b$ through the point $p$. Compactifying $b$ in $M\backslash \ell \downarrow a$ gives the $2$-polymatroid that is obtained from $M_X$ by relabelling $p$ by $b$. Hence $M\downarrow a \backslashba \ell$ has a c-minor isomorphic to $N$. Thus, using symmetry, so do $M\downarrow a$ and $M\downarrow b$. \end{proof} \begin{lemma} \label{pixl} If $M_Y$ is $P_5$, $P_6$, or $P_8$, then $r(X \cup \ell) = r(X) +2$, so $\ell$ is a line. \end{lemma} \begin{proof} Assume $r(X \cup \ell) = r(X) +1$. Then, by Lemma~\ref{cactus}, $\{a,b\}$ is a prickly 3-separating set in $M$. Then, by Lemma~\ref{cactus2}, $M\downarrow a$ and $M\downarrow b$ are $3$-connected having c-minors isomorphic to $N$; a contradiction\ to the fact that $(M,N)$ is a counterexample to Theorem~\ref{modc}. Thus $r(X \cup \ell) \neq r(X) +1$. Since $\ell \not\in {\rm cl}(X)$, we deduce that $r(X \cup \ell) = r(X) +2$, so $\ell$ is a line. 
\end{proof}
3,567
142,722
en
train
0.91.23
We now know that there are only three possibilities for $M_Y$, namely $P_5$, $P_6$, or $P_8$. The next few lemmas will be useful in treating all three cases. \begin{lemma} \label{cactus} Assume $M\backslash \ell$ has $(X,\{a,b\})$ as a $2$-separation where $r(\{a,b\}) = 3$ and each of $a$ and $b$ is a line. Then $r(X \cup \ell) = r(X) + 1$ if and only if $\{a,b\}$ is a prickly $3$-separating set in $M$. \end{lemma} \begin{proof} If $\{a,b\}$ is a $3$-separating set in $M$, then $r(X \cup \ell) = r(M) - 1$. But $r(X) = r(M) - 2$, so $r(X \cup \ell) = r(X) + 1$. Conversely, if $r(X \cup \ell) = r(X) + 1$, then $r(X \cup \ell) = r(M) - 1$, so $\{a,b\}$ is a 3-separating set in $M$. Now $r(X \cup \ell \cup a) = r(M)$ otherwise $\{b\}$ is 2-separating in $M$. By symmetry, $r(X \cup \ell \cup b) = r(M)$. Hence $\{a,b\}$ is a prickly $3$-separating set in $M$. \end{proof} \begin{lemma} \label{cactus2} Assume $M$ has $\{a,b\}$ as a prickly $3$-separating set that is $2$-separating in $M\backslash \ell$. Then $M\downarrow a$ and $M\downarrow b$ are $3$-connected having c-minors isomorphic to $N$. \end{lemma} \begin{proof} By Lemma~\ref{portia}, $M\downarrow a$ and $M\downarrow b$ are $3$-connected. Since $M_X$ and $M_Y$ have ground sets $X \cup p$ and $\{a,b,p\}$, we see that $r(M_Y) = 3$. By Lemma~\ref{pricklytime0}, $M\downarrow a \backslash \ell = M\backslash \ell \downarrow a$. But $M\backslash \ell \downarrow a$ equals the 2-sum of $M_X$ and the $2$-polymatroid consisting of a line $b$ through the point $p$. Compactifying $b$ in $M\backslash \ell \downarrow a$ gives the $2$-polymatroid that is obtained from $M_X$ by relabelling $p$ by $b$. Hence $M\downarrow a \backslashba \ell$ has a c-minor isomorphic to $N$. Thus, using symmetry, so do $M\downarrow a$ and $M\downarrow b$. \end{proof} \begin{lemma} \label{pixl} If $M_Y$ is $P_5$, $P_6$, or $P_8$, then $r(X \cup \ell) = r(X) +2$, so $\ell$ is a line. \end{lemma} \begin{proof} Assume $r(X \cup \ell) = r(X) +1$. 
Then, by Lemma~\ref{cactus}, $\{a,b\}$ is a prickly 3-separating set in $M$. Then, by Lemma~\ref{cactus2}, $M\downarrow a$ and $M\downarrow b$ are $3$-connected having c-minors isomorphic to $N$; a contradiction\ to the fact that $(M,N)$ is a counterexample to Theorem~\ref{modc}. Thus $r(X \cup \ell) \neq r(X) +1$. Since $\ell \not\in {\rm cl}(X)$, we deduce that $r(X \cup \ell) = r(X) +2$, so $\ell$ is a line. \end{proof} Next we deal with the case when $M\backslash \ell$ has $(X,Y)$ as its only $2$-separation\ with $|Y| = 2$, beginning with the possibility that $M_Y= P_6$. \begin{lemma} \label{duh} Suppose $M_Y = P_6$ and $(X,Y)$ is the only non-trivial $2$-separation of $M\backslash \ell$. Then \begin{itemize} \item[(i)] $M\backslashba a$ or $M\backslashba b$ is $3$-connected having a special $N$-minor; or \item[(ii)] each of $\{a,\ell\}$ and $\{b,\ell\}$ is a prickly $3$-separator of $M$, and each of $M\downarrow a$ and $M\downarrow b$ is $3$-connected having a c-minor isomorphic to $N$. \end{itemize} \end{lemma} \begin{proof} By Lemma~\ref{pixl}, $\ell$ is a line of $M$ and $\sqcap(X,\ell) = 0$. In $M\backslashba \ell$, we see that $a$ and $b$ are parallel points. Hence each of $M\backslash a$ and $M\backslash b$ has a special $N$-minor. But $r(E - \{a,b,\ell\}) = r(M) - 2$ and $r(E - \{a,\ell\}) = r(M) - 1$, so $\{\ell\}$ is 2-separating in $M\backslash a$. Now both $M\backslashba a$ and $M\backslashba b$ have special $N$-minors. Hence we may assume that neither of these matroids is $3$-connected. Next we show that \begin{sublemma} \label{duh2} $r(\{a,\ell\}) = 3 = r(\{b,\ell\})$. \end{sublemma} We shall show that $r(\{b,\ell\}) = 3$, which, by symmetry, will suffice. As $M\backslashba a$ is not $3$-connected, $M\backslash a$ has a non-trivial 2-separation $(A,B)$ in which $A$ contains $\ell$. Then $(A - \ell,B)$ is a 2-separating partition of $M\backslash a \backslash \ell$. Observe that $r(M\backslash a \backslash \ell) = r(M) - 1$. Suppose $b \in B$. 
Then $r(B \cup a) = r(B) + 1$. Thus $(A - \ell, B\cup a)$ is a 2-separating partition of $M \backslash \ell$. Since $B\cup a \neq \{a,b\}$, we deduce that $A - \ell$ contains a unique element. Moreover, as $\sqcap(X,\ell) = 0$, it follows that $r(A) = r(A - \ell) + 2$. Thus $(A- \ell, B \cup a)$ is a 1-separating partition of $M\backslash a$; a contradiction\ to Lemma~\ref{Step1}. We may now assume that $b \in A - \ell$. Then $((A - \ell) \cup a, B)$ is a non-trivial $2$-separation\ of $M\backslash \ell$. Thus $(A - \ell) \cup a = \{a,b\}$, so $A = \{b,\ell\}$. Hence $B = X$ and $r(\{b,\ell\}) = 3$. Thus \ref{duh2} holds. As $r(X \cup a) = r(M) - 1$, we deduce that $\{b, \ell\}$ is a prickly $3$-separator of $M$. Now $M\backslash \ell \downarrow b$, which, by Lemma~\ref{pricklytime0}, equals $M \downarrow b \backslash \ell$, has a c-minor isomorphic to $N$. Hence so does $M\downarrow b$ and, by symmetry, $M\downarrow a$. Thus, by Lemma~\ref{portia}, part~(ii) of the lemma holds. \end{proof}
2,051
142,722
en
train
0.91.24
\begin{lemma} \label{duh85} Suppose $M_Y$ is $P_5$ or $P_8$. Let $a$ be an element of $Y$ for which $\sqcap(\{a\},\{p\}) = 0$. Then \begin{itemize} \item[(i)] $M/a$ has a $2$-separation; and \item[(ii)] for every $2$-separation $(A,B)$ of $M/a$ with $\ell$ in $A$, \begin{itemize} \item[(a)] $b \in B$; \item[(b)] $(A - \ell,B \cup a)$ is a $2$-separation of $M\backslash \ell$ and $|B-b| \ge 2$; \item[(c)] $|A - \ell| \le 2$ and if $|A - \ell| = 1$, then $A - \ell$ consists of a line of $M/a$; \item[(d)] $r_{M/a}(A - \ell) = r_{M/a}(A)$; and \item[(e)] $\sqcap(\{a,b\}, A - \ell) = 0$. \end{itemize} \end{itemize} Moreover, if $(X,Y)$ is the unique non-trivial $2$-separation of $M\backslash \ell$, then $M/a$ has a unique $2$-separation $(A,B)$ with $\ell$ in $A$. Further, $A - \ell$ consists of a line of $M/a$. \end{lemma} \begin{proof} Certainly $M\backslash \ell/a$ and hence $M/a$ has a c-minor isomorphic to $N$. By Lemma~\ref{pixl}, $\ell$ is a line and $\sqcap(X,\ell) = 0$. As the theorem fails, $M/a$ is not $3$-connected, but, by Lemma~\ref{Step1}, it is $2$-connected. Let $(A,B)$ be a $2$-separation\ of $M/a$ with $\ell$ in $A$. \begin{sublemma} \label{bB} $b \in B$. \end{sublemma} Suppose $b \in A$. Then $a$ is skew to $B$ in $M$, so $(A\cup a,B)$ is a $2$-separation\ of $M$; a contradiction. Thus \ref{bB} holds. \begin{sublemma} \label{mab} $M$ does not have a point $c$ such that $B = \{b,c\}$. \end{sublemma} Assume the contrary. We have $r_{M/a}(A)+r_{M/a}(B)-r(M/a)=1$, that is, $r(A\cup a)-2+r(\{a, b, c\})-r(M)=1$. But $r(A-\ell)\le r(A\cup a)-2$ and $A - \ell = X - c$. Hence $r(X-c)+r(\{a, b, c\})-r(M)\le 1$. Since $r(M)=r(M\backslash \ell)$, this implies that $(X-c, \{a, b, c\})$ is a $2$-separation of $M\backslash \ell$ that violates the fact that $\mu(\ell) = 2$. If such a point $c$ exists, then $A \cup a \supseteq X \cup a$, so $r(A \cup a) = r(M)$. 
Hence $r(\{a,b,c\}) = 3 = r(\{a,b\})$, so $(X - c, \{a,b,c\})$ is a $2$-separation\ of $M\backslash \ell$ that violates the choice of $Y$. Thus \ref{mab} holds. Next we show that \begin{sublemma} \label{aminusl} $(A - \ell, B)$ is a $2$-separation of $M\backslash \ell/a$. \end{sublemma} Certainly $(A - \ell, B)$ is $2$-separating in $M\backslash \ell/a$. We need to show that $\max\{|A - \ell|, r(A - \ell)\} \ge 2$. By Lemma~\ref{skewer}, $A \neq \{\ell\}$. Assume $A = \{\ell,c\}$ where $c$ is a point of $M/a$. Then $c$ is a point in $M$ as $a$ is skew to $X$. Moreover, \begin{equation} \label{seec} c \in {\rm cl}_M(X - c) \end{equation} otherwise $(X - c, \{a,b,c\})$ is a $2$-separation\ of $M\backslash \ell$ that violates the choice of $Y$. By Lemma~\ref{skewer}, $a$ is not skew to $\{c,\ell\}$, so $r_{M/a}(\{c, \ell\}) < r_{M}(\{c, \ell\}) \le 3$. Suppose $r_{M/a}(\{c,\ell\}) = 2$. Then $r_M(B \cup a) = r(M) - 1$, so $(\{c\}, B \cup a)$ is a 1-separation of $M\backslash \ell$; a contradiction. We conclude that \begin{sublemma} \label{aminusl.1} $r_{M/a}(\{c,\ell\}) = 1$, so $r_M(\{a,c,\ell\}) = 3$ and $r(M\backslash \ell/a) = r(M/a)$. \end{sublemma} Since $c$ and $\ell$ are parallel points in $M/a$, we deduce that $M\backslash c$ has a c-minor isomorphic to $N$. Thus $M\backslash c$ has a $2$-separation\ $(U,V)$ where we may assume that $\ell \in U$ and $a \in V$ otherwise $M$ has a $2$-separation. Continuing with the proof of \ref{aminusl}, next we show that \begin{sublemma} \label{bisinP} $b \in U$. \end{sublemma} Suppose $b \in V$. Then, as $a \in V$, we see that $r(V \cup \ell) \le r(V) + 1$ and $r(U - \ell) = r(U) - 2$. Thus $U = \{\ell\}$ otherwise $(U - \ell, V \cup \ell)$ is a 1-separation of $M\backslash c$. But, by (\ref{seec}), $c \in {\rm cl}(E - c - \ell)$. Hence $(U,V \cup c)$ is a $2$-separation\ of $M$; a contradiction. Hence \ref{bisinP} holds. \begin{sublemma} \label{pnotlb} $U \neq \{\ell,b\}$. \end{sublemma} Assume $U = \{\ell,b\}$. 
Then $V = (X - c) \cup a$. Thus $r(V) \ge r(X) + 1 = r(M) - 1$. But $r(U) \ge 3$ so $(U,V)$ is not a $2$-separation\ of $M\backslash c$. Thus contradiction\ completes the proof of \ref{pnotlb}. \begin{sublemma} \label{pnotlbd} $M$ does not have a point $d$ such that $U = \{\ell,b,d\}$. \end{sublemma} Assume the contrary. Then \begin{equation} \label{veeq} r(V) = r((X - \{c,d\}) \cup a) \ge r(M) - 2 \end{equation} so, as $r(U) + r(V) = r(M) +1$, we must have that \begin{equation} \label{peep} r(\{\ell,b,d\}) = r(U) \le 3. \end{equation} Thus equality must hold in each of (\ref{veeq}) and (\ref{peep}). As $r(\{a,c,\ell\}) = 3$, we have \begin{align*} r(\{b,d\}) + r((X - d) \cup \{a,\ell\}) & = r(\{b,d\}) + r((X - \{c,d\}) \cup \{a,\ell\})\\ & \le r(\{\ell,b,d\}) + r((X - \{c,d\}) \cup a) + 1\\ & = r(M) + 2. \end{align*} Now $r(\{b,d\}) = 3$, otherwise $\{a,b,d\}$ contradicts the choice of $Y$ since at most one of $a$ and $b$ is in $E(N)$. Hence $(\{b,d\}, (X - d) \cup \{a,\ell\})$ is a 3-separation of $M$. Thus $r((X - d) \cup \{a,\ell\}) = r(M) - 1$, so $r((X - d) \cup a) \le r(M) - 1$. Hence $r(X - d) \le r(M) - 3$, while $r(X) = r(M) - 2$. Thus $(X - d, \{a,b,d\})$ is a $2$-separation\ of $M\backslash \ell$ contradicting the choice of $Y$. We conclude that \ref{pnotlbd} holds. Now recall that $\{\ell,b\} \subseteq U$ and $a \in V$. Moreover, $r(\{a,c,\ell\}) = 3$ and $\sqcap(a,b) = 1$. Thus $$r(V \cup \{\ell,b\}) \le r(V) + 2.$$ Also $\ell \not\in {\rm cl}(X \cup b)$ otherwise $\{a\}$ is 2-separating in $M$; a contradiction. Thus $$r(U - \{\ell,b\}) \le r(U) - 2.$$ It follows by \ref{pnotlb} and \ref{pnotlbd} that $(U - \{\ell,b\}, V \cup \{\ell,b\})$ is a $2$-separation\ of $M\backslash c$, so $(U - \{\ell,b\}, V \cup \{\ell,b\}\cup c)$ is a $2$-separation\ of $M$. This contradiction\ completes the proof of \ref{aminusl}. 
We deduce from \ref{aminusl} that (ii)(d) of the lemma holds, that is, \begin{sublemma} \label{rankla} $r((A - \ell) \cup a) = r(A \cup a).$ \end{sublemma} Moreover, since $a$ is skew to $X$, and $A - \ell \subseteq X$, it follows, by Lemma~\ref{skewer}, that \begin{sublemma} \label{aminuslba} $(A - \ell, B \cup a)$ is a $2$-separation of $M\backslash \ell$. \end{sublemma} Now $(A,B)$ is a $2$-separation\ of $M/a$ and $b \in B$. Since $b$ is a point of $M/a$, it follows that $|B| \ge 2$, so $|B \cup a| \ge 3$. Hence $B \cup a$ is the $N$-side of the $2$-separation\ $(A - \ell, B \cup a)$ of $M\backslash \ell$. At most one member of $\{a,b\}$ is in $E(N)$. Since $|E(N)| \ge 4$, it follows that at least two elements of $N$ are in $B- b$, so $|B-b| \ge 2$. Thus (ii)(b) of the lemma holds. Moreover, $|A - \ell| \le 2$. Since $A - \ell$ is one side of a $2$-separation, if it contains a single element, that element is a line of $M/a$. Thus (ii)(c) of the lemma holds. Next we observe that \begin{sublemma} \label{piab} $\sqcap(\{a,b\}, A - \ell) = 0.$ \end{sublemma} Since $\sqcap(\{a,b\}, X) = 1$, we see that $\sqcap(\{a,b\}, A - \ell) \le 1$. Assume $\sqcap(\{a,b\}, A - \ell) = 1$. Then $r((A - \ell) \cup \{a,b\}) = r(A - \ell) + 2$. But $r(A - \ell) + r(B \cup a) = r(M\backslash \ell) + 1$. Thus \begin{equation} \label{eqbb} r((A - \ell) \cup \{a,b\}) + r(B - b) \le r(M\backslash \ell) + 1. \end{equation} By \ref{rankla}, $r((A - \ell) \cup a) = r(A \cup a)$. Hence we obtain the contradiction\ that $(A \cup \{a,b\}, B- b)$ is a $2$-separation\ of $M$. Thus \ref{piab} holds. Now suppose that $(X,Y)$ is the unique non-trivial $2$-separation of $M\backslash \ell$. We complete the proof of the lemma by showing that \begin{sublemma} \label{lonely} $M/a$ has a unique $2$-separation $(A,B)$ with $\ell$ in $A$. Moreover, $A - \ell$ consists of a line of $M/a$. \end{sublemma} Let $(A_1, B_1)$ and $(A_2,B_2)$ be distinct 2-separations of $M/a$ with $\ell$ in $A_1 \cap A_2$. 
Then $b \in B_1 \cap B_2$. By (ii)(c), $|A_i - \ell| \le 2$. Suppose $|A_i - \ell| = 2$. Then, by (ii)(b), $(A_i - \ell, B_i \cup a)$ is a non-trivial $2$-separation\ of $M\backslash \ell$, so $A_i - \ell = Y$; a contradiction\ as $a \not\in A_i - \ell$. We deduce that $|A_i - \ell| = 1$, so $A_i - \ell$ consists of a line $m_i$ of $M/a$. Now $(\{m_1\}, B_1 \cup a)$ and $(\{m_2\}, B_2 \cup a)$ are 2-separations of $M\backslash \ell$. Thus $r(\{m_1,m_2\}) = 4$ otherwise one easily checks that $(\{m_1,m_2\}, (B_1\cap B_2) \cup a)$ is a $2$-separation\ of $M\backslash \ell$ that contradicts the uniqueness of $(X,Y)$. Now $\sqcap(a,X) = 0$, so $\sqcap(a,\{m_1,m_2\}) = 0$. Thus $r_{M/a}(\{m_1,m_2\}) = 4$. But, by (ii)(d) of the lemma, \begin{align*} 2 + 2 & = r_{M/a}(\{m_1,\ell\}) + r_{M/a}(\{m_2,\ell\})\\ & \ge r_{M/a}(\{m_1,m_2,\ell\}) + r_{M/a}(\{\ell\})\\ & \ge 4 + 1. \end{align*} This contradiction\ finishes the proof of \ref{lonely} and thereby completes the proof of the lemma. \end{proof}
4,016
142,722
en
train
0.91.25
\begin{lemma} \label{duh8} If $M_Y = P_8$, then $(X,Y)$ is not the only non-trivial $2$-separation of $M\backslash \ell$. \end{lemma} \begin{proof} Assume $(X,Y)$ is the unique such $2$-separation. By Lemma~\ref{duh85}, $M/a$ and $M/b$ have unique $2$-separations $(A_1,B_1)$ and $(A_2,B_2)$ with $\ell$ in $A_1 \cap A_2$. Moreover, $A_1 - \ell$ and $A_2 - \ell$ consist of lines $\ell_1$ and $\ell_2$ in $M/a$ and $M/b$; and $M\backslash \ell$ has $(A_1 - \ell, B_1 \cup a)$ and $(A_2 - \ell, B_2 \cup b)$ as $2$-separations. Assume $\ell_1 \neq \ell_2$. Then $\{b,\ell_2\} \subseteq B_1 \cup a$, so $\ell \in {\rm cl}(B_1 \cup a)$. Hence $(A_1 - \ell, B_1 \cup a \cup \ell)$ is a $2$-separation\ of $M$; a contradiction. Thus $\ell_1 = \ell_2$. Hence $r(\{\ell_1,b,\ell\}) = r(\{\ell_1,b\}) = 4$. But we also know that $r(\{\ell_1,a,\ell\}) = r(\{\ell_1,a\}) = 4$. By Lemma~\ref{duh85}(ii)(a) and (b), we see that $b \notin {\rm cl}_{M/a}(A_1)$, so $r(\{\ell_1,\ell,b,a\}) \ge 5$. Thus \begin{align*} 4+4 & = r(\{\ell_1,\ell, b\}) + r(\{\ell_1,\ell, a\})\\ & \ge r(\{\ell_1,\ell, b,a\}) + r(\{\ell_1,\ell\})\\ & \ge 5 + r(\{\ell_1,\ell\}). \end{align*} Therefore $r(\{\ell_1,\ell\}) \le 3$. As $\sqcap(\{\ell_1\},\{\ell\}) = 0$, we deduce that $r(\{\ell\}) = 1$; a contradiction\ to Lemma~\ref{pixl}. \end{proof} \begin{lemma} \label{duh5} If $M_Y = P_5$, then $(X,Y)$ is not the only non-trivial $2$-separation of $M\backslash \ell$. \end{lemma} \begin{proof} Assume $(X,Y)$ is the unique such $2$-separation. Label $Y$ so that $\sqcap(a,X) = 0$ and $\sqcap(b,X) = 1$. \begin{sublemma} \label{duh5a} $\sqcap(a,\ell) = 0$. \end{sublemma} Suppose $\sqcap(a,\ell) = 1$. Then $r(\{a,\ell\}) = 3$. Now $r(E - \ell) = r(E)$ and $r(E - \{\ell,a\}) = r(E) - 1$. Thus, by Lemma~\ref{cactus}, $\{a,\ell\}$ is a prickly 3-separator of $M$. 
Now $M\backslash \ell \downarrow a$ has a c-minor isomorphic to $N$ since it is the 2-sum of $M_X$ and the $2$-polymatroid consisting of the line $b$ with the point $p$ on it. But, by Lemma~\ref{pricklytime0}, $M\backslash \ell \downarrow a = M\downarrow a \backslash \ell$. Thus, by Lemma~\ref{portia}, $M\downarrow a$ is $3$-connected\ having a c-minor isomorphic to $N$; a contradiction. We conclude that \ref{duh5a} holds. By Lemma~\ref{duh85}, $M/a$ has a unique $2$-separation\ and it has the form $(\{\ell_1,\ell\}, E - \{\ell_1,\ell,a\})$ where $\ell_1$ is a line of $M/a$. Moreover, $r(\{\ell_1,\ell,a\}) = r(\{\ell_1,\ell\})$. Now $\sqcap(\ell,a) = 0$ and, by Lemma~\ref{pixl}, $\ell$ is a line of $M$. Thus \begin{equation} \label{ral} r(\{a,\ell\}) = 4. \end{equation} Now $M\backslash \ell \backslash a$, and hence $M\backslash a$, has a c-minor isomorphic to $N$. Thus $M\backslash a$ has a non-trivial $2$-separation\ $(U,V)$. Without loss of generality, we may assume that $\ell_1 \in U$ and $\ell \in V$ since $r(\{\ell_1,\ell\}) = 4 = r(\{\ell_1,\ell,a\})$. \begin{sublemma} \label{binV} $b \in V$. \end{sublemma} Suppose $b \in U$. Then, as $\sqcap(X,\ell) = 0$, we see that, unless $V = \{\ell,c\}$ for some point $c$, the partition $(U \cup \ell,V-\ell)$ is a $2$-separation\ of $M\backslash a$, so $(U \cup \ell \cup a,V-\ell)$ is a $2$-separation\ of $M$. Consider the exceptional case. Then $r(V-\ell) = r(V) - 2 = 1$. Now $r(M\backslash a, \ell) = r(M) - 1$ and $r(U) + r(V) = r(M) + 1$. We see that $r(U) = r(E - \{a,\ell,c\}) = r(M) - 2.$ Hence $\lambda_{M\backslash a,\ell}(\{c\}) = 0$; a contradiction. We conclude that \ref{binV} holds. We now have that $V \supseteq \{\ell,b\}$. Next observe that \begin{sublemma} \label{new2s} $(U,(V - \ell) \cup a)$ is a $2$-separation of $M\backslash \ell$, and $r((V - \ell) \cup a) = r(V)$. 
\end{sublemma} To see this, first note that, since $b \in V- \ell$, we have \begin{equation} \label{vee1} r((V- \ell) \cup a) \le r(V - \ell) + 1. \end{equation} We also have \begin{equation} \label{vee2} r(V - \ell) \le r(V) - 1 \end{equation} otherwise $r(V - \ell) = r(V)$ so $\ell \in {\rm cl}(E - \{a,\ell\})$. But $r(E - \{a,\ell\}) = r(E) - 1$, so $(E-a,\{a\})$ is a $2$-separation\ of $M$; a contradiction. Combining (\ref{vee1}) and (\ref{vee2}) gives \ref{new2s}. Since $(X,Y)$ is the unique non-trivial $2$-separation\ of $M\backslash \ell$, we deduce that $(V - \ell) \cup a = \{a,b\}$. Moreover, by \ref{new2s}, $r(\{a,b\}) = 3 = r(\{b,\ell\})$. It follows using submodularity that $r(\{a,b,\ell\}) = 4$. Thus $b \in {\rm cl}_{M/a}(\{\ell\})$. Hence $(\{\ell_1,\ell,b\}, E- \{\ell_1,\ell,a,b\})$ is a $2$-separation\ of $M/a$, which contradicts the fact that $(\{\ell_1,\ell\}, E - \{\ell_1,\ell,a\})$ is the unique $2$-separation\ of $M/a$. This completes the proof of Lemma~\ref{duh5}. \end{proof}
2,110
142,722
en
train
0.91.26
\begin{lemma} \label{duh5} If $M_Y = P_5$, then $(X,Y)$ is not the only non-trivial $2$-separation of $M\backslash \ell$. \end{lemma} \begin{proof} Assume $(X,Y)$ is the unique such $2$-separation. Label $Y$ so that $\sqcap(a,X) = 0$ and $\sqcap(b,X) = 1$. \begin{sublemma} \label{duh5a} $\sqcap(a,\ell) = 0$. \end{sublemma} Suppose $\sqcap(a,\ell) = 1$. Then $r(\{a,\ell\}) = 3$. Now $r(E - \ell) = r(E)$ and $r(E - \{\ell,a\}) = r(E) - 1$. Thus, by Lemma~\ref{cactus}, $\{a,\ell\}$ is a prickly 3-separator of $M$. Now $M\backslash \ell \downarrow a$ has a c-minor isomorphic to $N$ since it is the 2-sum of $M_X$ and the $2$-polymatroid consisting of the line $b$ with the point $p$ on it. But, by Lemma~\ref{pricklytime0}, $M\backslash \ell \downarrow a = M\downarrow a \backslash \ell$. Thus, by Lemma~\ref{portia}, $M\downarrow a$ is $3$-connected\ having a c-minor isomorphic to $N$; a contradiction. We conclude that \ref{duh5a} holds. By Lemma~\ref{duh85}, $M/a$ has a unique $2$-separation\ and it has the form $(\{\ell_1,\ell\}, E - \{\ell_1,\ell,a\})$ where $\ell_1$ is a line of $M/a$. Moreover, $r(\{\ell_1,\ell,a\}) = r(\{\ell_1,\ell\})$. Now $\sqcap(\ell,a) = 0$ and, by Lemma~\ref{pixl}, $\ell$ is a line of $M$. Thus \begin{equation} \label{ral} r(\{a,\ell\}) = 4. \end{equation} Now $M\backslash \ell \backslash a$, and hence $M\backslash a$, has a c-minor isomorphic to $N$. Thus $M\backslash a$ has a non-trivial $2$-separation\ $(U,V)$. Without loss of generality, we may assume that $\ell_1 \in U$ and $\ell \in V$ since $r(\{\ell_1,\ell\}) = 4 = r(\{\ell_1,\ell,a\})$. \begin{sublemma} \label{binV} $b \in V$. \end{sublemma} Suppose $b \in U$. Then, as $\sqcap(X,\ell) = 0$, we see that, unless $V = \{\ell,c\}$ for some point $c$, the partition $(U \cup \ell,V-\ell)$ is a $2$-separation\ of $M\backslash a$, so $(U \cup \ell \cup a,V-\ell)$ is a $2$-separation\ of $M$. Consider the exceptional case. Then $r(V-\ell) = r(V) - 2 = 1$. 
Now $r(M\backslash a, \ell) = r(M) - 1$ and $r(U) + r(V) = r(M) + 1$. We see that $r(U) = r(E - \{a,\ell,c\}) = r(M) - 2.$ Hence $\lambda_{M\backslash a,\ell}(\{c\}) = 0$; a contradiction. We conclude that \ref{binV} holds. We now have that $V \supseteq \{\ell,b\}$. Next observe that \begin{sublemma} \label{new2s} $(U,(V - \ell) \cup a)$ is a $2$-separation of $M\backslash \ell$, and $r((V - \ell) \cup a) = r(V)$. \end{sublemma} To see this, first note that, since $b \in V- \ell$, we have \begin{equation} \label{vee1} r((V- \ell) \cup a) \le r(V - \ell) + 1. \end{equation} We also have \begin{equation} \label{vee2} r(V - \ell) \le r(V) - 1 \end{equation} otherwise $r(V - \ell) = r(V)$ so $\ell \in {\rm cl}(E - \{a,\ell\})$. But $r(E - \{a,\ell\}) = r(E) - 1$, so $(E-a,\{a\})$ is a $2$-separation\ of $M$; a contradiction. Combining (\ref{vee1}) and (\ref{vee2}) gives \ref{new2s}. Since $(X,Y)$ is the unique non-trivial $2$-separation\ of $M\backslash \ell$, we deduce that $(V - \ell) \cup a = \{a,b\}$. Moreover, by \ref{new2s}, $r(\{a,b\}) = 3 = r(\{b,\ell\})$. It follows using submodularity that $r(\{a,b,\ell\}) = 4$. Thus $b \in {\rm cl}_{M/a}(\{\ell\})$. Hence $(\{\ell_1,\ell,b\}, E- \{\ell_1,\ell,a,b\})$ is a $2$-separation\ of $M/a$, which contradicts the fact that $(\{\ell_1,\ell\}, E - \{\ell_1,\ell,a\})$ is the unique $2$-separation\ of $M/a$. This completes the proof of Lemma~\ref{duh5}. \end{proof} By Lemma~\ref{cactus2}, $M\backslash \ell$ has no 2-element 2-separating set that is a prickly 3-separating set in $M$. \begin{lemma} \label{oldstep5} Let $\{a,b\}$ and $\{c,d\}$ be disjoint $2$-separating sets of $M\backslash \ell$ where each of $a$, $b$, $c$, and $d$ is a line, $r(\{a,b\}) = 3 = r(\{c,d\})$, and $\sqcap(\{a\},E - \{a,b,\ell\}) = 0$. Then either \begin{itemize} \item[(i)] $M/a$ is $3$-connected having a c-minor isomorphic to $N$; or \item[(ii)] $M/ \ell$ has a c-minor isomorphic to $N$ and $\ell \in {\rm cl}_{M/a}(\{c,d\})$. 
\end{itemize} \end{lemma} \begin{proof} Assume that the lemma fails. Let $Z = E - \{\ell,a,b,c,d\}$. Then, as neither $\{a,b\}$ nor $\{c,d\}$ is a prickly 3-separating set of $M$, by Lemma~\ref{cactus}, we see that $$\sqcap(Z \cup \{c,d\}, \{\ell\}) = 0 = \sqcap(Z \cup \{a,b\}, \{\ell\}),$$ so $\sqcap(Z,\{\ell\}) = 0$ and $\sqcap(\{a,b\},\{\ell\}) = 0$. It follows, as $\sqcap(\{a\}, Z) = 0$, that \begin{equation} \label{mazl} \sqcap_{M/a}(Z,\{\ell\}) = 0. \end{equation} Let $X = E - \{a,b,\ell\}$ and $Y = \{a,b\}$. Then $M\backslash \ell = M_X \oplus_2 M_Y$ where $M_Y$ has ground set $\{p,a,b\}$. Then $M_X$ has a c-minor isomorphic to $N$. As $\sqcap(\{a\}, X) = 0$, it follows that $M\backslash \ell/a$, and hence $M/a$, has a c-minor isomorphic to $N$. \begin{sublemma} \label{lcd} $\ell \in {\rm cl}_{M/a}(\{c,d\})$. \end{sublemma} Assume $\ell \not\in {\rm cl}_{M/a}(\{c,d\})$. Since $M/a$ is not $3$-connected, it has a $2$-separation\ $(A,B)$ with $\ell \in A$ and $b \in B$. Moreover, by Lemma~\ref{duh85}, we know that $(A-\ell, B\cup a)$ is a 2-separation of $M\backslash \ell$, that $|B - b| \ge 2$, that $|A - \ell| \le 2$, and that $\ell \in {\rm cl}_{M/a}(A - \ell)$. Suppose $|A - \ell| = 1$. Then, by Lemma~\ref{duh85} again, $A - \ell$ consists of a line $m$ of $M/a$ and $\ell \in {\rm cl}_{M/a}(\{m\})$. Thus $m \not \in \{c,d\}$, so $m \in Z$ and we have a contradiction to (\ref{mazl}). Now suppose that $|A - \ell| = 2$. Then $\ell \in {\rm cl}_{M/a}(A - \ell)$. Thus $\{c,d\} \neq A - \ell$. If $\{c,d\}$ avoids $A - \ell$, then we again get a contradiction\ to (\ref{mazl}). Thus $A- \ell$ meets $\{c,d\}$ in a single element. Then, by uncrossing the 2-separations $(A - \ell, B \cup a)$ and $(\{c,d\}, E - \{\ell,c,d\})$ of $M\backslash \ell$, we see that $(A - \ell) \cup \{c,d\}$ is a 3-element 2-separating set in $M\backslash \ell$. At most one element of $\{c,d\}$ is in $E(N)$. 
Thus $(A - \ell) \cup \{c,d\}$ is the non-$N$-side of a $2$-separation\ of $M\backslash \ell$. This is a contradiction\ as this set has three elements. We conclude that \ref{lcd} holds. We shall complete the proof of Lemma~\ref{oldstep5} by showing that $M/ \ell$ has a c-minor isomorphic to $N$. In the argument that follows, it helps to think in terms of the matroids that are naturally derived from the $2$-polymatroids we are considering. We know that $M\backslash \ell = M_X \oplus_2 M_Y$ where $M_Y$ has ground set $\{a,b,p\}$ with $p$ being the basepoint of the 2-sum. As $\{c,d\}$ is 2-separating in $M\backslash \ell$, it is also 2-separating in $M_X$. Thus $M_X = M_Z \oplus_2 M_W$ where $M_W$ has ground set $\{c,d,q\}$ with $q$ being the basepoint of this 2-sum. Now $\{c,d\}$ does not span $p$ otherwise $\{a,b,c,d\}$ is 2-separating in $M\backslash \ell$ and contains at most two elements of $N$, a contradiction\ to the definition of $Y$. By two applications of Lemma~\ref{claim1}, we see that $M_X$, and hence $M_Z$, has a c-minor isomorphic to $N$. Now $M\backslash \ell/a$ equals $M_X$ after relabelling the element $p$ of the latter by $b$. We will call this relabelled $2$-polymatroid $M_X'$. By \ref{lcd}, $M/a$ is obtained from $M'_X$ by adding $\ell$ to the closure of $\{c,d\}$ as a point or a line. Thus $M/a$ is the 2-sum with basepoint $q$ of $M'_Z$ and $M'_W$ where $M'_Z$ is obtained from $M_Z$ by relabelling $p$ as $b$, while $M'_W$ is obtained from $M_W$ by adding $\ell$. By (\ref{mazl}), $\ell$ is skew to $Z$ in $M/a$, so $\ell$ is skew to $q$ in $M'_W$. Now $\ell$ is not a line of $M'_W$, otherwise at least one of $c$ and $d$ is parallel to the basepoint $q$ in $M'_W$, so $M/a/ \ell$ and hence $M/\ell$ has a c-minor isomorphic to $N$. Hence $\ell$ is a point of $M'_W$, so $M'_W / \ell$ has rank $2$. It has no point parallel to $q$ otherwise $M/a /\ell$ has a c-minor isomorphic to $N$. 
Thus $M'_W/ \ell$ can be obtained from one of $P_1, P_2$, or $P_4$ by relabelling the element $p$ by $q$. In the first two cases, we can contract a point from $M'_W/ \ell$ to obtain a $2$-polymatroid consisting of two parallel points, one of which is $q$, so we get the contradiction\ that $M/a / \ell$ has a c-minor isomorphic to $N$. In the third case, deleting one of the lines, say $c$, of $M'_W/ \ell$ leaves $d$ as a line through $q$. Thus $\{d\}$ is $2$-separating in $M/a \backslash \ell \backslash c$. Compactifying $d$, we obtain a $2$-polymatroid having a c-minor isomorphic to $N$. Again we obtain the contradiction\ that $M/a \backslash \ell$ has a c-minor isomorphic to $N$. \end{proof}
3,515
142,722
en
train
0.91.27
\begin{lemma} \label{3connel} Let $\{a,b\}$ and $\{c,d\}$ be disjoint $2$-separating sets of $M\backslash \ell$ where each of $a$, $b$, $c$, and $d$ is a line, $r(\{a,b\}) = 3 = r(\{c,d\})$. Assume $M/ \ell$ has a c-minor isomorphic to $N$. Then at least one of $\sqcap(\{a\}, E - \{\ell,a,b\})$ and $\sqcap(\{b\}, E - \{\ell,a,b\})$ is not equal to one. \end{lemma} \begin{proof} As before, let $Z = E - \{a,b,c,d,\ell\}$. Since the theorem fails, it follows by Lemmas~\ref{Step1} and \ref{cactus2} that $M/ \ell$ is $2$-connected and neither $\{a,b\}$ nor $\{c,d\}$ is a prickly 3-separating set of $M$. Moreover, by Lemma~\ref{pixl}, $\ell$ is a line that is skew to each of $Z \cup \{a,b\}$ and $Z \cup \{c,d\}$. Thus, if $(R,B)$ is a $2$-separation\ of $M/ \ell$, then, by Lemma~\ref{skewer}, $\sqcap(R,\{\ell\}) \ge 1$ and $\sqcap(B,\{\ell\}) \ge 1$. By Lemma~\ref{general}, $$\sqcap(R,\{\ell\}) + \sqcap(B,\{\ell\}) + \lambda_{M/ \ell}(R) = \lambda_{M\backslash \ell}(R) + \lambda_M(\{\ell\}),$$ so \begin{equation} \label{eq1rb} \sqcap(R,\{\ell\}) + \sqcap(B,\{\ell\}) = \lambda_{M\backslash \ell}(R) + 1. \end{equation} As $\sqcap(\{\ell\}, Z \cup \{a,b\}) = 0 = \sqcap(\{\ell\}, Z \cup \{c,d\})$, it follows by Lemma~\ref{8.2.3} that both $R$ and $B$ meet both $\{a,b\}$ and $\{c,d\}$. Without loss of generality, we may assume that $\{a,c\} \subseteq R$ and $\{b,d\} \subseteq B$. Now suppose that $\sqcap(\{a\}, E - \{\ell,a,b\}) = 1 = \sqcap(\{b\}, E - \{\ell,a,b\})$. By Lemma~\ref{oswrules}(i), \begin{align*} \sqcap(\{a,c\}, \{b,d\}) + \sqcap(\{a\},\{c\}) + \sqcap(\{b\},\{d\}) & = \sqcap(\{a,b\}, \{c,d\}) + \sqcap(\{a\},\{b\})\\ &\hspace*{1.5in} + \sqcap(\{c\},\{d\}). \end{align*} As $\mu(\ell) = 2$, we see that $\sqcap(\{a,b\}, \{c,d\}) = 0$, so $\sqcap(\{a\},\{c\}) = 0 = \sqcap(\{b\},\{d\})$. Thus $$\sqcap(\{a,c\}, \{b,d\}) = \sqcap(\{a\},\{b\}) + \sqcap(\{c\},\{d\}) = 2.$$ Hence $\sqcap(R,B) \ge 2$, that is, $\lambda_{M\backslash \ell}(R) \ge 2$. 
Thus, by (\ref{eq1rb}), $\sqcap(R,\{\ell\}) = 2$ or $\sqcap(B,\{\ell\}) = 2$. By symmetry, we may assume the former. But, as $\sqcap(\{c,d\} \cup Z, \{\ell\}) = 0$ and $\sqcap(\{c,d\} \cup Z, \{a\}) = 1$, by Lemma~\ref{oswrules}(ii), \begin{align*} \sqcap(\{c,d\} \cup Z \cup a, \{\ell\}) + 1 & = \sqcap(\{c,d\} \cup Z \cup a, \{\ell\}) + \sqcap(\{c,d\} \cup Z, \{a\})\\ & = \sqcap(\{c,d\} \cup Z \cup \ell, \{a\}) + \sqcap(\{c,d\} \cup Z, \{\ell\})\\ & \le 2 + 0. \end{align*} Thus $\sqcap(\{c,d\} \cup Z \cup a, \{\ell\}) \le 1$. But $R \subseteq Z \cup \{a,c\}$ so $\sqcap(R, \{\ell\}) \le 1$; a contradiction. \end{proof} \begin{lemma} \label{22sep} The $2$-polymatroid $M\backslash \ell$ does not have two disjoint $2$-element $2$-separating sets. \end{lemma} \begin{proof} Assume that $M\backslash \ell$ has $\{a,b\}$ and $\{c,d\}$ as disjoint $2$-separating sets. Then each of $a, b, c$, and $d$ is a line and $r(\{a,b\}) = 3 = r(\{c,d\})$. As before, let $Z = E - \{a,b,c,d,\ell\}$. Suppose $Y$ is $\{a,b\}$ or $\{c,d\}$, and $X = E - \ell - Y$. Then $M\backslash \ell = M_X \oplus_2 M_Y$. By Lemmas~\ref{not23}, \ref{fourmost}, \ref{noone}, \ref{no7}, and \ref{no9}, we know that $M_Y$ is isomorphic to $P_5$, $P_6$, or $P_8$. By Lemma~\ref{pixl}, \begin{sublemma} \label{skewy} $\ell$ is skew to $X$, so $\ell$ is skew to each of $a$, $b$, $c$, and $d$. \end{sublemma} When $M_Y \cong P_n$, we shall say that $Y$ is a {\it type-$n$ $2$-separator} of $M\backslash \ell$. \begin{sublemma} \label{not66} Neither $\{a,b\}$ nor $\{c,d\}$ is of type-$6$. \end{sublemma} Assume the contrary. Suppose $\{a,b\}$ is of type-6. Then, by Lemma~\ref{3connel}, $M/ \ell$ does not have a c-minor isomorphic to $N$. Thus, by Lemma~\ref{oldstep5}, neither $\sqcap(\{c\},X)$ nor $\sqcap(\{d\},X)$ is $0$. Hence $\{c,d\}$ is also of type-6. Suppose $\alpha \in \{a,b\}$ and $\gamma \in \{c,d\}$. Then $r(Z \cup \{\alpha, \gamma\}) = r(Z) + 2$. Of course, $r(M) = r(Z) + 4$. 
Suppose $r(Z \cup \{\alpha, \gamma\} \cup \ell) = r(M)$. Then $\sqcap(Z \cup \{\alpha, \gamma\}, \ell) = 0$. Let the elements of $\{a,b,c,d\} - \{\alpha, \gamma\}$ be $\beta$ and $\delta$. In $M\backslash \beta \backslash \delta$, the set $\{\ell\}$ is $1$-separating. Thus $M\backslash \beta \backslash \delta\backslash \ell = M\backslash \beta \backslash \delta/ \ell$. As $M\backslash \beta \backslash \delta\backslash \ell$ has a c-minor isomorphic to $N$, so does $M/\ell$. We then get a contradiction\ to Lemma~\ref{3connel} since $\sqcap(a, E - \{\ell,a,b\}) = 1 = \sqcap(b, E - \{\ell,a,b\})$. We may now assume that $r(Z \cup \{\alpha, \gamma\} \cup \ell) \le r(M) - 1.$ By \ref{skewy}, $\ell$ is skew to $Z \cup \{a,b\}$, so $r(Z \cup a \cup \ell) = r(M) - 1.$ Thus, using the submodularity of $r$, we have \begin{align*} 2r(M) - 1 & = r(Z \cup a \cup \ell) + r(M)\\ & \le r(Z \cup \{a,c\} \cup \ell) + r(Z \cup \{a,d\} \cup \ell)\\ & \le 2r(M) - 2. \end{align*} This contradiction\ establishes \ref{not66}. We now know that each of $\{a,b\}$ and $\{c,d\}$ is of type-5 or of type-8. In particular, we may assume that $\sqcap(\{a\}, Z \cup \{c,d\}) = 0 = \sqcap(\{c\}, Z \cup \{a,b\})$. Since $\mu(\ell) = 2$ and $\{a,b,c,d\}$ contains at most two elements of $N$, we see that \begin{equation} \label{abcd6} r(\{a,b,c,d\}) = 6. \end{equation} By Lemma~\ref{oldstep5}, \begin{sublemma} \label{mell} $\ell \in {\rm cl}_{M/a}(\{c,d\})$ and $\ell \in {\rm cl}_{M/c}(\{a,b\})$. \end{sublemma} We deduce that $r(\{a,c,d,\ell\}) = r(\{a,c,d\}) = 5$ and $r(\{a,b,c,\ell\}) = r(\{a,b,c\}) = 5$. By submodularity and (\ref{abcd6}), \begin{align*} 10 & = r(\{a,c,d,\ell\}) + r(\{a,b,c,\ell\})\\ & \ge r(\{a,b,c,d,\ell\}) + r(\{a,c,\ell\})\\ & \ge 6 + 4 = 10. \end{align*} We conclude that \begin{equation} \label{eqnacl} r(\{a,c,\ell\}) = 4. \end{equation} Next we show the following. \begin{sublemma} \label{type5} Both $\{a,b\}$ and $\{c,d\}$ are of type-$5$. 
\end{sublemma} Suppose $\{a,b\}$ is of type-8. Then $\sqcap(\{b\}, Z \cup \{c,d\}) = 0$. Thus we can replace $a$ by $b$ in the argument used to prove (\ref{eqnacl}) to get that $r(\{b,c,\ell\}) = 4$. Hence \begin{align*} 4 + 4 & = r(\{a,c,\ell\}) + r(\{b,c,\ell\})\\ & \ge r(\{a,b,c,\ell\}) + r(\{c,\ell\})\\ & \ge 5 + 4. \end{align*} This contradiction\ and symmetry implies that \ref{type5} holds. Now, by Lemma~\ref{claim1}, $M\backslash \ell \backslash a$, and hence $M\backslash a$, has a c-minor isomorphic to $N$. Thus $M\backslash a$ is not $3$-connected. Let $(U,V)$ be a non-trivial $2$-separation\ of $M\backslash a$. Then we may assume that $\ell \in U$ and $c\in V$ otherwise $M$ has a $2$-separation. Suppose $d \in U$. Then, by \ref{skewy}, $(U \cup c, V - c)$ is a 1-separation of $M\backslash a$; a contradiction. Thus $d \in V$. By \ref{skewy} again, $r(U- \ell) = r(U) - 2$, so we obtain the contradiction\ that $(U - \ell, V \cup \ell \cup a)$ is a 1- or $2$-separation\ of $M$ unless $U - \ell$ consists of a single point, $u$, and $r(U) = 3$. In the exceptional case, since $M\backslash a\backslash \ell$ is $2$-connected, we see that $u \in {\rm cl}(V)$, so $(U - u, V \cup u)$ is a $1$-separation of $M\backslash a$; a contradiction. \end{proof} \begin{lemma} \label{muend} Suppose that $M$ has an element $\ell$ such that $M\backslash \ell$ has $N$ as a c-minor. Then the largest non-$N$-side in a $2$-separation of $M\backslash \ell$ has size exceeding two. \end{lemma} \begin{proof} Assume $\mu(\ell) = 2$. Then $M\backslash \ell = M_X \oplus_2 M_Y$ where $|Y| = 2$. In Lemma~\ref{old2}, we identified the nine possibilities for $M_Y$. We showed in Lemmas~\ref{not23}, \ref{fourmost}, \ref{noone}, \ref{no7}, and \ref{no9} that $M_Y$ must be isomorphic to $P_5$, $P_6$, or $P_8$. In Lemmas~\ref{duh}, \ref{duh8}, and \ref{duh5}, we showed that $(X,Y)$ cannot be the sole non-trivial 2-separation of $M\backslash \ell$. 
Lemma~\ref{22sep} completes the proof by showing that $M\backslash \ell$ cannot have a second non-trivial 2-separation. \end{proof} \begin{lemma} \label{dualmu} Suppose that $M$ has an element $\ell$ such that $M/\ell$ has $N$ as a c-minor. Then the largest non-$N$-side in a $2$-separation of $M/ \ell$ has size exceeding two. \end{lemma} \begin{proof} By Lemma~\ref{csm}, $(M/ \ell)^*$ has a c-minor isomorphic to $N^*$. By Lemma~\ref{compact0}, $(M/ \ell)^* = (M^*\backslash \ell)^{\flat}$. Thus $M^*\backslash \ell$ has a c-minor isomorphic to $N^*$. Let $Y$ be a largest non-$N$-side in a $2$-separation\ of $M/ \ell$. By Lemma~\ref{compact0} again, $Y$ is a largest non-$N^*$-side in a $2$-separation\ of $M^*\backslash \ell$. Replacing $(M,N)$ by $(M^*,N^*)$ in Lemma~\ref{muend}, we deduce that $|Y| > 2$. \end{proof}
3,880
142,722
en
train
0.91.28
\section{Finding a doubly labelled line} \label{fdll} Recall that we are assuming that $(M,N)$ is a counterexample to Theorem~\ref{modc} where $N$ is a $3$-connected $2$-polymatroid that is a c-minor of $M$. In this section, we prove some lemmas that will eventually enable us to deduce that $M$ has a doubly labelled line. The first step in this process is to prove the following elementary but useful lemma. \begin{lemma} \label{predichotomy} Suppose $y \in E(M) - E(N)$. If $y$ is not a doubly labelled element of $M$, and $M'$ has a special $N$-minor for some $M'$ in $\{M\backslash y,M/ y\}$, then $M'$ has $N$ as a c-minor. \end{lemma} \begin{proof} Since $y \in E(M) - E(N)$, some $M''$ in $\{M\backslash y,M/ y\}$ has $N$ as a c-minor. Since $y$ is not doubly labelled, we see that $M'' = M'$. \end{proof} The next lemma identifies an important dichotomy. \begin{lemma} \label{dichotomy} Let $M'$ be a c-minor of $M$ having $N$ as a c-minor and let $(X',Y')$ be a $2$-separation of $M'$ having $X'$ as the $N$-side. Assume that, for all elements $y$ of $Y'$, at least one of $M'\backslash y$ and $M'/y$ does not have a special $N$-minor. Then either \begin{itemize} \item[(i)] $\sqcap_{M'}(\{y\},X') = 1$ for all $y$ in $Y'$; or \item[(ii)] $\sqcap_{M'}(Y'-y,X') = 0$ for all $y$ in $Y'$. \end{itemize} \end{lemma} \begin{proof} Suppose $y \in Y'$. If $\sqcap_{M'}(\{y\},X') = 0$, then, by Lemma~\ref{obs1}, $\sqcap_{M'/y}(X',Y'-y) = 1$, so, by Lemma~\ref{claim1}(ii), $M'/y$ has a special $N$-minor. If $\sqcap_{M'}(Y'-y,X') = 1$, then, by Lemma~\ref{claim1}(i), $M'\backslash y$ has a special $N$-minor. By hypothesis, $M'\backslash y$ or $M'/y$ has no special $N$-minor. We deduce the following. \begin{sublemma} \label{dich1} Either $\sqcap_{M'}(\{y\},X') = 1$ or $\sqcap_{M'}(Y'-y,X') = 0$. \end{sublemma} Next we show that all the elements of $Y'$ behave similarly. \begin{sublemma} \label{dich2} If $\sqcap_{M'}(\{y\},X') = 1$, then $\sqcap_{M'}(\{z\},X') = 1$ for all $z$ in $Y'$. 
\end{sublemma} To see this, note first that $M' = M'_{X'} \oplus_2 M'_{Y'}$. Since $\sqcap_{M'}(\{y\},X') = 1$, it follows that $p \in {\rm cl}_{M'_{Y'}}(\{y\})$. Suppose $z \in Y'-y$. Then $p \in {\rm cl}_{M'_{Y'}}(Y' - z)$. Hence $\sqcap_{M'}(X',Y'-z) = 1$ so $M'\backslash z$ has a special $N$-minor. Thus $M'/z$ does not have a special $N$-minor. Hence, by Lemma~\ref{claim1}(ii), $\sqcap_{M'/z}(X',Y'-z) = 0$, so, by Lemma~\ref{obs1}, $\sqcap_{M'}(X',\{z\}) = 1$, and \ref{dich2} holds. Now suppose that $\sqcap_{M'}(\{y\},X') = 0$. Then, by \ref{dich2}, $\sqcap_{M'}(\{z\},X') = 0$ for all $z$ in $Y'$. Thus $M'/z$ has a special $N$-minor for all $z$ in $Y'$. The hypothesis implies that $M'\backslash z$ has no special $N$-minor for all $z$ in $Y'$. Then, by Lemma~\ref{claim1}(i), $\sqcap_{M'}(Y'-z,X') = 0$ and the lemma follows. \end{proof}
1,224
142,722
en
train
0.91.29
The next lemma describes what happens when (i) of Lemma~\ref{dichotomy} holds. \begin{lemma} \label{p63rev} Suppose $M\backslash \ell$ has $N$ as a c-minor. Let $(X,Y)$ be a $2$-separation of $M\backslash \ell$ in which $X$ is the $N$-side and $|Y| \ge 3$. Then \begin{itemize} \item[(i)] $Y$ contains a doubly labelled element; or \item[(ii)] $\sqcap(\{y\},X) \neq 1$ for some $y$ in $Y$; or \item[(iii)] $Y$ contains an element $y$ such that $M\backslash y$ has $N$ as a c-minor and every non-trivial $2$-separation of $M\backslash y$ has the form $(Z_1,Z_2)$ where $Z_1$ is the $N$-side and $Z_2 \subseteq Y - y$. \end{itemize} \end{lemma} \begin{proof} Suppose that $\sqcap(\{y\},X) = 1$ for all $y$ in $Y$ and that $Y$ does not contain any doubly labelled elements. As usual, we write $M\backslash \ell$ as the $2$-sum with basepoint $p$ of the $2$-polymatroids $M_X$ and $M_Y$ having ground sets $X \cup p$ and $Y \cup p$, respectively. First we show that \begin{sublemma} \label{nopoint} $Y$ does not contain a point. \end{sublemma} Assume that $Y$ does contain a point, $z$. Then, since $\sqcap(\{z\},X) = 1$, we see that $z$ is parallel to $p$ in $M_Y$. By Proposition~\ref{connconn}, $M\backslash \ell\backslash z$ is $2$-connected. Hence $M\backslash z$ is $2$-connected. Also, in $M_X$ and $M_Y$, the sets $X$ and $Y-z$ span $p$, and hence span $z$. We show next that \begin{sublemma} \label{nopointsub} $M\backslash z$ is $3$-connected. \end{sublemma} Suppose that $M\backslash z$ has a $2$-separation $(R,B)$ where $\ell \in R$. Then $(R-\ell,B)$ is 2-separating in $M\backslash z \backslash \ell$. Note that $r(M\backslash \ell) = r(M)$, so $r(M\backslash \ell\backslash z) = r(M)$. We have $$r(R) + r(B) = r(M\backslash z) + 1.$$ Thus $$r(R - \ell) + r(B) \le r(M\backslash z,\ell) +1.$$ Now $R \neq \{\ell\}$ otherwise $Y - z \subseteq B$ and we obtain the contradiction that $(R,B \cup z)$ is a $2$-separation of $M$. 
Observe that, since $M\backslash \ell\backslash z$ is $2$-connected, $r(R - \ell) = r(R)$. As $M$ is $3$-connected, neither $B$ nor $R - \ell$ spans $z$. Thus neither $X$ nor $Y - z$ is contained in $B$ or $R - \ell$. Hence $(X,Y-z)$ and $(R - \ell,B)$ cross. Now $\lambda_{M\backslash \ell \backslash z}(Y-z) = \lambda_{M\backslash \ell}(Y) = 1$ and $\lambda_{M\backslash \ell \backslash z}(B) = 1$. Thus, by uncrossing, $\lambda_{M\backslash \ell \backslash z}(B\cap (Y-z)) = 1$. Since $\ell \in {\rm cl}(R - \ell)$ and $z \in {\rm cl}(X)$, we deduce that $\lambda_M(B \cap (Y-z)) = 1$. As $M$ is $3$-connected, it follows that $B \cap (Y - z)$ consists of a single point $y$. Then, by assumption, $\sqcap(X,\{y\}) = 1$. But $\sqcap(X,\{z\}) = 1$. Thus $y$ is parallel to $p$ in $M_Y$. Hence $y$ and $z$ are parallel points in $M$; a contradiction. We conclude that \ref{nopointsub} holds. To complete the proof of \ref{nopoint}, we shall show that $M\backslash z$ has a special $N$-minor. We know that $M\backslash \ell = M_X \oplus_2 M_Y$ where $z$ is parallel in $M_Y$ to the basepoint $p$ of the $2$-sum. Moreover, by Lemma~\ref{p69}, $M_X$ has a special $N$-minor. Now $M\backslash \ell\backslash z$ is $2$-connected and, by \cite[Proposition 3.1]{hall}, $M\backslash \ell\backslash z = M_X \oplus_2 (M_Y \backslash z)$. Hence $M\backslash z$ has a special $N$-minor. Thus $M\backslash z$ is $3$-connected having a c-minor isomorphic to $N$; a contradiction. We deduce that \ref{nopoint} holds. We now know that every element of $Y$ is a line $y$ with $\sqcap(X,\{y\}) = 1$. Hence, in $M_Y$, the basepoint $p$ lies on $y$. Thus, for all $y$ in $Y$, we see that $M\backslash \ell\backslash y$ is $2$-connected. Then, by Lemma~\ref{p49} again, we deduce that \begin{sublemma} \label{usey} for all $y$ in $Y$, both $M\backslash \ell\backslash y$ and $M\backslash y$ have special $N$-minors. \end{sublemma} Since every line in $Y$ contains $p$, it follows that $M_Y/p$ is a matroid. 
Next we show that \begin{sublemma} \label{caseD1.1} $M_Y/p$ has a circuit. \end{sublemma} Assume that $M_Y/p$ has no circuits. Let $y$ and $y'$ be two distinct elements of $Y$. Then $r(X \cup (Y - \{y,y'\})) = r(X) + |Y - \{y,y'\}|$ and $r(X \cup Y) = r(X) + |Y|$. As a step towards \ref{caseD1.1}, we show that \begin{sublemma} \label{caseD1.1sub} $\sqcap((X\cup Y) - \{y,y'\},\{\ell\}) = 0$. \end{sublemma} Suppose that $\sqcap((X\cup Y) - \{y,y'\},\{\ell\}) \ge 1$. Then, as $r(Y) = |Y| + 1$, \begin{align*} \lambda_M(\{y,y'\}) & = r(X \cup (Y - \{y,y'\})\cup \ell) + r(\{y,y'\}) - r(M)\\ & \le r(X) + |Y - \{y,y'\}| +1 + 3 - r(M)\\ & = r(X) + r(Y) - r(M) + 1 = 2. \end{align*} As $M$ is $3$-connected, we see that $\lambda_M(\{y,y'\}) = 2$, so equality holds throughout the last chain of inequalities. Thus $\{y,y'\}$ is a prickly 3-separator of $M$ and $\lambda_{M\backslash \ell}(\{y,y'\}) = 1$. By Lemma~\ref{portia}, $M\downarrow y$ is $3$-connected. By Lemma~\ref{dennisplus}(vi), $(M\backslash \ell)\downarrow y = M_X \oplus_2 (M_Y \downarrow y)$. Thus $\sqcap_{M\backslash \ell \downarrow y}(X,Y-y) = 1$ so, by Lemma~\ref{claim1}(iii), $(M\downarrow y)\backslash \ell$, and hence $M\downarrow y$, has a special $N$-minor. This contradiction implies that \ref{caseD1.1sub} holds for all distinct $y$ and $y'$ in $Y$. As the next step towards proving \ref{caseD1.1}, we now show that \begin{sublemma} \label{lcn} $M/ \ell$ has a c-minor isomorphic to $N$. \end{sublemma} In $M\backslash \ell$, deleting all but one element, $y$, of $Y$ leaves the $2$-polymatroid that, when $y$ is compactified, equals $M_X$ with $p$ relabelled as $y$. Hence $M\backslash \ell \backslash (Y - y)$ has a c-minor isomorphic to $N$. By \ref{caseD1.1sub}, since $|Y| \ge 3$, we deduce that $\{\ell\}$ is 1-separating in $M\backslash (Y - y)$. Hence $M\backslash (Y - y) \backslash \ell = M\backslash (Y - y) / \ell$, so, by \ref{usey}, we deduce that \ref{lcn} holds. 
Still continuing towards the proof of \ref{caseD1.1}, next we observe that \begin{sublemma} \label{linear} $\ell$ is a line of $M$. \end{sublemma} Suppose $\ell$ is a point. By Lemma~\ref{newbix}, $M/ \ell$ is $2$-connected having one side of every $2$-separation being a pair of points of $M$ that are parallel in $M/ \ell$. By \ref{lcn}, $M$ must have such a pair $\{u,v\}$ of points. Then both $M\backslash u$ and $M\backslash v$ have c-minors isomorphic to $N$. By \cite[Lemma 4.2]{oswww}, $M$ has a triad of points containing $\ell$ and one of $u$ and $v$, say $u$. Let $w$ be the third point in this triad. Then $M\backslash \ell$ has $\{u,w\}$ as a series pair of points, so $M\backslash \ell/u$, and hence $M/u$, has a c-minor isomorphic to $N$. Thus the point $u$ contradicts Lemma~\ref{Step0}. By \ref{lcn}, $M/ \ell$ has a 2-separation $(U,V)$. Thus $r(U \cup \ell) + r(V \cup \ell) - r(M) = 3$. By symmetry, we may assume that $U \subseteq (X \cup Y) - \{y,y'\}$ for some $y'$ in $Y - y$. Then, by \ref{caseD1.1sub} and \ref{linear}, $r(U \cup \ell) = r(U) + 2$. Hence $(U, V \cup \ell)$ is a $2$-separation\ of $M$. This contradiction\ completes the proof of \ref{caseD1.1}. Choose $y$ in $Y$ such that $y$ is in a circuit of $M_Y/p$ and $y \in E(M) - E(N)$. By \ref{usey}, $M\backslash y$ has a special $N$-minor. Thus, by Lemma~\ref{predichotomy}, $M\backslash y$ has $N$ as a c-minor. Now $r(M\backslash \ell\backslash y) = r(M\backslash \ell) = r(M) = r(M\backslash y)$. Hence $\ell \in {\rm cl}_{M\backslash y}(X \cup (Y - y))$ and $M\backslash \ell \backslash y$ is $2$-connected. Next we show the following. \begin{sublemma} \label{caseD1.2} Every non-trivial $2$-separation of $M\backslash y$ has the form $(X \cup Y' \cup \ell, Y'')$ where $Y'$ and $Y''$ are disjoint and $Y' \cup Y'' = Y-y$. \end{sublemma} Let $(A,B)$ be a non-trivial $2$-separation of $M\backslash y$ that is not in the stated form. Without loss of generality, $\ell \in A$. Then $X \not \subseteq A$. 
Since $M\backslash \ell\backslash y$ is $2$-connected having the same rank as $M\backslash y$, it follows that $r(A - \ell) = r(A)$ and $(A- \ell,B)$ is a 2-separation of $M\backslash \ell\backslash y$. We also know that $(X,Y-y)$ is a 2-separation of $M\backslash \ell \backslash y$. Now $\ell \not\in {\rm cl}(X)$ and $\ell \not\in {\rm cl}(Y-y)$. But $\ell \in {\rm cl}(A - \ell)$, so $(A - \ell) \cap (Y -y) \neq \emptyset \neq (A - \ell) \cap X$. By uncrossing, $\lambda_{M\backslash \ell\backslash y}(B \cap X) = 1$. As $\ell \in {\rm cl}(A - \ell)$ and $y \in {\rm cl}(Y-y)$, we deduce that $\lambda_M(B\cap X) = 1$. Thus $B \cap X$ consists of a single point $x$ of $M$. Then $B \cap (Y-y) \neq \emptyset$. Therefore, by uncrossing again, $\lambda_{M\backslash \ell\backslash y}(X \cap (A - \ell)) = 1$, so $\lambda_{M\backslash \ell}(X \cap (A - \ell)) = 1$. Thus $(X-x,Y\cup x)$ is a 2-separation of $M\backslash \ell$. If $r(Y \cup x) = r(Y)$, then $x$ is parallel to $p$ in $M_X$. Hence, we see that $x$ lies on $y$. Then $M\backslash x$ is $3$-connected having a special $N$-minor; a contradiction. Thus we may assume that $r(Y \cup x) = r(Y) + 1$. Then $r(X- x) = r(X) - 1$. Hence, in $M_X$, the points $p$ and $x$ are a series pair. Thus $M_X$ is the 2-sum with basepoint $q$ of a $2$-polymatroid $M'_X$, say, and a copy of $U_{2,3}$ with ground set $\{q,p,x\}$. Moreover, every element of $Y$ is a line through $p$ in $M_Y$. Thus we see that both $M\backslash y$ and $M/y$ have special $N$-minors; a contradiction. We conclude that \ref{caseD1.2} holds, so (iii) of the lemma holds, and the proof of the lemma is complete. \end{proof}
3,885
142,722
en
train
0.91.30
\begin{lemma} \label{prep65rev} Suppose $M\backslash \ell$ has $N$ as a c-minor. Let $(X,Y)$ be a $2$-separation of $M\backslash \ell$ in which $X$ is the $N$-side and $|Y| \ge 3$. Let $M_X \oplus_2 M_Y$ be the associated $2$-sum decomposition of $M\backslash \ell$ with respect to the basepoint $p$. Then \begin{itemize} \item[(i)] $Y$ contains a doubly labelled element; or \item[(ii)] $\sqcap(Y-y,X) > 0$ for some $y$ in $Y$; or \item[(iii)] $r(X\cup \ell \cup y_0) > r(X \cup y_0)$ for some $y_0$ in $Y$, and $M/y_0$ has a special $N$-minor. Moreover, either \begin{itemize} \item[(a)] every non-trivial $2$-separation of $M/y_0$ has the form $(Z_1,Z_2)$ where $Z_1$ is the $N$-side and $Z_2 \subseteq Y - y_0$; or \item[(b)] $M_X$ is the $2$-sum with basepoint $q$ of two $2$-polymatroids, one of which is a copy of $U_{2,3}$ with ground set $\{p,z,q\}$. \end{itemize} \end{itemize} \end{lemma} \begin{proof} Assume that neither (i) nor (ii) holds. Suppose $y \in Y$. As $\sqcap(Y,X) = 1$, it follows that $r(Y) > r(Y-y)$ so \begin{sublemma} \label{*1} $r(Y-y) \le r(Y) - 1.$ \end{sublemma} Next we show that \begin{sublemma} \label{MYy} $\lambda_{M_Y}(\{y\}) = \lambda_{M\backslash (X \cup \ell)}(\{y\}) + 1.$ \end{sublemma} We see that $\lambda_{M_Y}(\{y\}) = r_M(\{y\}) + r_{M_Y}((Y-y) \cup p) - r(M_Y).$ Since $\sqcap(Y-y, X) = 0$, we deduce that $r_{M_Y}((Y-y) \cup p) = r_M(Y-y) + 1.$ As $M_Y$ is $2$-connected, $r(Y) = r(M_Y)$ and \ref{MYy} follows. We now extend \ref{*1} as follows. \begin{sublemma} \label{*2} Let $\{y_1,y_2,\dots,y_k\}$ be a subset of $Y$. Then $$r(Y - \{y_1,y_2,\dots,y_k\}) \le r(Y) - k.$$ \end{sublemma} By \ref{*1}, $r(Y-y_1) \le r(Y) - 1$ and $r(Y-y_2) \le r(Y) - 1$. Thus, by submodularity, $r(Y-\{y_1,y_2\}) \le r(Y) - 2$. Repeating this argument gives \ref{*2}. Next we show the following. \begin{sublemma} \label{mcony} For all $y$ in $Y$, the $2$-polymatroid $M\backslash \ell/y$ has a special $N$-minor and $\lambda_{M\backslash \ell/y}(X) = 1$. 
\end{sublemma} Let $M' = M\backslash \ell$. By Corollary~\ref{general2}, \begin{align} \label{Yyy} \lambda_{M'/y}(X) & = \lambda_{M'\backslash y}(X) - \sqcap_{M'}(X,y) - \sqcap_{M'}(Y-y,y) + r(\{y\}) \nonumber \\ & = \lambda_{M'\backslash y}(X) - r_{M'}(Y-y) + r_{M'}(Y) \text{~as $\sqcap(X,Y-y') = 0$ for all $y'$ in $Y$;} \nonumber\\ & = r_{M'}(Y) - r_{M'}(Y-y). \end{align} But \begin{align*} 1 & = \lambda_{M'}(X)\\ & = r(X) + r(Y) - r(M')\\ & \ge r(X \cup y) - r(\{y\}) + r(Y) - r(\{y\}) - r(M') + r(\{y\})\\ & = \lambda_{M'/y}(X). \end{align*} We conclude, using (\ref{Yyy}) that, since $r(Y) \neq r(Y-y)$, we have $\lambda_{M'/y}(X) = 1$ for all $y$ in $Y$. Then, by Lemma~\ref{claim1}(ii), $M'/y$ has a special $N$-minor. Hence $M\backslash \ell /y$ has a special $N$-minor, that is, \ref{mcony} holds. \begin{sublemma} \label{neworg} If $y \in Y$ and $\ell$ is in a parallel pair of points in $M/y$, then $r(X \cup \ell \cup y) = r(X \cup y)$. \end{sublemma} To see this, observe that, as $M$ is $3$-connected, $\ell \not\in {\rm cl}_M(Y)$. Thus $\ell$ is parallel to a point of $X$ in $M/y$, and \ref{neworg} follows. \begin{sublemma} \label{subsume} Let $Y = \{y_1,y_2,\dots,y_n\}$. If $r(X \cup \ell \cup y_i) = r(X \cup y_i)$ for all $i$ in $\{1,2,\dots,n\}$, then $\{y_{n-1},y_n\}$ is a prickly $3$-separator of $M$, and $M\downarrow y_n$ is $3$-connected having a special $N$-minor. \end{sublemma} First observe that each $y_i$ in $Y$ is a line for if $y_i$ is a point, then $$r(X \cup \ell \cup y_i) = r(X \cup y_i) = r(X) + r(\{y_i\}) = r(X) + 1.$$ As $r(Y-y_i) \le r(Y) - 1$, we deduce that $(X \cup \ell \cup y_i,Y-y_i)$ is a 2-separation of $M$; a contradiction. Continuing with the proof of \ref{subsume}, next we show the following. \begin{sublemma} \label{subsume2} For $1 \le k \le n-1$, \begin{align*} r(X \cup \ell \cup \{y_1,y_2,\dots,y_k\}) & = r(X) + 1 + k \text{~~and}\\ r(Y - \{y_1,y_2,\dots,y_k\}) & = r(Y) - k. 
\end{align*} \end{sublemma} We argue by induction on $k$. By assumption, $r(X \cup \ell \cup y_1) = r(X) + r(\{y_1\}) = r(X) + 2$. Moreover, $r(Y-y_1) \le r(Y) - 1$. Equality must hold otherwise we get the contradiction that $(X \cup \ell \cup y_1, Y- y_1)$ is a 2-separation of $M$. We deduce that the result holds for $k = 1$. Assume it holds for $k < m$ and let $k = m\ge 2$. Then \begin{multline*} r(X \cup \{y_1,y_2,\dots,y_{m-1}\} \cup \ell) + r(X \cup \{y_2,y_3,\dots,y_{m}\} \cup \ell)\\ \shoveleft{\hspace*{1in}\ge r(X \cup \{y_2,y_3,\dots,y_{m-1}\} \cup \ell) + r(X \cup \{y_1,y_2,\dots,y_{m}\} \cup \ell).} \end{multline*} If $m = 2$, then $r(X \cup \{y_2,y_3,\dots,y_{m-1}\} \cup \ell) = r(X \cup \ell) \ge r(X) + 1$. If $m>2$, then $r(X \cup \{y_2,y_3,\dots,y_{m-1}\} \cup \ell) = r(X) + m - 1$ by the induction assumption. Thus \begin{align} \label{eq1} r(X \cup \{y_1,y_2,\dots,y_m\}\cup \ell) & \le r(X) + m + r(X) + m - (r(X) + m-1) \nonumber \\ & = r(X) + m + 1. \end{align} But \begin{equation} \label{eq2} r(Y - \{y_1,y_2,\dots,y_m\}) \le r(Y) - m. \end{equation} It follows that equality must hold in (\ref{eq1}) and (\ref{eq2}). Thus, by induction, \ref{subsume2} holds. By \ref{subsume2}, $r(Y - \{y_1,y_2,\dots,y_{n-1}\}) = r(Y) - (n-1).$ But $r(Y - \{y_1,y_2,\dots,y_{n-1}\}) = r(\{y_n\}) = 2$. Thus $r(Y) = n+1$, and it follows by \ref{subsume2} that $r(\{y_{n-1},y_n\}) = 3$ and $\{y_{n-1},y_n\}$ is a prickly 3-separating set in $M$. Hence, by Lemma~\ref{portia}, $M\downarrow y_n$ is $3$-connected. Recall that \begin{equation*} r_{M\downarrow y_n}(Z) = \begin{cases} r(Z), & \text{if $r(Z \cup y_n) > r(Z)$; and}\\ r(Z) - 1, & \text{otherwise.} \end{cases} \end{equation*} Thus \begin{align*} \sqcap_{M\downarrow y_n}(X,Y-y_n) & = r_{M\downarrow y_n}(X) + r_{M\downarrow y_n}(Y -y_n) - r_{M\downarrow y_n}(X \cup (Y-y_n))\\ & = r(X) + r(Y-y_n) - r(M) + 1\\ & = r(X) + r(Y) - r(M) \text{~~by \ref{subsume2};}\\ & = 1. 
\end{align*} It follows by Lemma~\ref{dennisplus}(vi) that $(M\backslash \ell)\downarrow y_n = M_X \oplus_2 M_Y \downarrow y_n$. Then, by Lemma~\ref{claim1}(iii), $(M\backslash \ell)\downarrow y_n$ has a special $N$-minor. We deduce that $M\downarrow y_n$ is $3$-connected having a special $N$-minor. Thus \ref{subsume} holds. Since we have assumed that the theorem fails, it follows by \ref{subsume} that, for some element $y_0$ of $Y$, $$r(X \cup \ell \cup y_0) > r(X \cup y_0).$$ By \ref{mcony}, $M/y_0$ has a special $N$-minor. Thus $M/y_0$ is not $3$-connected. Moreover, by \ref{neworg}, the element $\ell$ is not in a pair of parallel points of $M/y_0$. Let $(A \cup \ell, B)$ be a $2$-separation\ of $M/y_0$ with $\ell \not\in A$. Next we show that \begin{sublemma} \label{2sepab} $(A,B)$ is an exact $2$-separation of $M/y_0\backslash \ell$, and $\ell \in {\rm cl}_{M/y_0}(A).$ \end{sublemma} If $(A,B)$ is not exactly 2-separating in $M/y_0\backslash \ell$, then, by Proposition~\ref{connconn}, $M_Y/y_0$ is not $2$-connected, so we obtain the contradiction\ that $Y$ contains a doubly labelled element. Thus $r_{M/y_0}(A \cup \ell) = r_{M/y_0}(A)$ and \ref{2sepab} holds. We shall show that \begin{sublemma} \label{crosspath} either (iii)(b) holds, or $(A,B)$ does not cross $(X,Y-y_0)$. \end{sublemma} Assume each of $A$ and $B$ meets each of $X$ and $Y - y_0$. Then, by uncrossing, $\lambda_{M\backslash \ell/y_0}(X \cap B) = 1$. 
But $\sqcap(X,\{y_0\}) = 0$, so $r_M(X \cap B) = r_{M/y_0}(X \cap B).$ Also $r_M((Y-y_0) \cup A \cup \ell \cup y_0) = r_{M/y_0}((Y-y_0) \cup A \cup \ell) + r(\{y_0\}).$ Then \begin{multline*} r(X\cap B) + r((Y-y_0) \cup A \cup \ell \cup y_0) - r(M)\\ \shoveleft{= r_{M/y_0}(X \cap B) + r_{M/y_0}((Y-y_0) \cup A \cup \ell) + r(\{y_0\}) - r(M/y_0) - r(\{y_0\})}\\ \shoveleft{= \lambda_{M/y_0}(X \cap B)}\\ \shoveleft{= \lambda_{M/y_0\backslash \ell}(X \cap B) \text{~~ as $\ell \in {\rm cl}_{M/y_0}(A)$;}}\\ \shoveleft{= 1.}\\ \end{multline*} Since $M$ is $3$-connected, it follows that $X \cap B$ consists of a point $z$ of $M$.
3,864
142,722
en
train
0.91.31
By \ref{subsume2}, $r(Y - \{y_1,y_2,\dots,y_{n-1}\}) = r(Y) - (n-1).$ But $r(Y - \{y_1,y_2,\dots,y_{n-1}\}) = r(\{y_n\}) = 2$. Thus $r(Y) = n+1$, and it follows by \ref{subsume2} that $r(\{y_{n-1},y_n\}) = 3$ and $\{y_{n-1},y_n\}$ is a prickly 3-separating set in $M$. Hence, by Lemma~\ref{portia}, $M\downarrow y_n$ is $3$-connected. Recall that \begin{equation*} r_{M\downarrow y_n}(Z) = \begin{cases} r(Z), & \text{if $r(Z \cup y_n) > r(Z)$; and}\\ r(Z) - 1, & \text{otherwise.} \end{cases} \end{equation*} Thus \begin{align*} \sqcap_{M\downarrow y_n}(X,Y-y_n) & = r_{M\downarrow y_n}(X) + r_{M\downarrow y_n}(Y -y_n) - r_{M\downarrow y_n}(X \cup (Y-y_n))\\ & = r(X) + r(Y-y_n) - r(M) + 1\\ & = r(X) + r(Y) - r(M) \text{~~by \ref{subsume2};}\\ & = 1. \end{align*} It follows by Lemma~\ref{dennisplus}(vi) that $(M\backslash \ell)\downarrow y_n = M_X \oplus_2 M_Y \downarrow y_n$. Then, by Lemma~\ref{claim1}(iii), $(M\backslash \ell)\downarrow y_n$ has a special $N$-minor. We deduce that $M\downarrow y_n$ is $3$-connected having a special $N$-minor. Thus \ref{subsume} holds. Since we have assumed that the theorem fails, it follows by \ref{subsume} that, for some element $y_0$ of $Y$, $$r(X \cup \ell \cup y_0) > r(X \cup y_0).$$ By \ref{mcony}, $M/y_0$ has a special $N$-minor. Thus $M/y_0$ is not $3$-connected. Moreover, by \ref{neworg}, the element $\ell$ is not in a pair of parallel points of $M/y_0$. Let $(A \cup \ell, B)$ be a $2$-separation\ of $M/y_0$ with $\ell \not\in A$. Next we show that \begin{sublemma} \label{2sepab} $(A,B)$ is an exact $2$-separation of $M/y_0\backslash \ell$, and $\ell \in {\rm cl}_{M/y_0}(A).$ \end{sublemma} If $(A,B)$ is not exactly 2-separating in $M/y_0\backslash \ell$, then, by Proposition~\ref{connconn}, $M_Y/y_0$ is not $2$-connected, so we obtain the contradiction\ that $Y$ contains a doubly labelled element. Thus $r_{M/y_0}(A \cup \ell) = r_{M/y_0}(A)$ and \ref{2sepab} holds. 
We shall show that \begin{sublemma} \label{crosspath} either (iii)(b) holds, or $(A,B)$ does not cross $(X,Y-y_0)$. \end{sublemma} Assume each of $A$ and $B$ meets each of $X$ and $Y - y_0$. Then, by uncrossing, $\lambda_{M\backslash \ell/y_0}(X \cap B) = 1$. But $\sqcap(X,\{y_0\}) = 0$, so $r_M(X \cap B) = r_{M/y_0}(X \cap B).$ Also $r_M((Y-y_0) \cup A \cup \ell \cup y_0) = r_{M/y_0}((Y-y_0) \cup A \cup \ell) + r(\{y_0\}).$ Then \begin{multline*} r(X\cap B) + r((Y-y_0) \cup A \cup \ell \cup y_0) - r(M)\\ \shoveleft{= r_{M/y_0}(X \cap B) + r_{M/y_0}((Y-y_0) \cup A \cup \ell) + r(\{y_0\}) - r(M/y_0) - r(\{y_0\})}\\ \shoveleft{= \lambda_{M/y_0}(X \cap B)}\\ \shoveleft{= \lambda_{M/y_0\backslash \ell}(X \cap B) \text{~~ as $\ell \in {\rm cl}_{M/y_0}(A)$;}}\\ \shoveleft{= 1.}\\ \end{multline*} Since $M$ is $3$-connected, it follows that $X \cap B$ consists of a point $z$ of $M$. Now $\lambda_{M\backslash \ell /y_0}((Y- y_0) \cup z) = 1$, so \begin{align*} 1 & = r_{M/y_0}((Y-y_0) \cup z) + r_{M/y_0}(A \cap X) - r(M/y_0)\\ & = r(Y \cup z) - r(\{y_0\}) + r((A \cap X)\cup y_0) - r(\{y_0\}) - r(M) + r(\{y_0\})\\ & = r(Y \cup z) + r(A \cap X) - r(M\backslash \ell) \text{~~since $\sqcap(X,\{y_0\}) = 0$.}\\ \end{align*} Thus $Y \cup z$ is $2$-separating in $M\backslash \ell$. If $r(Y \cup z) = r(Y)$, then $z$ is parallel to the basepoint $p$ of the 2-sum. Hence each element of $Y$ is doubly labelled; a contradiction. Thus we may assume that $r(Y \cup z) = r(Y) + 1$. Then $r(X- z) = r(X) - 1$. Now $M_X$ is $2$-connected, so $r(M_X) = r(X)$ and $M_X$ has $\{p,z\}$ as a series pair of points. It follows that $M_X$ is the 2-sum with basepoint $q$ of a $2$-polymatroid $M'_X$ and a copy of $U_{2,3}$ with ground set $\{q,z,p\}$. Thus (iii)(b) of the lemma holds. Hence so does \ref{crosspath}. We shall now assume that (iii)(b) does not hold. \begin{sublemma} \label{notsubset} $A\not \subseteq Y - y_0$ and $B\not \subseteq X$ and $A\not \subseteq X$. 
\end{sublemma} To see this, first suppose that $A \subseteq Y - y_0$. Then, as $\ell \in {\rm cl}_{M/y_0}(A)$, we deduce that $\ell \in {\rm cl}_M(Y)$; a contradiction. Thus $A\not \subseteq Y - y_0$. Now suppose that $B \subseteq X$. We have \begin{align*} 1 & = \lambda_{M/y_0}(B)\\ & = r_{M/y_0}(B) + r_{M/y_0}(A \cup \ell) - r(M/y_0)\\ & = r(B \cup y_0) - r(\{y_0\}) + r(A \cup \ell \cup y_0) - r(\{y_0\}) - r(M) + r(\{y_0\})\\ & = r(B) + r(A \cup \ell \cup y_0) - r(M) \text{~~as $B \subseteq X$.} \end{align*} Thus $(A \cup \ell \cup y_0,B)$ is a $2$-separation\ of $M$; a contradiction. Thus $B\not\subseteq X$. Next suppose that $A \subseteq X$. As $(A \cup \ell,B)$ is a $2$-separation\ of $M/y_0$, we have \begin{align*} 1 & = r_{M/y_0}(A \cup \ell) + r_{M/y_0}(B) - r(M/y_0)\\ & = r(A \cup \ell \cup y_0) - r(\{y_0\}) + r(B \cup y_0) - r(\{y_0\}) - r(M) + r(\{y_0\})\\ & \ge r(A \cup y_0) - r(\{y_0\}) + r(B \cup y_0) - r(M)\\ & \ge r(A) + r(B \cup y_0) - r(M\backslash \ell) \text{~~as $A \subseteq X$;}\\ & \ge 1 \text{~~as $M\backslash \ell$ is $2$-connected.} \end{align*} We deduce that equality holds throughout, so $r(A \cup \ell \cup y_0) = r(A \cup y_0)$. But $A \subseteq X$, so $r(X \cup \ell \cup y_0) = r(X \cup y_0)$, which contradicts the choice of $y_0$. Hence $A\not\subseteq X$, so \ref{notsubset} holds. By \ref{crosspath}, we deduce that $B \subseteq Y - y_0$. Since, by \ref{mcony}, $M/y_0$ has a special $N$-minor, we see that (iii)(a) of the lemma holds, so the lemma is proved. \end{proof}
2,602
142,722
en
train
0.91.32
We now combine the above lemmas to prove one of the two main results of this section. \begin{lemma} \label{bubbly} Suppose $M\backslash \ell$ has $N$ as a c-minor. Let $(X,Y)$ be a $2$-separation of $M\backslash \ell$ having $X$ as the $N$-side and $|Y| = \mu(\ell)$. Then $Y$ contains a doubly labelled element. \end{lemma} \begin{proof} By Lemma~\ref{muend}, $|Y| \ge 3$. Assume that $Y$ does not contain a doubly labelled element. Then, by Lemma~\ref{p63rev}, \begin{itemize} \item[(i)(a)] $\sqcap(\{y\},X) \neq 1$ for some $y$ in $Y$; or \item[(i)(b)] $Y$ contains an element $y$ such that $M\backslash y$ has $N$ as a c-minor and every non-trivial $2$-separation of $M\backslash y$ has the form $(Z_1,Z_2)$ where $Z_1$ is the $N$-side and $Z_2 \subseteq Y - y$. \end{itemize} Now, since $|Y| = \mu(\ell)$, outcome (iii)(b) of Lemma~\ref{prep65rev} does not arise. Thus, by that lemma and Lemma~\ref{predichotomy}, \begin{itemize} \item[(ii)(a)] $\sqcap(Y-y,X) > 0$ for some $y$ in $Y$; or \item[(ii)(b)] $Y$ contains an element $y$ such that $M/ y$ has $N$ as a c-minor and every non-trivial $2$-separation of $M/y$ has the form $(Z_1,Z_2)$ where $Z_1$ is the $N$-side and $Z_2 \subseteq Y - y$. \end{itemize} By Lemma~\ref{dichotomy}, (i)(a) and (ii)(a) cannot both hold. Thus (i)(b) or (ii)(b) holds. Therefore, for some $y$ in $Y$, either $M\backslash y$ has $N$ as a c-minor and has a $2$-separation\ $(Z_1,Z_2)$ where $Z_1$ is the $N$-side, $Z_2 \subseteq Y - y$, and $|Z_2| = \mu(y) < \mu(\ell)$, or $M/ y$ has $N$ as a c-minor and has a $2$-separation\ $(Z_1,Z_2)$ where $Z_1$ is the $N$-side, $Z_2 \subseteq Y - y$, and $|Z_2| = \mu^*(y) < \mu(\ell)$. We can now repeat the argument above using $(y,Z_2)$ in place of $(\ell,Y)$ and, in the latter case, $M^*$ in place of $M$. Since we have eliminated the possibility that $\mu(\ell) = 2$ or $\mu^*(\ell) = 2$, after finitely many repetitions of this argument, we obtain a contradiction\ that completes the proof. 
\end{proof} \begin{corollary} \label{doubly} The $2$-polymatroid $M$ contains a doubly labelled element. \end{corollary} \begin{proof} Take $\ell$ in $E(M) - E(N)$. Then $M\backslash \ell$ or $M/ \ell$ has $N$ as a c-minor, so applying the last lemma to $M$ or its dual gives the result. \end{proof}
918
142,722
en
train
0.91.33
\section{Non-$N$-$3$-separators exist} \label{keylargo} The purpose of this section is to prove the existence of a non-$N$-3-separating set in $M$ where we recall that such a set $Y$ is exactly $3$-separating, meets $E(N)$ in at most one element, and, when it has exactly two elements, both of these elements are lines. The following lemma will be key in what follows. \begin{lemma} \label{key} Let $(X,Y)$ be a $2$-separation of $M\backslash \ell$ where $X$ is the $N$-side, $|Y| \ge 2$, and $Y$ is not a series pair of points in $M\backslash \ell$. Then $Y$ contains no points. \end{lemma} \begin{proof} Assume that $Y$ contains a point $y$. Then, by Lemma~\ref{Step0}, $y$ is not doubly labelled. \begin{sublemma} \label{key1} $M\backslash y$ or $M/y$ has a special $N$-minor. \end{sublemma} To see this, consider the $2$-connected $2$-polymatroid $M_Y$. By Lemma~\ref{Tutte2}, $M_Y\backslash y$ or $M_Y/y$ is $2$-connected, so $\sqcap_{M\backslash y}(X,Y-y) = 1$ or $\sqcap_{M/ y}(X,Y-y) = 1$. As $M_X$ has a special $N$-minor, so does $M\backslash y$ or $M/y$. \begin{sublemma} \label{key2} $M\backslash y$ does not have a special $N$-minor. \end{sublemma} Assume $M\backslash y$ does have a special $N$-minor. Then, as $y$ is not doubly labelled, $M/y$ does not have a special $N$-minor. Then, by Lemma~\ref{claim1}(ii), $\sqcap_{M/y}(X,Y-y) = 0$, that is, $r_{M/y}(X) + r_{M/y}(Y-y) - r(M/y) = 0$, so $r(X \cup y) + r(Y) = r(M) + r(\{y\}) = r(M) + 1$. But $r(X) + r(Y) = r(M) +1$, so $r(X \cup y) = r(X)$ and $r(Y - y) = r(Y)$ otherwise $(X\cup y, Y-y)$ is a 1-separation of $M\backslash \ell$; a contradiction\ to Lemma~\ref{Step1}. Since $y \in Y$ and $r(X \cup y) = r(X)$, we see that $\sqcap(X,\{y\}) = 1$. But $\sqcap(X,Y) = 1$. Thus, in $M_Y$, the point $y$ is parallel to the basepoint $p$ of the 2-sum. Hence $M\backslash \ell \backslash y$ is $2$-connected and $r(M\backslash \ell \backslash y) = r(M)$. 
Let $(A \cup \ell, B)$ be a non-trivial $2$-separation\ of $M\backslash y$ where $\ell \not \in A$. Now \begin{align*} 1 & \le r(A) + r(B) - r(M\backslash \ell,y)\\ & \le r(A \cup \ell) + r(B) - r(M\backslash y)\\ & = 1. \end{align*} Thus $r(A) = r(A \cup \ell)$. Hence $\ell \in {\rm cl}(A)$ so $r(A) \ge 2$. Continuing with the proof of \ref{key2}, we now show the following. \begin{sublemma} \label{key3} $(A,B)$ crosses $(X,Y-y)$. \end{sublemma} Because $y \in {\rm cl}(X) \cap {\rm cl}(Y-y)$ but $y \notin {\rm cl}(A) \cup {\rm cl}(B)$, we deduce that neither $A$ nor $B$ contains $X$ or $Y-y$, so \ref{key3} holds. By uncrossing, $\lambda_{M\backslash \ell,y}(B \cap (Y-y)) = 1$. But $\ell \in {\rm cl}(A)$ and $y \in {\rm cl}(X)$ so $\lambda_M(B \cap (Y - y)) = 1$. Hence $B \cap (Y - y)$ consists of a single point, say $z$. As $z$ is not parallel to $y$, we deduce that $\sqcap(X,\{z\}) = 0$. Thus, by Lemma~\ref{obs1}, $\sqcap_{M/z}(X,Y-z) = \lambda_{M\backslash \ell/z}(X) = 1$. Hence, by Lemma~\ref{claim1}(ii), $M\backslash \ell/z$, and hence $M/z$, has a special $N$-minor. On the other hand, $$1 = \sqcap(X,\{y\}) \le \sqcap(X,Y-z) \le \sqcap(X,Y) = 1.$$ Thus $\sqcap_{M\backslash z}(X,Y-z) = 1$ so $M\backslash z$ has a special $N$-minor. Since $z$ is a point, we have a contradiction\ to Lemma~\ref{Step0} that proves \ref{key2}. By combining \ref{key1} and \ref{key2}, we deduce that $M/y$ has a special $N$-minor but $M\backslash y$ does not. Since $(M,N)$ is a counterexample, $M/y$ is not $3$-connected. By Lemma~\ref{Step1}, $M/y$ is $2$-connected. As $M\backslash y$ does not have a special $N$-minor, by Lemma~\ref{claim1}(i), $\sqcap(X,Y-y) = 0$. But $\sqcap(X,Y) = 1$. As $y$ is a point, it follows that $$r(Y-y) = r(Y) - 1$$ and $r(X \cup (Y-y)) = r(X \cup Y)$. Moreover, as $(X \cup y,Y-y)$ is not a 1-separation of $M\backslash \ell$, we deduce that \begin{sublemma} \label{xy1} $r(X \cup y) = r(X) + 1.$ \end{sublemma} Now $r(M_Y \backslash p,y) = r(Y - y) = r(Y) - 1$. 
But $r(M_Y \backslash p) = r(Y)$. If $r(M_Y\backslash y) = r(Y) - 1$, then $\{y\}$ is a 1-separating set in $M_Y$. We deduce that $\{p,y\}$ is a series pair of points in $M_Y$. Thus $M_Y\backslash y$ is not 2-connected but $M_Y$ is, so, by Lemma~\ref{Tutte2}, $M_Y/y$ is 2-connected. Hence, by Proposition~\ref{connconn}, $M\backslash \ell/y$ is 2-connected. \begin{sublemma} \label{notel} $(\{\ell\}, X \cup (Y-y))$ is not a $2$-separation of $M/y$. \end{sublemma} Assume the contrary. Then $r(\{\ell,y\}) + r(X \cup Y) = r(M) + 2.$ But $r_{M/y}(\{\ell\}) = 2$ otherwise we do not have a $2$-separation. Thus $r(\{\ell,y\}) = 3$, so $(\{\ell\}, X \cup Y)$ is a $2$-separation\ of $M$; a contradiction. Therefore \ref{notel} holds. Let $(A\cup \ell,B)$ be a $2$-separation\ of $M/y$ with $\ell$ not in $A$. By \ref{notel}, $A \neq \emptyset.$ Since $M/y \backslash \ell$ is 2-connected, $\lambda_{M/y\backslash \ell}(A) > 0$. Hence $\lambda_{M/y\backslash \ell}(A) = 1$, so $\ell \in {\rm cl}_{M/y}(A).$ Hence one easily checks that \begin{sublemma} \label{crank1} \begin{itemize} \item[(i)] $r(A \cup y \cup \ell) = r(A \cup y)$; and \item[(ii)] $r(A \cup y) + r(B \cup y) = r(M\backslash \ell) + 2$. \end{itemize} \end{sublemma} Next we show that \begin{sublemma} \label{crossagain} $(A,B)$ crosses $(X,Y-y)$. \end{sublemma} Assume $B \cap (Y - y) = \emptyset$ or $B \cap X = \emptyset$. As $r(X \cup y) = r(X) + 1$ and $r(Y) = r(Y-y) + 1$, we have $r(B \cup y) = r(B) + 1$. Then, as $r(A\cup y \cup \ell) = r(A \cup y)$, we have, by \ref{crank1}, $$r(A \cup y \cup \ell) + r(B) = r(M) + 1,$$ that is, $(A \cup y \cup \ell, B)$ is a $2$-separation\ of $M$; a contradiction. We deduce that $B \cap (Y-y) \neq \emptyset \neq B \cap X$. Now assume that $A \cap (Y-y) = \emptyset.$ Then $A \subseteq X$ and $Y-y \subseteq B$, so $r(X \cup y \cup \ell) = r(X \cup y)$. As $r(X \cup y) = r(X) + 1$ and $r(Y-y) = r(Y) -1$, it follows that $(X \cup y \cup \ell, Y-y)$ is 2-separating in $M$. 
Hence $Y-y$ consists of a single point $z$. Now $r(X) + r(Y) = r(M\backslash \ell) + 1$, so $r(X) = r(M\backslash \ell) - 1$. As $M\backslash \ell$ is connected, neither $y$ nor $z$ is in ${\rm cl}(X)$ so $\{y,z\}$ is a series pair of points in $M\backslash \ell$; a contradiction. Hence $A \cap (Y - y) \neq \emptyset$. Finally, assume that $X \cap A = \emptyset$. Then $A \subseteq Y -y$, so, as $r(A \cup y \cup \ell) = r(A \cup y)$, it follows that $r(Y \cup \ell) = r(Y)$, so $(X,Y \cup \ell)$ is a $2$-separation\ of $M$; a contradiction. We conclude that \ref{crossagain} holds. Next we determine the structure of the set $B$. \begin{sublemma} \label{whatsb} In $M$, the set $B$ consists of two points, $x'$ and $y'$, that lie in $B\cap X$ and $B\cap (Y-y)$, respectively. \end{sublemma} By uncrossing, $\lambda_{M\backslash \ell/y}(X \cap B) = 1$, so $$r((X \cap B) \cup y) + r(A \cup Y) - r(M\backslash \ell) = 2.$$ As $X \cap B \subseteq X$, we deduce that $r((X \cap B) \cup y) = r(X \cap B) + 1$. Also $y \in Y$, so $r(A \cup Y) = r(A \cup Y \cup \ell)$. Thus $(X\cap B, A \cup Y \cup \ell)$ is $2$-separating in $M$. Hence $X \cap B$ consists of a point, say $x'$. By uncrossing again, we see that $\lambda_{M\backslash \ell/y}((Y-y) \cap B) = 1$, so $$r(((Y-y) \cap B) \cup y) + r(A \cup X \cup y) - r(M\backslash \ell) = 2.$$ Thus $$r((Y-y) \cap B) + r(A \cup X \cup y\cup \ell) = r(M) + 1$$ since $r(((Y-y) \cap B) \cup y) = r((Y-y) \cap B) + 1$ and $r(A \cup X \cup y) = r(A \cup X \cup y \cup \ell).$ Hence $((Y-y) \cap B, A \cup X \cup y \cup \ell)$ is 2-separating in $M$, so $(Y-y) \cap B$ consists of a single matroid point, $y'$. We deduce that \ref{whatsb} holds. \begin{sublemma} \label{doubleup} The element $y'$ is doubly labelled. \end{sublemma} To see this, first observe that, in $M/y$, the set $B$ is a 2-separating set consisting of two matroid points, $x'$ and $y'$. Suppose $r_{M/y}(B) = 2$. 
Then $r_{M/y}(A \cup \ell) = r(M/y) - 1$, so $r(A \cup \ell \cup y) = r(M) -1$. Hence $(A \cup \ell \cup y, \{x',y'\})$ is a $2$-separation\ of $M$; a contradiction. We deduce that $r_{M/y}(B) = 1$ so $\{x',y'\}$ is a pair of parallel points in $M/y$. Then $M/y\backslash y'$, and so $M\backslash y'$, has a special $N$-minor. Now $r_{M/y}(\{x',y'\}) = 1$, so $r(\{x',y',y\}) = 2$. Thus $y \in {\rm cl}_{M/y'}(X)$, so $r(X \cup y' \cup y) = r(X \cup y')$. But, by \ref{xy1}, $r(X \cup y) > r(X)$, so $r(X \cup y') > r(X)$. Hence $\sqcap(X,\{y'\}) = 0$. Thus, by Lemma~\ref{obs1}, $\sqcap_{M/y'}(X,Y- y') = \sqcap(X,Y) = 1$. We conclude by Lemma~\ref{claim1} that $M/y'$ has a special $N$-minor. Therefore \ref{doubleup} holds. As \ref{doubleup} contradicts Lemma~\ref{Step0}, we deduce that Lemma~\ref{key} holds. \end{proof}
3,849
142,722
en
train
0.91.34
\begin{lemma} \label{nonN} There is a c-minor $N_0$ of $M$ that is isomorphic to $N$ such that $M$ has a non-$N_0$-$3$-separating set. \end{lemma} \begin{proof} By Corollary~\ref{doubly}, $M$ has a doubly labelled element $\ell$. By Lemma~\ref{Step0}, $\ell$ is a line. Moreover, by Lemma~\ref{Step1}, each of $M\backslash \ell$ and $M/ \ell$ is 2-connected. Assume the lemma fails. Let $N_D$ and $N_C$ be special $N$-minors of $M\backslash \ell$ and $M/ \ell$, respectively. We now apply what we have learned earlier using $N_D$ in place of $N$. Let $(X,Y)$ be a $2$-separation\ of $M\backslash \ell$ in which $X$ is the $N_D$-side and $|Y| = \mu(\ell)$. Then $|Y| \ge 3$. Now $\sqcap(X,\{\ell\}) \in \{0,1\}$. We show next that \begin{sublemma} \label{pixel} $\sqcap(X,\{\ell\}) = 0$ and $\sqcap(Y,\{\ell\}) = 0$. \end{sublemma} Assume that $\sqcap(X,\{\ell\}) = 1$. Then $r(X \cup \ell) = r(X) + 1$, so $\lambda_M(Y) = 2$. Thus $Y$ is a non-$N_D$-3-separating set; a contradiction. Thus $\sqcap(X,\{\ell\}) = 0$. Similarly, if $\sqcap(Y,\{\ell\}) = 1$, then $\lambda_M(X) = 2$, so $Y \cup \ell$ is a non-$N_D$-3-separating set. This contradiction\ completes the proof of \ref{pixel}. We deduce that $M\backslash \ell$ has a $2$-separation\ $(D_1,D_2)$ where $D_1$ is the $N_D$-side, $|D_2| = \mu(\ell) \ge 3$, and $\sqcap(D_1,\ell) = 0 = \sqcap(D_2,\ell)$. A similar argument to that used to show \ref{pixel} shows that $M/ \ell$ has a $2$-separation\ $(C_1,C_2)$ where $C_1$ is the $N_C$-side, $|C_2| = \mu^*(\ell) \ge 3$, and $\sqcap(C_1,\ell) = 2 = \sqcap(C_2,\ell)$. We observe here that the definition of $\mu^*(\ell)$ depends on $N_C$ here rather than on $N_D$. By the local connectivity conditions between $\ell$ and each of $D_1,D_2, C_1$, and $C_2$, \begin{sublemma} \label{crosscd} $(C_1,C_2)$ and $(D_1,D_2)$ cross. \end{sublemma} We have $r(D_1) + r(D_2) = r(E - \ell ) + 1$ and $r(C_1) + r(C_2) = r(E - \ell) + 3$. 
By uncrossing, $$\lambda_{M\backslash \ell}(D_2 \cap C_2) + \lambda_{M\backslash \ell}(D_1 \cap C_1) \le 4.$$ Suppose $\lambda_{M\backslash \ell}(D_2 \cap C_2) \le 1$. Since $\ell \in {\rm cl}(D_1 \cup C_1)$, it follows that $\lambda_{M}(D_2 \cap C_2) \le 1$. Thus $D_2 \cap C_2$ consists of a single point, $z$. Then $$2 = \sqcap(C_2,\{\ell\}) \le \sqcap(D_1\cup z, \{\ell\}) \le \sqcap(D_1,\{\ell\}) + 1 = 1;$$ a contradiction. We deduce that $\lambda_{M\backslash \ell}(D_2 \cap C_2) = 2 = \lambda_{M\backslash \ell}(D_1 \cap C_1)$, so $\lambda_{M}(D_2 \cap C_2) = 2 = \lambda_{M}(D_1 \cap C_1)$. By symmetry, $\lambda_{M}(D_1 \cap C_2) = 2 = \lambda_{M}(D_2 \cap C_1)$. Clearly each of $D_2 \cap C_1$ and $D_2 \cap C_2$ contains at most one element of $N_D$. As $|D_2| \ge 3$, we deduce from Lemma~\ref{key} that $D_2$ contains no points. Hence, some $Z$ in $\{D_2 \cap C_1, D_2 \cap C_2\}$ contains at least two elements. Then $Z$ is a non-$N_D$-3-separator of $M$. \end{proof} For the rest of the proof of Theorem~\ref{modc}, we will use the c-minor $N_0$ of $M$ found in the last lemma. To avoid cluttering the notation, we will relabel $N_0$ as $N$. \begin{lemma} \label{p124} Let $Y_1$ be a minimal non-$N$-$3$-separating set in $M$ with $|Y_1| \ge 3$, and let $X_1 = E(M) - Y_1$. Let $\ell$ be an element of $Y_1$ such that $M\backslash \ell$ has $N$ as a c-minor. Let $(A,B)$ be a $2$-separation of $M\backslash \ell$ where $A$ is the $N$-side and $|B| = \mu(\ell)$. Then one of the following holds. \begin{itemize} \item[(i)] $\lambda_{M\backslash \ell}(Y_1 - \ell) = 1$; or \item[(ii)] $B \subseteq Y_1 - \ell$; or \item[(iii)] $(A,B)$ crosses $(X_1, Y_1 - \ell)$ and $\lambda_{M\backslash \ell}(A \cap (Y_1 - \ell)) = 1 = \lambda_{M\backslash \ell}(B \cap (Y_1 - \ell))$, while $\lambda_{M\backslash \ell}(A \cap X_1) = 2 = \lambda_{M\backslash \ell}(B \cap X_1) = \lambda_{M\backslash \ell}(Y_1 - \ell)$. \end{itemize} \end{lemma} \begin{proof} Assume neither (i) nor (ii) holds. 
Then $\ell \in {\rm cl}(Y_1 - \ell)$ and $B \not \subseteq Y_1 - \ell$. If $B \subseteq X_1$, then $\lambda_M(B) = 1$; a contradiction. If $B \supseteq Y_1 - \ell$, then $\lambda_M(A) = 1$; a contradiction. Finally, observe that $|X_1 \cap A| \ge 2$ since $|E(N)| \ge 4$ and $X_1$ and $A$ are the $N$-sides of their separations. We conclude that $(A,B)$ crosses $(X_1, Y_1 - \ell)$. By Lemma~\ref{muend}, $|B| \ge 3$. By Lemma~\ref{key}, $B$ contains no points. Now $\lambda_{M\backslash \ell}(B \cap X_1) \ge 2$ otherwise, as $\ell \in {\rm cl}(Y_1 - \ell)$, we get the contradiction\ that $\lambda_{M}(B \cap X_1)= 1$. By uncrossing, we deduce that $\lambda_{M\backslash \ell}(A \cap (Y_1 - \ell)) \le 1$. Since $|X_1 \cap A| \ge 2$, we get, similarly, that $\lambda_{M\backslash \ell}(A \cap X_1) \ge 2$, so $\lambda_{M\backslash \ell}(B \cap (Y_1- \ell)) \le 1$. As $M\backslash \ell$ is $2$-connected, we deduce that $\lambda_{M\backslash \ell}(A \cap (Y_1 - \ell)) = 1 = \lambda_{M\backslash \ell}(B \cap (Y_1 - \ell))$. Hence $\lambda_{M\backslash \ell}(A \cap X_1) = 2 = \lambda_{M\backslash \ell}(B \cap X_1)$. We conclude that (iii) holds. Hence so does the lemma. \end{proof}
2,226
142,722
en
train
0.91.35
\section{Finding big enough $3$-separators} \label{bigtime} In this section, we first establish (\ref{Step5}) and then we start the proof of (\ref{Step6}). Specifically, we begin by showing the following. \begin{lemma} \label{Step5+} $M$ has a minimal non-$N$-$3$-separator with at least three elements. \end{lemma} \begin{proof} Assume every minimal non-$N$-3-separating set has exactly two elements. Let $\{a,b\}$ be such a set, $Z$. Then both of its members are lines. We may assume that $b \not\in E(N)$. Suppose first that $r(Z) = 2$. Then $a$ and $b$ are parallel lines. Suppose that $N$ is a c-minor of $M/b$. Since $a$ is a loop of $M/b$, we deduce that $a \notin E(N)$ so $M\backslash a$ has $N$ as a c-minor. Since $M\backslash a$ is $3$-connected, this is a contradiction. We may now assume that $M\backslash b$ has $N$ as a c-minor. Since it is $3$-connected, we have a contradiction\ that implies that $r(Z) > 2$. Suppose next that $r(Z) = 4$. Then $r^*(Z) = ||Z|| + r(E - Z) - r(M)= 4 -2 = 2$. Hence $Z$ consists of a pair of parallel lines in $M^*$, so we obtain a contradiction\ as above. We may now assume that $r(Z) = 3$. Then $Z$ is a prickly $3$-separating set and, by Lemma~\ref{portia}, $M\downarrow b$ is $3$-connected. Hence $M\downarrow b$ has no c-minor isomorphic to $N$. Now $M\backslash b$ or $M/b$ has a c-minor isomorphic to $N$. We begin by assuming the former. Let $(S \cup a, T)$ be a non-trivial $2$-separation\ of $M\backslash b$ with $a \not\in S$. Suppose the non-$N$-side of $(S \cup a, T)$ has $\mu(b)$ elements. By Lemma~\ref{muend}, $\mu(b) \ge 3$. We have $r(S \cup a) + r(T) - r(M) = 1$. As $\sqcap(\{a\},\{b\}) = 1$ and $M$ is $3$-connected, $r(S \cup a \cup b) = r(S \cup a) + 1$, so \begin{equation} \label{eqmt} \lambda_M(T) = 2. \end{equation} Moreover, $$r(S \cup a) \ge r(S) + 1$$ otherwise $r(S \cup a) = r(S)$ so $r(E - b) = r(E - \{a,b\})$; a contradiction. Next we show the following. 
\begin{sublemma} \label{addon} Suppose $M\backslash b$ has a $2$-separation $(S_1,S_2)$ where $S_1$ is the $N$-side and $S_2$ contains a prickly $3$-separator $\{u,v\}$ where $u \notin E(N)$. Then $M\backslash b \downarrow u$ is not $2$-connected. \end{sublemma} Suppose $M\backslash b \downarrow u$ is $2$-connected. Now $M\backslash b = M_1 \oplus_2 M_2$ where $M_i$ has ground set $S_i \cup p$. Since $M\backslash b \downarrow u$ is $2$-connected, $\sqcap_{M\backslash b\downarrow u}(S_1, S_2 - u) = 1$. Then, by Lemma~\ref{claim1}(iii), $M\backslash b \downarrow u$ has a special $N$-minor. By Lemma~\ref{portia}, $M\downarrow u$ is $3$-connected. Since it has a c-minor isomorphic to $N$, we have a contradiction. Thus \ref{addon} holds. Now suppose that $T$ is the $N$-side of $(S \cup a, T)$. Then, by Lemma~\ref{key}, $S \cup a$ contains no points. Assume that $r(S \cup a) = r(S) + 1$. As $r(S \cup a) + r(T) - r(M\backslash b) = 1$, we see that $$[r(S) + 1] + r(T) - [r(M\backslash b,a) + 1] = 1.$$ Hence $\sqcap(S,T) = 1$, so, by Lemma~\ref{claim1}(i), $M\backslash b\backslash a$ has a special $N$-minor. As $\{a,b\}$ is a prickly 3-separating set, we see that $M\backslash b \backslash a = M\downarrow b \backslash a$ so $M\downarrow b$ has a c-minor isomorphic to $N$; a contradiction. Next we consider the case when $T$ is the $N$-side of $(S \cup a, T)$, and $r(S \cup a) = r(S) + 2$. Then $r(S) + r(T \cup a \cup b) = r(M) + 2$. Thus $S$ is a non-$N$-$3$-separator and so contains a minimal such set, $\{u,v\}$ where $u \notin E(N)$. From above, we know that $\{u,v\}$ is a prickly $3$-separator of $M$. By \ref{addon}, $M\backslash b \downarrow u$ is not $2$-connected. Now $M\backslash b \downarrow u = M\downarrow u\backslash b$. Let $(J,K)$ be a $1$-separation of $M\downarrow u \backslash b$ with $a \in J$. Then $r_{M\downarrow u}(J \cup b) \le r_{M\downarrow u}(J) + 1$. 
Thus \begin{align*} r_{M\downarrow u}(J \cup b) + r_{M\downarrow u}(K) - r(M\downarrow u) & \le [r_{M\downarrow u}(J) + r_{M\downarrow u}(K) - r(M\downarrow u\backslash b)] \\ & \hspace*{0.9in} + [1 + r(M\downarrow u\backslash b) - r(M\downarrow u)]\\ & = 1 + r(M\downarrow u\backslash b) - r(M\downarrow u). \end{align*} By Lemma~\ref{portia}, $M\downarrow u$ is $3$-connected, so $r(M\downarrow u\backslash b) = r(M\downarrow u)$, and $K$ consists of a single point, $k$, of $M\downarrow u$. Then \begin{align*} 1 & = r_{M\downarrow u}(J) + r_{M\downarrow u}(\{k\}) - r(M\downarrow u\backslash b)\\ & = r_{M\downarrow u}(E - \{b,u,k\}) + r_{M\downarrow u}(\{k\}) - r(M\downarrow u)\\ & = r(E - \{b,k\}) - 1 + r(\{k\}) - r(M) + 1\\ & = r(E - \{b,k\}) + r(\{k\}) - r(M\backslash b). \end{align*} Hence $\{k\}$ is $1$-separating in $M\backslash b$. Thus $k$ contradicts Lemma~\ref{Step0}. When $M\backslash b$ has a c-minor isomorphic to $N$, it remains to consider the case when $S \cup a$ is the $N$-side of $(S \cup a, T)$. As $\mu(b) \ge 3$, it follows that $|T| \ge 3$. By (\ref{eqmt}), $\lambda_M(T) = 2$. By assumption, $T$ contains a minimal non-$N$-$3$-separating set $T'$. The latter consists of a pair, $\{u,v\}$, of lines that form a prickly 3-separating set. We may assume that $u \not\in E(N)$. Now $M\backslash b$ is certainly $2$-connected. By Lemma~\ref{portia}, $M\downarrow u$ is $3$-connected. Since $\sqcap(\{a\},\{b\}) = 1$, it follows that $M\downarrow u \backslash b$ is $2$-connected; a contradiction. We conclude that $M\backslash b$ does not have a c-minor isomorphic to $N$. We now know that $M/b$ has a c-minor isomorphic to $N$. Moreover, $M^*$ has a c-minor isomorphic to $N^*$ and has $\{a,b\}$ as a prickly 3-separating set; and $(M/b)^* = (M^* \backslash b)^{\flat}$. We use $M^* \backslash b$ in place of $M\backslash b$ in the argument above to complete the proof of the lemma. 
\end{proof} The argument to establish that $M$ has a minimal non-$N$-$3$-separator with at least four elements is much longer than that just given since it involves analyzing a number of cases. We shall use three preliminary results. In each, we denote $E(M) - Y_1$ by $X_1$. \begin{lemma} \label{pre3lines} Let $Y_1$ be a minimal-non-$N$-$3$-separator with exactly three elements. Suppose $\ell \in Y_1$ and $M\backslash \ell$ has $N$ as a c-minor. Let $(A,B)$ be a $2$-separation\ of $M\backslash \ell$ where $A$ is the $N$-side and $|B| \ge 3$. Suppose $\ell \in {\rm cl}(Y_1 - \ell)$. Then $(A,B)$ crosses $(X_1, Y_1 - \ell)$ and $\lambda_{M\backslash \ell}(X_1 \cap A) \ge 2$. Moreover, $Y_1 \cap B$ consists of a single line. \end{lemma} \begin{proof} As $\ell \in {\rm cl}(Y_1 - \ell)$, we see that $\lambda_{M_1 \backslash \ell}(Y_1 - \ell) = 2$. To see that $(A,B)$ crosses $(X_1, Y_1 - \ell)$, note first that, as $|Y_1 - \ell| = 2$ and $|A|, |B| \ge 3$, neither $A$ nor $B$ is contained in $Y_1 - \ell$. Moreover, $Y_1 - \ell$ is not contained in $A$ or $B$ otherwise $(A \cup \ell,B)$ or $(A,B \cup \ell)$ is a $2$-separation\ of $M$; a contradiction. Hence $(A,B)$ crosses $(X_1, Y_1 - \ell)$. As $|E(N)| \ge 4$, we see that $|X_1 \cap A| \ge 2$. Then $$\lambda_{M\backslash \ell}(X_1 \cap A) \ge 2$$ otherwise, as $\ell \in {\rm cl}(Y_1 - \ell)$, we get the contradiction\ that $\lambda_{M}(X_1 \cap A) \le 1$. By uncrossing, $\lambda_{M\backslash \ell}(Y_1 \cap B) \le 1$. By Lemma~\ref{key}, $B$ contains no points, so $Y_1 \cap B$ contains no points. As $|Y_1 - \ell| = 2$, we see that $Y_1 \cap B$ consists of a single line. \end{proof} \begin{lemma} \label{3lines} Let $Y_1$ be a minimal-non-$N$-$3$-separator with exactly three elements. If $Y_1$ contains a line $\ell$ such that $M\backslash \ell$ has $N$ as a c-minor, then $Y_1$ consists of three lines. \end{lemma} \begin{proof} Assume that the lemma fails. 
Let $(A,B)$ be a $2$-separation\ of $M\backslash \ell$ where $A$ is the $N$-side and $|B| \ge 3$. First we show that \begin{sublemma} \label{3lines1} $\ell \in {\rm cl}(Y_1 - \ell)$. \end{sublemma} Assume that $\ell \not\in {\rm cl}(Y_1 - \ell)$. Then $(X_1, Y_1 - \ell)$ is a $2$-separation\ of $M\backslash \ell$ with $|Y_1 - \ell| = 2$. By Lemma~\ref{key}, we may assume that $Y_1 - \ell$ consists of a series pair $\{y_1,y_2\}$ of points. Now $r(M\backslash \ell) = r(M) = r(X_1) + 1$, so $r(\{\ell,y_1,y_2\}) = 3$. Moreover, for each $i$ in $\{1,2\}$, we see that $M\backslash \ell/y_i$, and hence $M/y_i$, has a special $N$-minor. As the theorem fails for $M$, we know that $M/y_i$ is not $3$-connected. Now $M/y_i$ is certainly $2$-connected. Let $(J,K)$ be a $2$-separation\ of it where we may assume that $\ell \in J$. Now $r_{M/y_i}(\{\ell,y_j\}) = 2$ where $\{i,j\} = \{1,2\}$. Suppose $r_{M/y_i}(\{\ell\}) = 2$. Assume $y_j \in K$. Then $(J \cup y_j, K- y_j)$ is a $2$-separation\ of $M/y_i$ unless $K-y_j$ consists of a single point. In the exceptional case, $y_j$ is in a parallel pair of points in $M/y_i$. Hence $M\backslash y_j$ has a special $N$-minor. As $M/y_j$ also has such a minor, we contradict Lemma~\ref{Step0}. We deduce that we may assume that $J$ contains $\{\ell,y_j\}$. Then $r(J \cup y_i) + r(K \cup y_i) = r(M) + 2$, so $r(J \cup y_i,K)$ is a $2$-separation\ of $M$; a contradiction. We may now assume that $r_{M/y_i}(\{\ell\}) = 1$. Then $y_i$ lies on the line $\ell$. Since this must be true for each $i$ in $\{1,2\}$, we see that $r(\{\ell,y_1,y_2\}) = 2$; a contradiction. We deduce that \ref{3lines1} holds. By Lemma~\ref{pre3lines}, we know that $(A,B)$ crosses $(X_1,Y_1)$, that $\lambda_{M\backslash \ell}(X_1 \cap A) \ge 2$, and that $Y_1 \cap B$ consists of a single line. As the lemma fails, $A \cap (Y_1 - \ell)$ consists of a single point, $a$. 
As $\lambda_{M\backslash \ell}(X_1 \cap A) \ge 2$ and $\lambda_{M\backslash \ell}(A) = 1$, we deduce that $r(A-a) = r(A)$ and $r(B \cup a) = r(B) + 1$. Hence $a \in {\rm cl}(X_1)$. Thus $Y_1 - a$ is a minimal non-$N$-$3$-separator; a contradiction. \end{proof}
4,047
142,722
en
train
0.91.36
The next lemma verifies (\ref{Step5.5}). \begin{lemma} \label{y13} Let $Y_1$ be a minimal-non-$N$-$3$-separator having exactly three elements. Then $Y_1$ consists of three lines. \end{lemma} \begin{proof} As $|Y_1 \cap E(N)| \le 1$, at least two of the elements of $Y_1$ are not in $E(N)$. Let $\ell$ be one of these elements. Suppose $\ell$ is a line. If $M\backslash \ell$ has $N$ as a c-minor, then the result follows by Lemma~\ref{3lines}. If $M/\ell$ has $N$ as a c-minor, then $(M^*\backslash \ell)^{\flat}$, and hence $M^*\backslash \ell$ has $N^*$ as a c-minor and again the result follows by Lemma~\ref{3lines}. We may now assume that $\ell$ is a point. By switching to the dual if necessary, we may assume that $M\backslash \ell$ has $N$ as a c-minor. Let $(A,B)$ be a $2$-separation\ of $M\backslash \ell$ where $A$ is the $N$-side and $|B| \ge 3$. Next we show that \begin{sublemma} \label{ellisnot} $\ell \notin {\rm cl}(Y_1 - \ell)$. \end{sublemma} Assume $\ell \in {\rm cl}(Y_1 - \ell)$. Then, by Lemma~\ref{pre3lines}, we know that $(A,B)$ crosses $(X_1,Y_1- \ell)$, that $\lambda_{M\backslash \ell}(X_1 \cap A) \ge 2$, and that $Y_1 \cap B$ consists of a single line, say $m$. Now $|B \cap X_1| \ge 2$ since $|B| \ge 3$. Then $$\lambda_{M\backslash \ell}(B \cap X_1) \ge 2$$ otherwise, since $\ell \in {\rm cl}(Y_1 - \ell)$, we deduce that $\lambda_{M}(B \cap X_1) \ge 1$; a contradiction. By uncrossing, $\lambda_{M\backslash \ell}(Y_1 \cap A) \le 1$. Since $|Y_1| = 3$ and $Y_1 \cap B$ consists of the line $m$, we deduce that $A \cap (Y_1 - \ell)$ consists of a single point, say $a$, otherwise one of the elements of $Y_1 - \ell$ is a line that is not in $E(N)$ and we have already dealt with that case. As $\lambda_{M\backslash \ell}(X_1 \cap A) \ge 2$ and $\lambda_{M\backslash \ell}(A) = 1$, we deduce that \begin{equation} \label{aab} r(A-a) = r(A) \text{~~and~~} r(B \cup a) = r(B) + 1. \end{equation} Hence \begin{equation} \label{ax1} a \in {\rm cl}(X_1). 
\end{equation} We may assume that $m \in E(N)$ otherwise $m$ is removed in forming $N$ and that case was dealt with in the first paragraph. Now $Y_1 = \{a,\ell,m\}$. As $m \in B$, it follows by (\ref{aab}) that $r(\{m,a\}) = 3$. Moreover, as $\{m,a\} = Y_1 - \ell$ and $\ell \in {\rm cl}(Y_1 - \ell)$, we deduce that $r(Y_1) = 3$. By (\ref{ax1}), $r(X_1 \cup a) = r(X_1)$. We deduce that \begin{equation} \label{lmn} r(\{a,\ell,m\}) = r(\{\ell,m\}) = 3 \text{~~and~~} r(X_1 \cup a) = r(M) - 1. \end{equation} Since $m \in E(N)$, it follows that $a \not\in E(N)$. Suppose that $M\backslash \ell /a$ has $N$ as a c-minor. Still as part of the proof of \ref{ellisnot}, we show next that \begin{sublemma} \label{am2s} $M/a$ is the $2$-sum with basepoint $q$ of two $2$-polymatroids, one of which consists of the line $m$ having non-parallel points $q$ and $\ell$ on it. \end{sublemma} By (\ref{lmn}), $(\{\ell,m\},X_1)$ is a 2-separation of $M/a$. Thus $M/a$ is the 2-sum with basepoint $q$ of two $2$-polymatroids, one of which, $Q$ say, consists of the line $m$ having points $q$ and $\ell$ on it. Suppose $q$ and $\ell$ are parallel points in $Q$. Then $(\{m\},X_1 \cup \ell)$ is a 2-separation of $M/a$. It follows that $(\{m\},X_1 \cup \ell \cup a)$ is a 2-separation of $M$; a contradiction. Thus \ref{am2s} holds. By \ref{am2s}, both $M/\ell$ and $M\backslash \ell$ have $N$ as a c-minor; a contradiction\ to Lemma~\ref{Step0}. We now know that $N$ is a c-minor of $M\backslash \ell\backslash a$. In that $2$-polymatroid, $\{m\}$ is $2$-separating so, in the formation of $N$, the element $m$ is compactified. As the next step towards showing \ref{ellisnot}, we now show that \begin{sublemma} \label{m1m'} $M{\underline{\downarrow}\,} m$ is $3$-connected. \end{sublemma} To see this, it will be helpful to consider the $2$-polymatroid $M_1$ that is obtained from $M$ by freely adding the point $m'$ on $m$. By definition, $M{\underline{\downarrow}\,} m = M_1/m'$. 
Certainly $M_1$ is $3$-connected, so $M_1/m'$ is $2$-connected. Assume it has a $2$-separation\ $(U,V)$ where $m \in U$. Then $$r(U \cup m') + r(V \cup m') - r(M_1) = 2.$$ But $r(U \cup m') = r(U).$ Hence $r(V \cup m') = r(V)$ otherwise $M_1$ has a $2$-separation; a contradiction. But, as $m'$ was freely placed on $m$, we deduce that $r(V\cup m' \cup m) = r(V \cup m') = r(V)$. Now, in $M_1\backslash m$, we see that $\{\ell,m'\}$ is a series pair of points. As $m' \in {\rm cl}(V)$, it follows that $\ell \in V$. Then $r(U - m) < r(U)$ since $\{m\}$ is 2-separating in $M\backslash \ell$. Now $r(U - m) = r(U) - 1$ otherwise $r(U - m) = r(U) - 2$ and $(U-m,V\cup \{m',m\})$ is a $1$-separation of $M_1$. As $(U-m,V\cup \{m',m\})$ is not a $2$-separation of $M_1$, it follows that $U - m$ consists of a single point $u$ and $r(\{u,m\}) = 2$. Thus, in $M\backslash \ell$, when we compactify $m$, we find that $u$ and $m$ are parallel. Since $m\in E(N)$, we see that $u \not\in E(N)$. Moreover, $M\backslash u$ has $N$ as a c-minor. Since $u$ lies on $m$ in $M$, we deduce that $M\backslash u$ is $3$-connected\ having $N$ as a c-minor. This contradiction\ completes the proof of \ref{m1m'}. Now, in $M_1/m'$, the elements $a, \ell$, and $m$ form a triangle of points. We know that $M_1/m'\backslash \ell$ is not $3$-connected\ otherwise $(M\backslash \ell)^{\flat}$ is $3$-connected\ having $N$ as a c-minor. Because $M\backslash a$ has $N$ as a c-minor, $M\backslash a$ is not $3$-connected, so $M_1 \backslash a$ is not $3$-connected. Still continuing with the proof of \ref{ellisnot}, we show next that \begin{sublemma} \label{m1m'a} $M_1\backslash a/m'$ is not $3$-connected. \end{sublemma} Let $(G,H)$ be a $2$-separation of $M_1 \backslash a$ with $m$ in $G$. Then $(G \cup m',H-m')$ is a $2$-separation\ of $M_1\backslash a$ unless $H$ consists of two points. In the exceptional case, $r(H) = 2$ so $r(G) = r(M) - 1$. But then $a \cup (H - m')$ is a series pair in $M$; a contradiction. 
We conclude that we may assume that $m' \in G$. Then $\ell \in H$, otherwise, by (\ref{lmn}), $(G \cup a, H)$ is a $2$-separation\ of $M_1$; a contradiction. Observe that $G \neq \{m,m'\}$ otherwise $\{m\}$ is 2-separating in $M\backslash a$ and so, as $a \in {\rm cl}(X_1)$ , we obtain the contradiction\ that $\{m\}$ is 2-separating in $M$. Now \begin{equation} \label{m1m1m1} r_{M_1 \backslash a/m'}(G - m') + r_{M_1 \backslash a/m'}(H) - r(M_1 \backslash a/m')= r(G) + r(H \cup m') - 1 - r(M_1\backslash a). \end{equation} Suppose that $r(H \cup m') = r(H)$. Then $r(H \cup m'\cup m) = r(H)$ as $m'$ is freely placed on $m$. Thus, as $G \supsetneqq \{m,m'\}$ and $\{m\}$ is 2-separating in $M\backslash \ell \backslash a$, we see that $(G- m- m', H \cup \{m,m'\})$ is a 1-separation of $M_1\backslash a$. Therefore $(G- m- m', H \cup \{m,a\})$ is a 1-separation of $M$; a contradiction. We now know that $r(H \cup m') = r(H) + 1$. Then, as $(G,H)$ is a $2$-separation\ of $M_1 \backslash a$, it follows by (\ref{m1m1m1}) that $(G-m',H)$ is a $2$-separation\ of $M_1 \backslash a /m'$ unless either $|H| = 1$ and $r_{M_1/m'}(H) = 1$, or $|G-m'| = 1$ and $r_{M_1/m'}(G-m') = 1$. Consider the exceptional cases. The first of these cannot occur since $m'$ is freely placed on $m$; the second cannot occur since it implies that $G = \{m,m'\}$, which we eliminated above. As neither of the exceptional cases occurs, $M_1 \backslash a /m'$ has a 2-separation and so \ref{m1m'a} holds. Recall that $M_1/m' = M{\underline{\downarrow}\,} m$. In this $2$-polymatroid, we have $\{a, \ell,m\}$ as a triangle such that the deletion of either $a$ or $\ell$ destroys $3$-connectedness. Hence, by \cite[Lemma 4.2]{oswww}, there is a triad of $M_1/m'$ that contains $a$ and exactly one of $\ell$ and $m$. Assume this triad contains $\ell$. Thus, in $M\backslash \ell{\underline{\downarrow}\,} m$, we have that $a$ is in a series pair with some element $b$. 
Then $M\backslash \ell/a$ has $N$ as a c-minor, so $a$ is a doubly labelled point of $M$; a contradiction\ to Lemma~\ref{Step0}. We deduce that $M_1/m'$ has a triad containing $\{a,m\}$ but not $\ell$. Then $M_1/m'\backslash \ell$, which equals $M\backslash \ell{\underline{\downarrow}\,} m$, either has a triad containing $\{a,m\}$ or has $a$ in a series pair. This is straightforward to see by considering the matroid that is naturally derived from $M\backslash \ell{\underline{\downarrow}\,} m$ and using properties of the cocircuits in this matroid. Now $a$ is not in a series pair in $M\backslash \ell{\underline{\downarrow}\,} m$ otherwise we again obtain the contradiction\ that $a$ is a doubly labelled point. We deduce that $M\backslash \ell{\underline{\downarrow}\,} m$ has a triad containing $\{a,m\}$. Since $m \in B$ and, by (\ref{aab}), $r(A - a) = r(A)$, we must have that the third point, $b$, of this triad is in $A-a$. Now $M\backslash \ell {\underline{\downarrow}\,} m$ has $(A,B)$ as a $2$-separation and has $\{a,b,m\}$ as a triad with $\{a,b\} \subseteq A$. Thus $(A \cup m,B - m)$ is a $2$-separation\ of $M\backslash \ell {\underline{\downarrow}\,} m$. Since $\ell$ is in the triangle $\{a,m,\ell\}$ in $M {\underline{\downarrow}\,} m$, it follows that $(A \cup m\cup \ell,B - m)$ is a $2$-separation\ of $M {\underline{\downarrow}\,} m$. This contradiction\ to \ref{m1m'} completes the proof of \ref{ellisnot}. Since $\ell \not\in {\rm cl}(Y_1 - \ell)$, we deduce that $(X_1\cup \ell,Y_1 - \ell)$ is 3-separating in $M$. Because $Y_1$ is a minimal non-$N$-3-separating set, $Y_1 - \ell$ does not consist of two lines. Moreover, $(X_1,Y_1 - \ell)$ is a 2-separation in $M\backslash \ell$. \begin{sublemma} \label{pointline} $Y_1 - \ell$ does not consist of a point and a line. \end{sublemma}
3,859
142,722
en
train
0.91.37
Observe that $G \neq \{m,m'\}$ otherwise $\{m\}$ is 2-separating in $M\backslash a$ and so, as $a \in {\rm cl}(X_1)$ , we obtain the contradiction\ that $\{m\}$ is 2-separating in $M$. Now \begin{equation} \label{m1m1m1} r_{M_1 \backslash a/m'}(G - m') + r_{M_1 \backslash a/m'}(H) - r(M_1 \backslash a/m')= r(G) + r(H \cup m') - 1 - r(M_1\backslash a). \end{equation} Suppose that $r(H \cup m') = r(H)$. Then $r(H \cup m'\cup m) = r(H)$ as $m'$ is freely placed on $m$. Thus, as $G \supsetneqq \{m,m'\}$ and $\{m\}$ is 2-separating in $M\backslash \ell \backslash a$, we see that $(G- m- m', H \cup \{m,m'\})$ is a 1-separation of $M_1\backslash a$. Therefore $(G- m- m', H \cup \{m,a\})$ is a 1-separation of $M$; a contradiction. We now know that $r(H \cup m') = r(H) + 1$. Then, as $(G,H)$ is a $2$-separation\ of $M_1 \backslash a$, it follows by (\ref{m1m1m1}) that $(G-m',H)$ is a $2$-separation\ of $M_1 \backslash a /m'$ unless either $|H| = 1$ and $r_{M_1/m'}(H) = 1$, or $|G-m'| = 1$ and $r_{M_1/m'}(G-m') = 1$. Consider the exceptional cases. The first of these cannot occur since $m'$ is freely placed on $m$; the second cannot occur since it implies that $G = \{m,m'\}$, which we eliminated above. As neither of the exceptional cases occurs, $M_1 \backslash a /m'$ has a 2-separation and so \ref{m1m'a} holds. Recall that $M_1/m' = M{\underline{\downarrow}\,} m$. In this $2$-polymatroid, we have $\{a, \ell,m\}$ as a triangle such that the deletion of either $a$ or $\ell$ destroys $3$-connectedness. Hence, by \cite[Lemma 4.2]{oswww}, there is a triad of $M_1/m'$ that contains $a$ and exactly one of $\ell$ and $m$. Assume this triad contains $\ell$. Thus, in $M\backslash \ell{\underline{\downarrow}\,} m$, we have that $a$ is in a series pair with some element $b$. Then $M\backslash \ell/a$ has $N$ as a c-minor, so $a$ is a doubly labelled point of $M$; a contradiction\ to Lemma~\ref{Step0}. We deduce that $M_1/m'$ has a triad containing $\{a,m\}$ but not $\ell$. 
Then $M_1/m'\backslash \ell$, which equals $M\backslash \ell{\underline{\downarrow}\,} m$, either has a triad containing $\{a,m\}$ or has $a$ in a series pair. This is straightforward to see by considering the matroid that is naturally derived from $M\backslash \ell{\underline{\downarrow}\,} m$ and using properties of the cocircuits in this matroid. Now $a$ is not in a series pair in $M\backslash \ell{\underline{\downarrow}\,} m$ otherwise we again obtain the contradiction\ that $a$ is a doubly labelled point. We deduce that $M\backslash \ell{\underline{\downarrow}\,} m$ has a triad containing $\{a,m\}$. Since $m \in B$ and, by (\ref{aab}), $r(A - a) = r(A)$, we must have that the third point, $b$, of this triad is in $A-a$. Now $M\backslash \ell {\underline{\downarrow}\,} m$ has $(A,B)$ as a $2$-separation and has $\{a,b,m\}$ as a triad with $\{a,b\} \subseteq A$. Thus $(A \cup m,B - m)$ is a $2$-separation\ of $M\backslash \ell {\underline{\downarrow}\,} m$. Since $\ell$ is in the triangle $\{a,m,\ell\}$ in $M {\underline{\downarrow}\,} m$, it follows that $(A \cup m\cup \ell,B - m)$ is a $2$-separation\ of $M {\underline{\downarrow}\,} m$. This contradiction\ to \ref{m1m'} completes the proof of \ref{ellisnot}. Since $\ell \not\in {\rm cl}(Y_1 - \ell)$, we deduce that $(X_1\cup \ell,Y_1 - \ell)$ is 3-separating in $M$. Because $Y_1$ is a minimal non-$N$-3-separating set, $Y_1 - \ell$ does not consist of two lines. Moreover, $(X_1,Y_1 - \ell)$ is a 2-separation in $M\backslash \ell$. \begin{sublemma} \label{pointline} $Y_1 - \ell$ does not consist of a point and a line. \end{sublemma} Assume that $Y_1- \ell$ consists of a line $k$ and a point $y$. If $k \not\in E(N)$, then the argument in the first paragraph of the proof of the lemma gives a contradiction. Thus $k \in E(N)$, so $y \not\in E(N)$. If $r(Y_1 - \ell) = 2$, then $M\backslash \ell \backslash y$, and hence $M\backslash y$, has $N$ as a c-minor. 
Since $y$ is on the line $k$, we see that $M\backslash y$ is $3$-connected; a contradiction. We deduce that $r(Y_1 - \ell) = 3$. Hence \begin{equation} \label{x1rk} r(X_1) = r(M) - 2 \text{~~and~~} r(X_1 \cup \ell) = r(M) - 1. \end{equation} Now $M\backslash \ell$ is the 2-sum with basepoint $p$, say, of two $2$-polymatroids, $M_X$ and $M_Y$, with ground sets $X_1 \cup p$ and $(Y_1 - \ell) \cup p$, respectively. Then $r(M_Y) = 3$. Moreover, $y$ does not lie on $k$ in $M_Y$, otherwise $M_Y$ is not $2$-connected, a contradiction\ to Proposition~\ref{connconn}. Thus $M^*\backslash y$ has $N^*$ as a c-minor. Then, by applying \ref{ellisnot} to $M^*\backslash y$, we deduce that $y \not\in {\rm cl}_{M^*}(Y_1 - y)$. Thus $r^*(Y_1 - y) = r^*(Y_1) - 1$. It follows that $r(X_1 \cup \ell \cup y) = r(X_1 \cup \ell)$. But $r(X_1 \cup \ell \cup y) = r(M\backslash k) = r(M)$ yet $r(X_1 \cup \ell) = r(M) - 1$. This contradiction\ completes the proof of \ref{pointline}. We now know that $Y_1 - \ell$ consists of a series pair of points, say $y_1$ and $y_2$. Now $r(M\backslash \ell) = r(M) = r(X_1) + 1$. Also $r(\{\ell,y_1,y_2\}) = 3$. Thus $\{\ell,y_1,y_2\}$ is a triad of $M$. Moreover, both $M/y_1$ and $M/y_2$ have special $N$-minors. Thus neither is $3$-connected. By \cite[Lemma 4.2]{oswww}, $M$ has a triangle that contains $y_1$ and exactly one of $y_2$ and $\ell$. Likewise, $M$ has a triangle that contains $y_2$ and exactly one of $y_1$ and $\ell$. Thus either \begin{itemize} \item[(i)] $M$ has a triangle $\{y_1,y_2,z\}$; or \item[(ii)] $M$ has triangles $\{y_1,\ell,z_1\}$ and $\{y_2,\ell,z_2\}$ but no triangle containing $\{y_1,y_2\}$. \end{itemize} In the first case, $M/y_1$ has $\{y_2,z\}$ as a pair of parallel points. Hence $M\backslash y_2$ has a special $N$-minor. Thus $y_2$ is doubly labelled; a contradiction. We deduce that (ii) holds. Thus $M$ contains a fan $x_1,x_2,\dots,x_n$ where $(x_1,x_2,x_3,x_4,x_5) = (z_2,y_2,\ell,y_1,z_1)$. 
Hence $M/x_2$ has a c-minor isomorphic to $N$. Then, by Lemmas~\ref{fantan} and \ref{Step0}, we obtain a contradiction. \end{proof}
2,404
142,722
en
train
0.91.38
We complete the proof of Lemma~\ref{Step6} by analyzing the various possibilities for a minimal non-$N$-3-separator consisting of exactly three lines.
35
142,722
en
train
0.91.39
\section{A minimal non-$N$-3-separator consisting of exactly three lines} \label{threeel} In this section, we finish the proof of (\ref{Step6}). We begin by restating that assertion. \begin{lemma} \label{Step6+} $M$ has a minimal non-$N$-$3$-separator with at least four elements. \end{lemma} We have $(X_1,Y_1)$ as a $3$-separation of the $3$-connected\ $2$-polymatroid $M$. We shall consider the extension $M+z$ of $M$ that is obtained by adjoining the line $z$ to $M$ so that $z$ is in the closure of each of $X_1$ and $Y_1$ in $M+z$. To see that this extension exists, we note that, by building on a result of Geelen, Gerards, and Whittle~\cite{ggwtconn}, Beavers~\cite[Proposition~2.2.2]{beavs} showed that, when $(A,B)$ is a $3$-separation\ in a $3$-connected\ matroid $Q$, we can extend $Q$ by an independent set $\{z_1,z_2\}$ of size two so that these two points are clones, and each lies in the closure of both $A$ and $B$ in the extension $Q'$. By working in the matroid naturally derived from $M$, we can add $z_1$ and $z_2$. This corresponds to adding the line $z$ to $M$ to form $M+z$ where $z = \{z_1,z_2\}$. More formally, recall that the natural matroid $M'$ derived from $M$ is obtained from $M$ by freely adding two points, $s_{\ell}$ and $t_{\ell}$, on each line $\ell$ of $M$ and then deleting all such lines $\ell$. After we have extended $M'$ by $z_1$ and $z_2$, we have a matroid with points $\{z_1,z_2\} \cup \{p:\text{~$p$ is a point of $M$}\} \cup \{s_{\ell},t_{\ell}:\text{~$\ell$ is a line of $M$}\}$. Taking $z= \{z_1,z_2\}$, we see that $M+ z$ is the $2$-polymatroid with elements $\{z\} \cup \{p:\text{~$p$ is a point of $M$}\} \cup \{\ell:\text{~$\ell$ is a line of $M$}\} = \{z\} \cup E(M)$. We call $M+z$ the $2$-polymatroid that is obtained from $M$ by {\it adding the guts line $z$ of $(X_1,Y_1)$.} When we have $Y_1$ as a minimal non-$N$-$3$-separator of $M$ consisting of three lines, we look at $(M+z)|(Y_1 \cup z)$. 
This $2$-polymatroid consists of exactly four lines. \begin{lemma} \label{claim1y1} $(M+z)|(Y_1 \cup z)$ has no parallel lines, so $r_{M+z}(Y_1 \cup z) \ge 3$. \end{lemma} \begin{proof} Suppose $a$ and $b$ are parallel lines in $Y_1$. Then we may assume that $b\not\in E(N)$. Now $M\backslash b$ or $M/b$ has $N$ as a c-minor. In the latter case, as $a$ is a loop of $M/b$, it follows that $a \not \in E(N)$ and $M\backslash a$ has $N$ as a c-minor. We conclude that $M\backslash b$ or $M\backslash a$ has $N$ as a c-minor. Since each of $M\backslash b$ and $M\backslash a$ is $3$-connected, we obtain the contradiction\ that the theorem holds. Thus $Y_1$ contains no pair of parallel lines. Suppose $z$ is parallel to some element $y$ of $Y_1$. Then $(X_1 \cup y, Y_1 - y)$ is a non-$N$-$3$-separator of $M$ contradicting the minimality of $Y_1$. Thus $(M+z)|(Y_1 \cup z)$ has no parallel lines and the lemma holds. \end{proof} \begin{lemma} \label{claim3} $r(Y_1) > 3$. \end{lemma} \begin{proof} Assume that $r(Y_1) = 3$. Then $r_{M+z}(Y_1 \cup z) = 3$, so $\sqcap(z,y) = 1$ for all $y$ in $Y_1$. Moreover, $r(Y_1 - y) = 3 = r(Y_1)$ for all $y$ in $Y_1$. Suppose that $y \in Y_1 - E(N)$ and $N$ is a c-minor of $M/y$. Then the remaining two elements, $y_1$ and $y_2$, of $Y_1$ are parallel points in $M/y$. We may assume that $y_1 \not \in E(N)$. Thus $M\backslash y_1$ has $N$ as a c-minor. We conclude that $N$ is a c-minor of $M\backslash y$ for some element $y$ of $Y_1$. We now focus on this element $y$. Let $(R,G)$ be a non-trivial $2$-separation\ of $M\backslash y$, that is, $\lambda_{M\backslash y}(R) = 1$ and $\min\{|R|,|G|\} \ge 2$. We show next that \begin{sublemma} \label{claim4} $(R,G)$ crosses $(X_1,Y_1 - y)$. \end{sublemma} If $R \subseteq X_1$, then $G \supseteq Y_1 - y$ so $y \in {\rm cl}_M(G)$ and $(R,G \cup y)$ is a $2$-separation\ of $M$. This contradiction\ implies, using symmetry, that both $R$ and $G$ meet $Y_1 - y$. Suppose $R \cap X_1 = \emptyset$. 
Then $R$ consists of a single line, so $(R,G)$ is a trivial $2$-separation. This contradiction, combined with symmetry, completes the proof of \ref{claim4}. Let $Y_1 - y = \{a,b\}$. We may assume that $a \in R$ and $b \in G$. Now, as $y \in {\rm cl}(Y_1 - y)$, we see that $\lambda_{M\backslash y}(Y_1 - y) = 2$. Thus \begin{align*} 1+2 & = \lambda_{M\backslash y}(R) + \lambda_{M\backslash y}(Y_1 - y)\\ & \ge \lambda_{M\backslash y}(\{a\}) + \lambda_{M\backslash y}(R\cup (Y_1 - y))\\ & = \lambda_{M\backslash y}(\{a\}) + \lambda_{M\backslash y}(G \cap X_1). \end{align*} We know $r(E- Y_1) = r(X_1) = r(M) - 1$ since $r(Y_1) = 3$. Thus $r(E - \{y,a\}) = r(X_1 \cup b) = r(M)$. Hence $$ \lambda_{M\backslash y}(\{a\}) = r(\{a\}) + r(E - \{y,a\}) - r(E - y) = r(\{a\}) = 2,$$ so $\lambda_{M\backslash y}(G\cap X_1) \le 1$. But $y \in {\rm cl}(\{a,b\})$ so $\lambda_M(G \cap X_1) \le 1$. By symmetry, $\lambda_M(R \cap X_1) \le 1$. We conclude that $|G \cap X_1| \le 1$ and $|R \cap X_1| \le 1$, so $|X_1| \le 2$. This is a contradiction\ since $|E(N)| \ge 4$. We conclude that the lemma holds. \end{proof} \begin{lemma} \label{claim5} $r_{M+z}(Y_1 \cup z) =r_M(Y_1) = 4$. \end{lemma} \begin{proof} We know that $r_{M+z}(Y_1 \cup z) = r_M(Y_1) \ge 4$. Suppose $r_M(Y_1) \ge 5$. Then $r_M(X_1) \le r(M) - 3$, so $$r_{M^*}(Y_1) = \sum_{y \in Y_1} r_M(\{y\}) + r_M(X_1) - r(M) \le 6 + r(M) - 3 - r(M) = 3.$$ By using $M^*$ in place of $M$, we get a contradiction\ to Lemma~\ref{claim3}. We conclude that the lemma holds. \end{proof} We will now work with the $2$-polymatroid $(M+z)|(Y_1 \cup z)$, which we rename $P$. This has rank 4 and consists of four lines, $z, a,b,$ and $c$. \begin{lemma} \label{claim5.5} If $B \subseteq Y_1$ and $A = Y_1 - B$, then $$\sqcap_P(A \cup z, B) = \sqcap_{M+z}(A \cup X_1 \cup z, B).$$ \end{lemma} \begin{proof} Since $P = (M+z)|(Y_1 \cup z)$, we can do all of these local connectivity calculations in $M+z$. 
Now $ \sqcap(A \cup z, X_1) = \sqcap(A \cup z, X_1\cup z)$, so $$2 = \sqcap(Y_1 \cup z, X_1) \ge \sqcap(A \cup z, X_1) = \sqcap(A \cup z, X_1\cup z) \ge 2.$$ Thus $$r(A \cup z) - 2 = r(A \cup z \cup X_1) - r(X_1).$$ Hence \begin{align*} \sqcap(A \cup z, B) & = r(A \cup z) + r(B) - r(A\cup z \cup B)\\ & = r(A \cup z \cup X_1) - r(X_1) + 2 + r(B) - r(Y_1)\\ & = r(A \cup z \cup X_1) + r(B) - [r(X_1) + r(Y_1) - 2]\\ & = r(A \cup z \cup X_1) + r(B) - r(M)\\ & = \sqcap(A \cup X_1 \cup z, B). \end{align*} \end{proof} \begin{lemma} \label{claim6} $P$ is $3$-connected. \end{lemma} \begin{proof} From the last lemma, if $(A,B)$ is a $k$-separation of $P$ for some $k$ in $\{1,2\}$ and $z \in A$, then $ (A\cup X_1\cup z,B)$ is a $k$-separation of $M+z$; a contradiction. \end{proof} \begin{lemma} \label{3one} If $y \in Y_1$ and $\sqcap(X_1,\{y\}) = 1$, then $r(Y_1 - y) = 4$. \end{lemma} \begin{proof} By Lemma~\ref{claim1y1}, $r(Y_1 - y) > 2$. If $r(Y_1 - y) = 3$, then $(X_1 \cup y, Y_1 - y)$ is a $3$-separation\ violating the choice of $(X_1,Y_1)$. \end{proof} \begin{lemma} \label{3m} Suppose $y \in Y_1$ and $r(Y_1 - y) = 4$. If $m$ is a line such that $\{m\}$ is $2$-separating in $M\backslash y$, then $m \in Y_1 - y$. \end{lemma} \begin{proof} We have $1 = r(\{m\}) + r(E - \{y,m\}) - r(M\backslash y)$. Thus $r(E - \{y,m\}) = r(M) - 1$. Suppose $m \not\in Y_1 - y$. Then $E- \{y,m\}$ contains $Y_1 - y$ and so spans $y$. Thus $r(E - \{y,m\}) = r(M\backslash m) = r(M)$; a contradiction. \end{proof} The next four lemmas will help eliminate many of the possibilities for $P$. \begin{lemma} \label{parallel} If $c$ is skew to $X_1$ in $M$, and $M/c$ has $a$ and $b$ as parallel lines, then $M/c$ is $3$-connected. \end{lemma} \begin{proof} Assume $(A,B)$ is a $k$-separation of $M/c$ for some $k$ in $\{1,2\}$ where $|A| \le |B|$. If $\{a,b\} \subseteq Z$ for some $Z$ in $\{A,B\}$, and $\{Z,W\} = \{A,B\}$, then $r(Z \cup c) + r(W \cup c) - r(M) = k+1$. 
But $c$ is skew to $W$ since $W \subseteq X_1$, so $(Z \cup c,W)$ is a $k$-separation of $M$; a contradiction. We may now assume that $a \in A$ and $b \in B$. Then $(A \cup b, B-b)$ is a $k$-separation of $M/c$ with $\{a,b\} \subseteq A \cup b$ and this possibility has already been eliminated. \end{proof} \begin{lemma} \label{earlier} If $c$ is skew to each of $a$, $b$, and $X_1$ in $M$, then $M/c$ has no c-minor isomorphic to $N$. \end{lemma} \begin{proof} We see that $M/c$ has $a$ and $b$ as parallel lines. Since $(M,N)$ is a counterexample to the theorem, we obtain this lemma as a direct consequence of the last one. \end{proof}
3,722
142,722
en
train
0.91.40
We will now work with the $2$-polymatroid $(M+z)|(Y_1 \cup z)$, which we rename $P$. This has rank 4 and consists of four lines, $z, a,b,$ and $c$. \begin{lemma} \label{claim5.5} If $B \subseteq Y_1$ and $A = Y_1 - B$, then $$\sqcap_P(A \cup z, B) = \sqcap_{M+z}(A \cup X_1 \cup z, B).$$ \end{lemma} \begin{proof} Since $P = (M+z)|(Y_1 \cup z)$, we can do all of these local connectivity calculations in $M+z$. Now $ \sqcap(A \cup z, X_1) = \sqcap(A \cup z, X_1\cup z)$, so $$2 = \sqcap(Y_1 \cup z, X_1) \ge \sqcap(A \cup z, X_1) = \sqcap(A \cup z, X_1\cup z) \ge 2.$$ Thus $$r(A \cup z) - 2 = r(A \cup z \cup X_1) - r(X_1).$$ Hence \begin{align*} \sqcap(A \cup z, B) & = r(A \cup z) + r(B) - r(A\cup z \cup B)\\ & = r(A \cup z \cup X_1) - r(X_1) + 2 + r(B) - r(Y_1)\\ & = r(A \cup z \cup X_1) + r(B) - [r(X_1) + r(Y_1) - 2]\\ & = r(A \cup z \cup X_1) + r(B) - r(M)\\ & = \sqcap(A \cup X_1 \cup z, B). \end{align*} \end{proof} \begin{lemma} \label{claim6} $P$ is $3$-connected. \end{lemma} \begin{proof} From the last lemma, if $(A,B)$ is a $k$-separation of $P$ for some $k$ in $\{1,2\}$ and $z \in A$, then $ (A\cup X_1\cup z,B)$ is a $k$-separation of $M+z$; a contradiction. \end{proof} \begin{lemma} \label{3one} If $y \in Y_1$ and $\sqcap(X_1,\{y\}) = 1$, then $r(Y_1 - y) = 4$. \end{lemma} \begin{proof} By Lemma~\ref{claim1y1}, $r(Y_1 - y) > 2$. If $r(Y_1 - y) = 3$, then $(X_1 \cup y, Y_1 - y)$ is a $3$-separation\ violating the choice of $(X_1,Y_1)$. \end{proof} \begin{lemma} \label{3m} Suppose $y \in Y_1$ and $r(Y_1 - y) = 4$. If $m$ is a line such that $\{m\}$ is $2$-separating in $M\backslash y$, then $m \in Y_1 - y$. \end{lemma} \begin{proof} We have $1 = r(\{m\}) + r(E - \{y,m\}) - r(M\backslash y)$. Thus $r(E - \{y,m\}) = r(M) - 1$. Suppose $m \not\in Y_1 - y$. Then $E- \{y,m\}$ contains $Y_1 - y$ and so spans $y$. Thus $r(E - \{y,m\}) = r(M\backslash m) = r(M)$; a contradiction. \end{proof} The next four lemmas will help eliminate many of the possibilities for $P$. 
\begin{lemma} \label{parallel} If $c$ is skew to $X_1$ in $M$, and $M/c$ has $a$ and $b$ as parallel lines, then $M/c$ is $3$-connected. \end{lemma} \begin{proof} Assume $(A,B)$ is a $k$-separation of $M/c$ for some $k$ in $\{1,2\}$ where $|A| \le |B|$. If $\{a,b\} \subseteq Z$ for some $Z$ in $\{A,B\}$, and $\{Z,W\} = \{A,B\}$, then $r(Z \cup c) + r(W \cup c) - r(M) = k+1$. But $c$ is skew to $W$ since $W \subseteq X_1$, so $(Z \cup c,W)$ is a $k$-separation of $M$; a contradiction. We may now assume that $a \in A$ and $b \in B$. Then $(A \cup b, B-b)$ is a $k$-separation of $M/c$ with $\{a,b\} \subseteq A \cup b$ and this possibility has already been eliminated. \end{proof} \begin{lemma} \label{earlier} If $c$ is skew to each of $a$, $b$, and $X_1$ in $M$, then $M/c$ has no c-minor isomorphic to $N$. \end{lemma} \begin{proof} We see that $M/c$ has $a$ and $b$ as parallel lines. Since $(M,N)$ is a counterexample to the theorem, we obtain this lemma as a direct consequence of the last one. \end{proof} \begin{lemma} \label{earlybird} Assume that $M\backslash b$ has a c-minor isomorphic to $N$ and that $P\backslash b$ has rank $4$, has $c$ skew to each of $a$ and $z$, and has $\sqcap(\{a\},\{z\}) = 1$. Then $M/c$ has a c-minor isomorphic to $N$. \end{lemma} \begin{proof} Let $(A,C)$ be a non-trivial $2$-separation\ of $M\backslash b$. If $\{a,c\}$ is contained in $A$ or $C$, then $M$ has a $2$-separation; a contradiction. Thus we may assume that $a \in A$ and $c \in C$. Now $c$ is skew to $C - c$ so $(A \cup c, C-c)$ is 2-separating in $M\backslash b$. Hence $(A \cup c \cup b, C - c)$ is 2-separating in $M$. Thus $C - c$ consists of a point $d$ of $M$. Now, by Lemma~\ref{3m}, the only $2$-separating lines in $M\backslash b$ can be $a$ and $c$. But $a$ is not 2-separating. Thus $(M\backslash b)^{\flat} = M\backslash b{\underline{\downarrow}\,} c$, so $c$ is a point of $M\backslash b{\underline{\downarrow}\,} c$. 
The rank of this $2$-polymatroid is $r(M) - 1$, and it has $\{c,d\}$ as a series pair since $A$ has rank $r(M) - 2$ in it. Thus $M\backslash b{\underline{\downarrow}\,} c/c$, and hence $M/c$, has a c-minor isomorphic to $N$. \end{proof} \begin{lemma} \label{earlybird2} If $\sqcap(\{a\},\{z\}) = 1$ and both $b$ and $c$ are skew to each other and to $z$, then $M\backslash a$ has no c-minor isomorphic to $N$. \end{lemma} \begin{proof} Assume that $M\backslash a$ has a c-minor isomorphic to $N$. Let $(B,C)$ be a $k$-separation of $M\backslash a$ for some $k$ in $\{1,2\}$. If $B$ or $C$ contains $\{b,c\}$, then $M$ has a $k$-separation. Thus we may assume that $b \in B$, that $c \in C$, and that $|B| \ge |C|$. Then $b$ is skew to $B- b$, so the partition $(B-b,C \cup b \cup a)$ of $E(M)$ shows that $M$ is not $3$-connected; a contradiction. \end{proof} By Lemma~\ref{claim1y1}, for all $y$ in $Y_1$, we have $\sqcap(\{y\},\{z\}) \in \{0,1\}$. We shall treat the possibilities for $P$ based on the number $\theta$ of members $y$ of $Y_1$ for which $\sqcap(\{y\},\{z\}) = 1$. The most difficult case is when $\theta = 3$ and we will treat that after we deal with the cases when $\theta=2$ and when $\theta = 1$. \begin{lemma} \label{3two} $\theta \neq 2$. \end{lemma} \begin{proof} Suppose that $\sqcap(\{a\},\{z\}) = 1 = \sqcap(\{b\},\{z\})$ and $\sqcap(\{c\},\{z\}) = 0$. Then, by Lemma~\ref{3one}, $r(\{b,c\}) = 4 = r(\{a,c\})$. Thus, by Lemma~\ref{earlier}, $M/c$ has no c-minor isomorphic to $N$. By Lemma~\ref{earlybird}, neither $M\backslash a$ nor $M\backslash b$ has a c-minor isomorphic to $N$. Thus, without loss of generality, we may assume that $M/a$ has a c-minor isomorphic to $N$. Now, in $M/a$, we have $\{b,c\}$ as a $2$-separating set where $c$ is a line and $b$ is either a point on that line or is a parallel line. Thus, by Lemma~\ref{claim1}, $M/a\backslash b$, and hence $M\backslash b$, has a c-minor isomorphic to $N$; a contradiction. 
\end{proof} We can exploit duality to eliminate the case when $\theta = 1$. \begin{lemma} \label{3three} $\theta \neq 1$. \end{lemma} \begin{proof} Suppose that $\sqcap(\{a\},\{z\}) = 1$ and $\sqcap(\{b\},\{z\}) = 0 = \sqcap(\{c\},\{z\})$. Then, by Lemma~\ref{3one}, $r(\{b,c\}) = 4$. By Lemma~\ref{general4}, for $y$ in $Y_1$, we have $\sqcap^*(\{y\}, X_1) = \lambda_{M/(Y-y_1)}(\{y\})$. Since $\{b,c\}$ spans $a$ in $M$, we deduce that $\sqcap^*(\{a\},X_1) = 0$. If $\sqcap^*(\{b\},X_1) = 1 = \sqcap^*(\{c\},X_1)$, then $\theta = 2$ in $M^*$ so the result follows by Lemma~\ref{3two}. Thus, we may assume, by symmetry, that $\sqcap^*(\{b\},X_1) = 0$. Hence $\{a,c\}$ spans $b$ in $M$, so $r(\{a,c\}) = 4$. Thus, by Lemma~\ref{earlier}, $M/c$ does not have a c-minor isomorphic to $N$. By Lemma~\ref{earlybird}, $M\backslash b$ has no c-minor isomorphic to $N$. If $\sqcap(\{a\},\{b\}) = 0$, then, by symmetry, the argument of the last two sentences shows that neither $M/b$ nor $M\backslash c$ has a c-minor isomorphic to $N$. Thus both $b$ and $c$ must be in every c-minor of $M$ isomorphic to $N$; a contradiction. We deduce that $\sqcap(\{a\},\{b\}) = 1$. By Lemma~\ref{earlybird2}, $M\backslash a$ has no c-minor isomorphic to $N$. Suppose $M/a$ has a c-minor isomorphic to $N$. In $M/a$, we see that $\{c,b\}$ is a 2-separating set with $c$ as a line and $b$ as a point on it. Hence, by Lemma~\ref{claim1}, $M/a\backslash b$, and so $M\backslash b$, has a c-minor isomorphic to $N$; a contradiction. We conclude that $M/a$ has no c-minor isomorphic to $N$. It follows that $a$ is in every c-minor of $M$ isomorphic to $N$. Thus $M/b$ has $N$ as a c-minor. In $M/b$, we see that $a$ is a point on the line $c$. Suppose that $a$ is parallel to some point $e$, say. Then $e \in X_1$. Moreover, $M/b\backslash e$, and hence $M\backslash e$, has a c-minor isomorphic to $N$. Now $r(X_1 \cup \{a,b\}) = r(X_1) + 2$. 
Thus \begin{align*} r(X_1) + 1 + 3 & = r(X_1 \cup a) + r(\{a,b,e\})\\ & \ge r(\{a,e\}) + r(X_1 \cup \{a,b\})\\ & = r(\{a,e\}) + r(X_1) + 2. \end{align*} Hence $r(\{a,e\}) = 2$, so $e$ lies on $a$ in $M$. Thus $M\backslash e$ is $3$-connected\ having a c-minor isomorphic to $N$; a contradiction. We deduce that, in $M/b$, the point $a$ is not parallel to another point, so $M/b$ is simple. We complete the proof by showing that $M/b$ is $3$-connected. Suppose it has $(A,C)$ as a $2$-separation. If $A$ or $C$, say $A$, contains $\{a,c\}$, then $b$ is skew to $C$, so $(A\cup b,C)$ is a $2$-separation of $M$; a contradiction. Thus, we may assume that $a \in A$ and $c \in C$. Then, as $a$ is a point on the line $c$ in $M/b$, we see that $(A - a,C\cup a)$ is 2-separating in $M/b$. It is not a 2-separation otherwise we obtain a contradiction\ as before. It follows that $A$ is a parallel pair of points in $M/b$, contradicting the fact that $M/b$ is simple. \end{proof}
3,788
142,722
en
train
0.91.41
Next we eliminate the case when $\theta = 3$. The core of the argument in this case mimics the argument used to prove Tutte's Triangle Lemma for matroids (see, for example, \cite[Lemma 8.7.7]{oxbook}). \begin{lemma} \label{3five} $\theta \neq 3$. \end{lemma} \begin{proof} Assume that $\sqcap(\{a\},\{z\}) = \sqcap(\{b\},\{z\}) = \sqcap(\{c\},\{z\}) = 1$. Then, by Lemma~\ref{3one}, $r(\{a,b\}) = r(\{b,c\}) = r(\{a,c\}) = 4$. First we show the following. \begin{sublemma} \label{two2} There are at least two members $y$ of $Y_1$ such that $M\backslash y$ has a c-minor isomorphic to $N$. \end{sublemma} Assume that this fails. Since $|Y_1 - E(N)| \ge 2$, there is an element, say $a$, of $Y_1 - E(N)$ such that $M/a$ has $N$ as a c-minor. In $M/a$, we see that $b$ and $c$ are parallel lines and $\{b,c\}$ is 2-separating. Thus, by Lemma~\ref{claim1}, each of $M/a\backslash b$ and $M/a\backslash c$ has special $N$-minors. This contradiction\ implies that \ref{two2} holds. We now assume that both $M\backslash a$ and $M\backslash b$ have special $N$-minors. Clearly, $M\backslash a$ has $b$ and $c$ as 2-separating lines, and, by Lemma~\ref{3m}, these are the only 2-separating lines in $M\backslash a$. Thus $(M\backslash a)^{\flat} = M\backslash a {\underline{\downarrow}\,} b {\underline{\downarrow}\,} c$. Symmetrically, $(M\backslash b)^{\flat} = M\backslash b {\underline{\downarrow}\,} a {\underline{\downarrow}\,} c$. As the theorem fails, neither $(M\backslash a)^{\flat}$ nor $(M\backslash b)^{\flat}$ is $3$-connected. Thus each of $M{\underline{\downarrow}\,} c \backslash a$ and $M{\underline{\downarrow}\,} c \backslash b$ has non-trivial 2-separations. It will be convenient to work in the $2$-polymatroid $M{\underline{\downarrow}\,} c$, which we shall rename $M_c$. Let $(X_a,Y_a)$ and $(X_b,Y_b)$ be non-trivial 2-separations of $M_c\backslash a$ and $M_c\backslash b$, respectively, with $b$ in $Y_a$ and $a$ in $Y_b$. Now it is straightforward to check the following. 
\begin{sublemma} \label{zex} If $Z \subseteq X_1$ and $e \in \{a,b\}$, then $\sqcap_{M}(Z, \{e\}) = \sqcap_{M_c}(Z,\{e\})$. \end{sublemma} We deduce that \begin{sublemma} \label{two2.5} $\sqcap_{M_c}(X_1,\{a\}) = 1 = \sqcap_{M_c}(X_1,\{b\})$. \end{sublemma} Next we show that \begin{sublemma} \label{two3} $c \in X_a \cap X_b$. \end{sublemma} Suppose $c$ in $Y_a$. Since $\{c,b\}$ spans $a$ in $M_c$, it follows that $(X_a,Y_a \cup a)$ is a 2-separation of $M_c$ and hence of $M$; a contradiction. We deduce that $c \in X_a$ and, by symmetry, \ref{two3} holds. \begin{sublemma} \label{zztop} For $Z \subseteq X_1$, if $\sqcap_M(Z,\{a\}) = 1 = \sqcap_M(Z,\{b\})$, then $\sqcap_M(Z,\{a,b\}) = 2.$ \end{sublemma} Assume $\sqcap_M(Z,\{a,b\}) < 2.$ Then $\sqcap_M(Z,\{a,b\}) = \sqcap_M(Z,\{a\}) = 1$. Thus $$r(Z) + r(\{a,b\}) - r(Z \cup \{a,b\}) = r(Z) + r(\{a\}) - r(Z \cup a),$$ so $r(\{a,b\}) - r(\{a\}) = r(Z \cup \{a,b\}) - r(Z \cup a)$. Hence $b$ is skew to $Z \cup a$, so $b$ is skew to $Z$; a contradiction. We deduce that \ref{zztop} holds. \begin{sublemma} \label{zztop2} For $Z \subseteq X_1$, if $\sqcap_{M_c}(Z,\{a\}) = 1 = \sqcap_{M_c}(Z,\{b\})$, then $\sqcap_{M_c}(Z,\{a,b\}) = 2.$ \end{sublemma} By \ref{zex}, $\sqcap_{M_c}(Z,\{a\}) = \sqcap_{M}(Z,\{a\})$. Moreover, \begin{align*} \sqcap_{M_c}(Z,\{a,b\}) & = r_{M_c}(Z) + r_{M_c}(\{a,b\}) - r_{M_c}( Z \cup \{a,b\})\\ & = r_M(Z) + [r_M(\{a,b\}) - 1] - [ r_{M}( Z \cup \{a,b\}) -1]\\ & = \sqcap_{M}(Z,\{a,b\}). \end{align*} Thus \ref{zztop2} follows immediately from \ref{zztop}. 
\begin{sublemma} \label{zztop3} Assume $Z \subseteq X_1$ and $\sqcap_{M_c}(Z,\{a,b\}) = 2.$ Then $c \in {\rm cl}_{M_c}(Z).$ \end{sublemma} To see this, note that $$r_{M_c}(Z \cup \{a,b,c\}) = r_{M_c}(Z \cup \{a,b\}) = r_{M_c}(Z) + r_{M_c}(\{a,b\}) - 2 = r_{M_c}(Z) + 1.$$ By submodularity, $$r_{M_c}(E - \{a,b\}) + r_{M_c}(Z \cup \{a,b,c\}) \ge r(M_c) + r_{M_c}(Z \cup c).$$ Thus $$r(M_c) - 1 + r_{M_c}(Z) + 1 \ge r(M_c) + r_{M_c}(Z \cup c).$$ Hence $r_{M_c}(Z) \ge r_{M_c}(Z \cup c)$ and \ref{zztop3} holds. \begin{sublemma} \label{two4} Neither $a$ nor $b$ has a point on it in either $M$ or $M_c$. \end{sublemma} Assume there is a point $e$ on $a$ in $M$. Then $M\backslash e$ is $3$-connected. Moreover, in $(M\backslash b)^{\flat}$, we see that $e$ is parallel to $a$ so $(M\backslash b)^{\flat}\backslash e$, and hence $M\backslash e$, has a c-minor isomorphic to $N$; a contradiction. We conclude that \ref{two4} holds. The next step in the proof of Lemma~\ref{3five} is to show that \begin{sublemma} \label{two5} $M_c\backslash a,b$ is $2$-connected. \end{sublemma} Let $(A,B)$ be a 1-separation of $M_c\backslash a,b$ having $c$ in $A$. Then \begin{equation} \label{abc} r_{M_c}(A) + r_{M_c}(B) = r(M_c \backslash a,b) = r(M_c) - 1 = r(M) - 2. \end{equation} Thus \begin{multline*} r_{M_c}(A \cup a) + r_{M_c}(B) - r(M_c)\\ \shoveleft{\hspace*{0.8in}= r_{M_c}(A) + r_{M_c}(\{a\}) - \sqcap_{M_c}(A,\{a\}) + r_{M_c}(B) - r(M_c)}\\ \shoveleft{\hspace*{0.8in}= [r_{M_c}(A) + r_{M_c}(B) - r(M_c) + 1] - 1 + r_{M_c}(\{a\}) - \sqcap_{M_c}(A,\{a\})}\\ \shoveleft{\hspace*{0.635in}= 0 - 1 + 2 - \sqcap_{M_c}(A,\{a\}) = 1 - \sqcap_{M_c}(A,\{a\}).} \end{multline*} If $\sqcap_{M_c}(A,\{a\}) = 1$, then $(A \cup a \cup b, B)$ is a 1-separation of $M_c$ and hence of $M$; a contradiction. We deduce that $\sqcap_{M_c}(A,\{a\}) = 0$ and $(A\cup a \cup b, B)$ is 2-separating in $M_c$ and hence in $M$. Thus $B$ consists of a point, say $d$, of $M$. Moreover, $r_{M_c}(A \cup a) = r(M_c)$. 
Thus, as $\sqcap_{M_c}(A,\{a\}) = 0$, we see that \begin{equation} \label{abcd} r_{M_c}(A) = r(M_c) - 2. \end{equation} Still working towards proving \ref{two5}, we show next that \begin{sublemma} \label{two6} $\{b,d\}$ is a series pair of points in $(M\backslash a)^{\flat}$. \end{sublemma} Recall that $(M\backslash a)^{\flat} = M_c\backslash a {\underline{\downarrow}\,} b$. Now $$r_{M_c}(\{d,b\}) + r_{M_c}(A) - r(M_c\backslash a) \le 3 + r(M_c) - 2 - r(M_c) = 1.$$ Thus $\{d,b\}$ is 2-separating in $M_c\backslash a$. It follows that it is also 2-separating in $M_c\backslash a {\underline{\downarrow}\,} b$, that is, in $(M\backslash a)^{\flat}$. But $d$ and $b$ are points in $(M\backslash a)^{\flat}$, which is $2$-connected. We deduce by \ref{two4} that \ref{two6} holds. By \ref{two6}, $(M\backslash a)^{\flat}/d$, and hence $M/d$, has a c-minor isomorphic to $N$. Next we show that \begin{sublemma} \label{two7} $(A- c,\{a,b,c\})$ is a $2$-separation of $M\backslash d$. \end{sublemma} By (\ref{abcd}), $r_{M_c}(A-c) \le r(M_c) - 2 = r(M) - 3$ and \ref{two7} follows. It follows from \ref{two7} and Lemma~\ref{newbix} that $M/d$ is $3$-connected\ unless $M$ has a pair $\{e,f\}$ of points such that $e$ and $f$ are parallel in $M/d$. Consider the exceptional case. Then $M$ has $\{d,e,f\}$ as a triangle. Then $\{e,f\} \subseteq A - c$. Thus, by \ref{two7}, $((A- c) \cup d,\{a,b,c\})$ is a $2$-separation of $M$; a contradiction. We conclude that \ref{two5} holds. By \ref{two5}, we deduce that \begin{sublemma} \label{har0} $\lambda_{M_c\backslash a,b}(X_a) = 1 = \lambda_{M_c\backslash a}(X_a)$ and $\lambda_{M_c\backslash a,b}(X_b) = 1 = \lambda_{M_c\backslash b}(X_b)$. \end{sublemma} Since $r(M_c \backslash a,b) = r(M_c\backslash a) - 1$, it follows from \ref{har0} and symmetry that \begin{sublemma} \label{har1} $r_{M_c}(Y_a - b) = r_{M_c}(Y_a) - 1$ and $r_{M_c}(Y_b - a) = r_{M_c}(Y_b) - 1$. 
\end{sublemma} It follows from this, symmetry, and the fact that $r_M(Y_a \cup c) > r_M(Y_a)$ that \begin{sublemma} \label{har2} $r_{M}(Y_a - b) = r_{M}(Y_a) - 1$ and $r_{M}(Y_b - a) = r_{M}(Y_b) - 1$. \end{sublemma} By uncrossing, \begin{align} \label{subm} 2 & = \lambda_{M_c\backslash a,b}(X_a) + \lambda_{M_c\backslash a,b}(Y_b - a) \nonumber\\ & \ge \lambda_{M_c\backslash a,b}(X_a\cap (Y_b - a)) + \lambda_{M_c\backslash a,b}(X_a \cup (Y_b - a)). \end{align} \begin{sublemma} \label{xeyfnot} $X_a\cap Y_b \neq \emptyset \neq X_b\cap Y_a.$ \end{sublemma}
3,745
142,722
en
train
0.91.42
If $\sqcap_{M_c}(A,\{a\}) = 1$, then $(A \cup a \cup b, B)$ is a 1-separation of $M_c$ and hence of $M$; a contradiction. We deduce that $\sqcap_{M_c}(A,\{a\}) = 0$ and $(A\cup a \cup b, B)$ is 2-separating in $M_c$ and hence in $M$. Thus $B$ consists of a point, say $d$, of $M$. Moreover, $r_{M_c}(A \cup a) = r(M_c)$. Thus, as $\sqcap_{M_c}(A,\{a\}) = 0$, we see that \begin{equation} \label{abcd} r_{M_c}(A) = r(M_c) - 2. \end{equation} Still working towards proving \ref{two5}, we show next that \begin{sublemma} \label{two6} $\{b,d\}$ is a series pair of points in $(M\backslash a)^{\flat}$. \end{sublemma} Recall that $(M\backslash a)^{\flat} = M_c\backslash a {\underline{\downarrow}\,} b$. Now $$r_{M_c}(\{d,b\}) + r_{M_c}(A) - r(M_c\backslash a) \le 3 + r(M_c) - 2 - r(M_c) = 1.$$ Thus $\{d,b\}$ is 2-separating in $M_c\backslash a$. It follows that it is also 2-separating in $M_c\backslash a {\underline{\downarrow}\,} b$, that is, in $(M\backslash a)^{\flat}$. But $d$ and $b$ are points in $(M\backslash a)^{\flat}$, which is $2$-connected. We deduce by \ref{two4} that \ref{two6} holds. By \ref{two6}, $(M\backslash a)^{\flat}/d$, and hence $M/d$, has a c-minor isomorphic to $N$. Next we show that \begin{sublemma} \label{two7} $(A- c,\{a,b,c\})$ is a $2$-separation of $M\backslash d$. \end{sublemma} By (\ref{abcd}), $r_{M_c}(A-c) \le r(M_c) - 2 = r(M) - 3$ and \ref{two7} follows. It follows from \ref{two7} and Lemma~\ref{newbix} that $M/d$ is $3$-connected\ unless $M$ has a pair $\{e,f\}$ of points such that $e$ and $f$ are parallel in $M/d$. Consider the exceptional case. Then $M$ has $\{d,e,f\}$ as a triangle. Then $\{e,f\} \subseteq A - c$. Thus, by \ref{two7}, $((A- c) \cup d,\{a,b,c\})$ is a $2$-separation of $M$; a contradiction. We conclude that \ref{two5} holds. 
By \ref{two5}, we deduce that \begin{sublemma} \label{har0} $\lambda_{M_c\backslash a,b}(X_a) = 1 = \lambda_{M_c\backslash a}(X_a)$ and $\lambda_{M_c\backslash a,b}(X_b) = 1 = \lambda_{M_c\backslash b}(X_b)$. \end{sublemma} Since $r(M_c \backslash a,b) = r(M_c\backslash a) - 1$, it follows from \ref{har0} and symmetry that \begin{sublemma} \label{har1} $r_{M_c}(Y_a - b) = r_{M_c}(Y_a) - 1$ and $r_{M_c}(Y_b - a) = r_{M_c}(Y_b) - 1$. \end{sublemma} It follows from this, symmetry, and the fact that $r_M(Y_a \cup c) > r_M(Y_a)$ that \begin{sublemma} \label{har2} $r_{M}(Y_a - b) = r_{M}(Y_a) - 1$ and $r_{M}(Y_b - a) = r_{M}(Y_b) - 1$. \end{sublemma} By uncrossing, \begin{align} \label{subm} 2 & = \lambda_{M_c\backslash a,b}(X_a) + \lambda_{M_c\backslash a,b}(Y_b - a) \nonumber\\ & \ge \lambda_{M_c\backslash a,b}(X_a\cap (Y_b - a)) + \lambda_{M_c\backslash a,b}(X_a \cup (Y_b - a)). \end{align} \begin{sublemma} \label{xeyfnot} $X_a\cap Y_b \neq \emptyset \neq X_b\cap Y_a.$ \end{sublemma} Suppose $X_a\cap Y_b = \emptyset$. Then $Y_b - a \subseteq Y_a - b$. Thus, by \ref{har1}, $\sqcap_{M_c}(Y_a - b,\{b\}) = 1 = \sqcap_{M_c}(Y_a - b,\{a\})$. Hence, by \ref{zztop2}, $\sqcap_{M_c}(Y_a - b,\{a,b\}) = 2$. Thus, by \ref{zztop3}, $c \in {\rm cl}_{M_c}(Y_a - b).$ It follows that $(Y_a \cup c, X_a - c)$ is 2-separating in $M_c \backslash a$. Thus $(Y_a \cup c \cup a, X_a - c)$ is 2-separating in $M$. As $M$ is $3$-connected, we deduce that $X_a$ consists of exactly two points, $c$ and $x$, say. If $r_{M_c}(\{x,c\}) = 1$, then, in $M$, we see that $x$ is a point that lies on the line $c$. Thus $M\backslash x$ is $3$-connected. As $(M\backslash a)^{\flat}$ has a c-minor isomorphic to $N$ and has $x$ and $c$ as a parallel pair of points, we deduce that $M\backslash x$ has a c-minor isomorphic to $N$; a contradiction. We conclude that $r_{M_c}(\{x,c\}) = 2$. Thus $\{x\}$ is 1-separating in $M_c$; a contradiction. 
We deduce that $X_a\cap Y_b \neq \emptyset$ and \ref{xeyfnot} follows by symmetry. We now choose the non-trivial 2-separation $(X_a, Y_a)$ of $M_c \backslash a$ such that $|X_a|$ is a minimum subject to the condition that $b \in Y_a$. Since $X_a\cap Y_b$ and $X_b\cap Y_a$ are both non-empty, we deduce from (\ref{subm}) and symmetry that $$\lambda_{M_c\backslash a,b}(X_a\cap Y_b) = 1 = \lambda_{M_c\backslash a,b}(X_b \cap Y_a).$$ We show next that \begin{sublemma} \label{haz5} $\lambda_{M_c\backslash a}(X_a\cap Y_b) = 1 = \lambda_{M_c\backslash b}(X_b \cap Y_a).$ \end{sublemma} We have $1 = r_{M_c}(X_a \cap Y_b) + r_{M_c}((Y_a - b) \cup X_b) - r(M_c \backslash a,b).$ But $r(M_c \backslash a,b) = r(M_c \backslash a) - 1$ and, by \ref{har1}, $r_{M_c}(Y_a - b) = r_{M_c}(Y_a) - 1$. Hence $r_{M_c}((Y_a - b) \cup X_b) = r_{M_c}(Y_a \cup X_b) - 1$. Thus \ref{haz5} follows by symmetry. By the choice of $X_a$ and the fact that $b$ and $c$ are the only 2-separating lines of $M\backslash a$, we deduce that $X_a \cap Y_b$ consists of a single point, say $w$. \begin{sublemma} \label{haz6} $X_a$ consists of a series pair $\{w,c\}$ in $M_c\backslash a$. \end{sublemma} Suppose $w \notin {\rm cl}_{M_c}(X_a - w)$. Then $( X_a - w, Y_a \cup w)$ violates the choice of $(X_a,Y_a)$ unless $|X_a - w| = 1$. In the exceptional case, $\{w,c\}$ is a series pair in $M_c\backslash a$. Now suppose that $w \in {\rm cl}_{M_c}(X_a - w)$. Then $w \in {\rm cl}_{M_c}(X_b)$. Thus $(X_b \cup w, Y_b - w)$ is a 2-separation of $M_c \backslash b$. But $Y_b - w$ avoids $X_a$ so we have a contradiction\ to \ref{xeyfnot} when we replace $(X_b,Y_b)$ by $(X_b \cup w, Y_b - w)$ unless $Y_b = \{a,w\}$. In the exceptional case, by \ref{har1}, $r(Y_b) = 2$ and we have a contradiction\ to \ref{two4}. We conclude that \ref{haz6} holds. Since $M_c \backslash a$ has $\{w,c\}$ as a series pair, it follows that $M_c \backslash a/w$ has a c-minor isomorphic to $N$. Thus so do $(M\backslash a)^{\flat}/w$ and $M/w$. 
In $M\backslash a$, we have $\{c,w\}$ and $\{b\}$ as 2-separating sets. Now $w \notin {\rm cl}_{M_c \backslash a}(X_1 - w)$. Hence $r_M(X_1 - w) = r_M(X_1) - 1 = r(M) - 3$. As $r(Y_1) = 4$, we deduce that $(X_1 - w, Y_1)$ is a $2$-separation\ in $M\backslash w$. Thus, by Lemma~\ref{newbix}, $M/w$ is $3$-connected\ unless $M$ has a triangle $T$ of points including $w$. In the exceptional case, $T- w \subseteq X_1 - w$, so $(X_1, Y_1)$ is a $2$-separation\ of $M$. This contradiction\ completes the proof of Lemma~\ref{3five}. \end{proof}
2,862
142,722
en
train
0.91.43
\begin{lemma} \label{3four} $\theta \neq 0$. \end{lemma} \begin{proof} Assume that $\theta= 0$. Thus $\sqcap(X_1,\{y\}) = 0$ for all $y$ in $Y_1$. We may assume that $\sqcap^*(X_1,\{y\}) = 0$ for all $y$ in $Y_1$ otherwise, in $M^*$, we have $\theta \in \{1,2,3\}$. Thus, for all $y$ in $Y_1$, we have $r(Y_1 - y) = r(Y_1) = 4$. Then, by Lemma~\ref{earlier}, none of $M/a$, $M/b$, nor $M/c$ has a c-minor isomorphic to $N$. Hence we may assume that $a$ and $b$ are deleted to get $N$. But, in $M\backslash a,b$, we see that $\{c\}$ is a component, so $c$ can be contracted to get $N$; a contradiction. \end{proof} \begin{proof}[Proof of Lemma~\ref{Step6+}.] By Lemma~\ref{y13}, a minimal non-$N$-$3$-separator $Y_1$ of $M$ having exactly three elements consists of three lines. Above, we looked at the number $\theta$ of members $y$ of $Y_1$ for which $\sqcap(X_1,\{y\}) = 1$. In Lemmas~\ref{3two} and \ref{3three}, we showed that $\theta \neq 2$ and $\theta \neq 1$, while Lemmas~\ref{3five} and \ref{3four} showed that $\theta \neq 3$ and $\theta \neq 0$. There are no remaining possibilities for $\theta$, so Lemma~\ref{Step6+} holds. \end{proof}
459
142,722
en
train
0.91.44
\section{A minimal non-$N$-$3$-separator with at least four elements} \label{fourel} By \ref{Step6}, we may now assume that $M$ has a minimal non-$N$-$3$-separator $Y_1$ having at least four elements. As before, we write $X_1$ for $E(M) - Y_1$. Our next goal is to prove \ref{Step7}, which we restate here for convenience. \begin{lemma} \label{dubya} Let $Y_1$ be a minimal non-$N$-$3$-separating set having at least four elements. Then $Y_1$ contains a doubly labelled element. \end{lemma} \begin{proof} Assume that the lemma fails. For each $e$ in $Y_1 - E(N)$, let $\nu(e)$ be equal to the unique member of $\{\mu(e), \mu^*(e)\}$ that is defined. Choose $\ell$ in $Y_1 - E(N)$ to minimize $\nu(\ell)$. By switching to the dual if necessary, we may suppose that $\nu(\ell) = \mu(\ell)$. Let $(A,B)$ be a $2$-separation\ of $M\backslash \ell$ where $A$ is the $N$-side and $|B| = \mu(\ell)$. We now apply Lemma~\ref{p124}. Part (ii) of that lemma does not hold otherwise, by Lemma~\ref{bubbly}, $Y_1 - \ell$ contains a doubly labelled element. Assume next that (iii) of Lemma~\ref{p124} holds. Then $\lambda_{M\backslash \ell}(Y_1 - \ell) = 2$ and $\lambda_{M\backslash \ell}(A \cap(Y_1 - \ell)) = 1= \lambda_{M\backslash \ell}(B \cap(Y_1 - \ell))$, while $\lambda_{M\backslash \ell}(A \cap X_1) = 2= \lambda_{M\backslash \ell}(B \cap X_1)$. Then using the partitions $(A\cap (Y_1 - \ell), A \cap X_1,B)$ and $(B\cap (Y_1 - \ell), B \cap X_1,A)$ as $(A,B,C)$ in Lemma~\ref{general3}, we deduce that $\sqcap(A \cap (Y_1 - \ell), A\cap X_1) = 1$ and $\sqcap(B \cap (Y_1 - \ell), B\cap X_1) = 1$. Now $M\backslash \ell$ is the 2-sum of $2$-polymatroids $M_A$ and $M_B$ having ground sets $A \cup q$ and $B \cup q$, respectively. Since $M\backslash \ell$ is $2$-connected, it follows by Proposition~\ref{connconn}, that each of $M_A$ and $M_B$ is $2$-connected. 
Now $\lambda_{M\backslash \ell}(B \cap (Y_1 - \ell))= \sqcap_{M\backslash \ell}(B \cap (Y_1 - \ell), (B \cap X_1) \cup A) = 1$ and $\sqcap_{M_B}(B \cap (Y_1 - \ell), B \cap X_1) = 1$. Noting that $M\backslash \ell = P(M_A,M_B)\backslash q$, we see that, in $P(M_A,M_B)$, we have $\sqcap(B \cap (Y_1 - \ell), (B \cap X_1) \cup A\cup q) = 1$. Hence $\sqcap_{M_B}(B \cap (Y_1 - \ell), (B \cap X_1) \cup q) = 1$. Thus $M_B$ is the 2-sum of two $2$-connected $2$-polymatroids $M_{B,Y}$ and $M_{B,X}$ having ground sets $(B \cap (Y_1 - \ell)) \cup s$ and $(B \cap X_1) \cup q \cup s$. Note that $M_B = P(M_{B,X},M_{B,Y})\backslash s$. Let $M'_B = P(M_{B,X},M_{B,Y})$ and consider $P(M_A,M'_B)$ noting that deleting $q$ and $s$ from this $2$-polymatroid gives $M\backslash \ell$. By Lemma~\ref{oswrules}(ii), $$\sqcap(A,B) + \sqcap(B \cap X_1, B \cap (Y_1 - \ell)) = \sqcap(A \cup (B \cap X_1),B\cap (Y_1 - \ell)) + \sqcap(A, B \cap X_1).$$ Since the first three terms in this equation equal one, \begin{equation} \label{labx} \sqcap(A, B \cap X_1) = 1. \end{equation} We deduce, by Lemma~\ref{claim1}(i) that if $y \in B \cap (Y_1 - \ell)$, then $M\backslash \ell\backslash y$ has a special $N$-minor. Now $M_{B,X}$ has $q$ and $s$ as points. We show next that \begin{sublemma} \label{notzero} $\lambda_{M_{B,X}/ s}(\{q\}) = 0$. \end{sublemma} Assume that $\lambda_{M_{B,X}/ s}(\{q\}) \neq 0$. When we contract $s$ in $M'_B$, the set $B \cap (Y_1 - \ell)$ becomes 1-separating. Moreover, in $M'_B/(B \cap (Y_1 - \ell))$, the element $s$ is a loop, so $M'_B\backslash s/(B \cap (Y_1 - \ell)) = M'_B/s/(B \cap (Y_1 - \ell))$. It follows that $\sqcap_{M\backslash \ell/(B \cap (Y_1 - \ell))}(A, B\cap X_1) = 1$. Hence, by Lemma~\ref{claim1}(ii), if $y \in B \cap (Y_1 - \ell)$, then $M\backslash \ell/ y$ has a special $N$-minor. Thus each $y$ in $B \cap (Y_1 - \ell)$ is doubly labelled. This contradiction\ completes the proof of \ref{notzero}. 
By \ref{notzero}, $\{q,s\}$ is a parallel pair of points in $M_{B,X}$. From considering $P(M_A,M'_B)$, we deduce that $\lambda_{M\backslash \ell}(B \cap X_1) = 1$. This contradiction\ implies that (iii) of Lemma~\ref{p124} does not hold. It remains to consider when (i) of Lemma~\ref{p124} holds. We now apply Lemma~\ref{p63rev} to get, because of the choice of $\ell$, that $\sqcap(\{y\},X_1) \neq 1$ for some $y$ in $Y_1 - \ell$. Then, by Lemma~\ref{dichotomy}, $\sqcap(Y_1 - y, X_1) = 0$ for all $y$ in $Y_1 - \ell$. Thus, by Lemma~\ref{prep65rev} and the choice of $\ell$, (iii)(b) rather than (iii)(a) of that lemma holds. Then $(X_1 - z, (Y_1 - \ell) \cup z)$ is a 2-separation of $M\backslash \ell$ having $X_1 - z$ as the $N$-side. Since $z$ is a point, we have a contradiction\ to Lemma~\ref{key}. \end{proof} The doubly labelled element found in the last lemma will be crucial in completing the proof of Theorem~\ref{modc}. We shall need another preliminary lemma. \begin{lemma} \label{series} Let $\ell$ be a doubly labelled element of $M$. Then $M\backslash \ell$ does not have a series pair of points $\{a,b\}$ such that $r(\{a,b,\ell\}) = 3$. \end{lemma} \begin{proof} Assume that $M\backslash \ell$ does have such a series pair $\{a,b\}$. By Lemma~\ref{Step0}, $\ell$ is a line. Thus $M/\ell$ has $\{a,b\}$ as a parallel pair of points or has $a$ or $b$ as a loop. In each case, $M$ has $a$ or $b$ as a doubly labelled point; a contradiction. \end{proof} We will now take $\ell$ to be a doubly labelled element of $Y_1$, a minimal non-$N$-$3$-separating set having at least four elements. \begin{lemma} \label{p125} There is a $2$-separating set $Q$ in $M\backslash \ell$ such that $Q \subseteq Y_1 - \ell$ and $|Q| \ge 2$ and contains no points. \end{lemma} \begin{proof} Suppose $\ell \not\in {\rm cl}(Y_1 - \ell)$. Then $(X_1, Y_1 - \ell)$ is a $2$-separation\ of $M\backslash \ell$ and $r(Y_1) = r(Y_1 - \ell) + 1$. 
Then, by Lemma~\ref{series}, $Y_1 - \ell$ does not consist of a series pair of points. Hence, by Lemma~\ref{key}, $Y_1 - \ell$ contains no points so the result holds by taking $Q =Y_1 - \ell$. We may now assume that $\ell \in {\rm cl}(Y_1 - \ell)$. Let $(A,B)$ be a $2$-separation\ of $M\backslash \ell$ where $A$ is the $N$-side and $|B| = \mu(\ell)$. Since $|B| \ge 3$, it follows by Lemma~\ref{key} that $B$ contains no points. If $B \subseteq Y_1 - \ell$, then the lemma holds by taking $Q = B$. Thus we may assume that $B \cap X_1 \neq \emptyset$. Since $X_1$ and $A$ are the $N$-sides of their respective separations and $|E(N)| \ge 4$, we see that $|A \cap X_1| \ge 2$. If $A \subseteq X_1$, then $B \supseteq Y_1 - \ell$, so $(A,B \cup \ell)$ is a $2$-separation\ of $M$; a contradiction. Likewise, if $B \subseteq X_1$, then $(A \cup \ell,B)$ is a $2$-separation\ of $M$; a contradiction. We conclude that $(A,B)$ crosses $(X_1,Y_1 - \ell)$. Since $|A \cap X_1| \ge 2$ and $\ell \in {\rm cl}(Y_1 - \ell)$, it follows that $\lambda_{M\backslash \ell}(A \cap X_1) \ge 2$ otherwise $(A \cap X_1, B \cup Y_1)$ is a $2$-separation\ of $M$; a contradiction. Then, by uncrossing, we deduce that $\lambda_{M\backslash \ell}(B \cap Y_1) \le 1$. Since $B$ contains no points, the lemma holds with $Q = B \cap Y_1$ unless this set contains a single line. Consider the exceptional case. As $|B| \ge 3$, we deduce that $|B \cap X_1| \ge 2$. Now $\lambda_{M\backslash \ell}(B \cap X_1) \ge 2$ otherwise, as $\ell \in {\rm cl}(Y_1 - \ell)$, we obtain the contradiction\ that $(B\cap X_1,A \cup Y_1)$ is a $2$-separation\ of $M$. Hence, by uncrossing, $\lambda_{M\backslash \ell}(A \cap Y_1) = 1$ and, as $|Y_1| \ge 4$, it follows using Lemma~\ref{key} that the lemma holds by taking $Q = A \cap Y_1$. \end{proof} \begin{lemma} \label{ab} The $2$-polymatroid $M\backslash \ell$ has a $2$-separation $(D_1,D_2)$ where $D_2$ has at least two elements, is contained in $Y_1 - \ell$, and contains no points. 
Moreover, either \begin{itemize} \item[(i)] $D_2 \cup \ell = Y_1$; and $\sqcap(D_1,\{\ell\}) = 0$ and $\sqcap(D_2,\{\ell\}) = 1$; or \item[(ii)] $Y_1 - \ell - D_2 \neq \emptyset$ and $\sqcap(D_1,\{\ell\}) = 0 = \sqcap(D_2,\{\ell\})$. \end{itemize} \end{lemma} \begin{proof} Let $D_2$ be the set $Q$ found in Lemma~\ref{p125} and let $D_1 = E(M\backslash \ell) - D_2$. Now $(D_1,D_2)$ is a $2$-separation\ of $M\backslash \ell$. Thus, there are the following four possibilities. \begin{itemize} \item[(I)] $\sqcap(D_1,\{\ell\}) = 1 = \sqcap(D_2,\{\ell\})$; \item[(II)] $\sqcap(D_1,\{\ell\}) = 1$ and $\sqcap(D_2,\{\ell\}) = 0$; \item[(III)] $\sqcap(D_1,\{\ell\}) = 0$ and $\sqcap(D_2,\{\ell\}) = 1$; and \item[(IV)] $\sqcap(D_1,\{\ell\}) = 0 = \sqcap(D_2,\{\ell\})$. \end{itemize} \begin{sublemma} \label{elim12} Neither (I) nor (II) holds. \end{sublemma} Suppose (I) or (II) holds. Then $\lambda_M(D_1 \cup \ell) = 2$, so $\lambda_M(D_2) =2$ and $|D_2| \ge 2$. Since $D_2$ contains no points and $D_2 \subseteq Y_1 - \ell$, we get a contradiction to the minimality of $Y_1$. Thus \ref{elim12} holds. \begin{sublemma} \label{case3} If (III) holds, then $D_1 \cap Y_1 = \emptyset$. \end{sublemma} As $\lambda_M(D_2 \cup \ell) = 2$, we must have that $D_1 \cap Y_1 = \emptyset$ otherwise $D_2 \cup \ell$ violates the minimality of $Y_1$. Thus \ref{case3} holds. \begin{sublemma} \label{case4} If (IV) holds, then $D_1 \cap Y_1 \neq \emptyset$. \end{sublemma} Suppose $D_1 \cap Y_1 = \emptyset$. Then $D_1 = X_1$ and $D_2 = Y_1 - \ell$. Thus $\lambda_M(X_1) > 2$ as $\sqcap(D_2,\{\ell\}) = 0$. This contradiction\ establishes that \ref{case4} holds and thereby completes the proof of the lemma. \end{proof}
3,951
142,722
en
train
0.91.45
\begin{lemma} \label{ab} The $2$-polymatroid $M\backslash \ell$ has a $2$-separation $(D_1,D_2)$ where $D_2$ has at least two elements, is contained in $Y_1 - \ell$, and contains no points. Moreover, either \begin{itemize} \item[(i)] $D_2 \cup \ell = Y_1$; and $\sqcap(D_1,\{\ell\}) = 0$ and $\sqcap(D_2,\{\ell\}) = 1$; or \item[(ii)] $Y_1 - \ell - D_2 \neq \emptyset$ and $\sqcap(D_1,\{\ell\}) = 0 = \sqcap(D_2,\{\ell\})$. \end{itemize} \end{lemma} \begin{proof} Let $D_2$ be the set $Q$ found in Lemma~\ref{p125} and let $D_1 = E(M\backslash \ell) - D_2$. Now $(D_1,D_2)$ is a $2$-separation\ of $M\backslash \ell$. Thus, there are the following four possibilities. \begin{itemize} \item[(I)] $\sqcap(D_1,\{\ell\}) = 1 = \sqcap(D_2,\{\ell\})$; \item[(II)] $\sqcap(D_1,\{\ell\}) = 1$ and $\sqcap(D_2,\{\ell\}) = 0$; \item[(III)] $\sqcap(D_1,\{\ell\}) = 0$ and $\sqcap(D_2,\{\ell\}) = 1$; and \item[(IV)] $\sqcap(D_1,\{\ell\}) = 0 = \sqcap(D_2,\{\ell\})$. \end{itemize} \begin{sublemma} \label{elim12} Neither (I) nor (II) holds. \end{sublemma} Suppose (I) or (II) holds. Then $\lambda_M(D_1 \cup \ell) = 2$, so $\lambda_M(D_2) =2$ and $|D_2| \ge 2$. Since $D_2$ contains no points and $D_2 \subseteq Y_1 - \ell$, we get a contradiction to the minimality of $Y_1$. Thus \ref{elim12} holds. \begin{sublemma} \label{case3} If (III) holds, then $D_1 \cap Y_1 = \emptyset$. \end{sublemma} As $\lambda_M(D_2 \cup \ell) = 2$, we must have that $D_1 \cap Y_1 = \emptyset$ otherwise $D_2 \cup \ell$ violates the minimality of $Y_1$. Thus \ref{case3} holds. \begin{sublemma} \label{case4} If (IV) holds, then $D_1 \cap Y_1 \neq \emptyset$. \end{sublemma} Suppose $D_1 \cap Y_1 = \emptyset$. Then $D_1 = X_1$ and $D_2 = Y_1 - \ell$. Thus $\lambda_M(X_1) > 2$ as $\sqcap(D_2,\{\ell\}) = 0$. This contradiction\ establishes that \ref{case4} holds and thereby completes the proof of the lemma. 
\end{proof} \begin{lemma} \label{abdual} The $2$-polymatroid $M/ \ell$ has a $2$-separation $(C_1,C_2)$ where $C_2$ contains at least two elements and is contained in $Y_1 - \ell$, and contains no points of $M$. Moreover, either \begin{itemize} \item[(i)] $C_2 \cup \ell = Y_1$; and $\sqcap(C_1,\{\ell\}) = 1$ and $\sqcap(C_2,\{\ell\}) = 2$; or \item[(ii)] $Y_1 - \ell - C_2 \neq \emptyset$ and $\sqcap(C_1,\{\ell\}) = 2 = \sqcap(C_2,\{\ell\})$. \end{itemize} \end{lemma} \begin{proof} We apply the preceding lemma to $M^*\backslash \ell$ recalling that $(M^*\backslash \ell)^{\flat} = (M/\ell)^*$ and that the connectivity functions of $M^*\backslash \ell$ and $M/\ell$ are equal. Thus $M/ \ell$ does indeed have a $2$-separation $(C_1,C_2)$ where $C_2$ contains at least two elements, is contained in $Y_1 - \ell$, and contains no points of $M^*$. Thus $C_2$ contains no points of $M$. Since $r(E - \ell) = r(M)$, one easily checks that $\sqcap^*(C_i, \{\ell\}) + \sqcap(C_j,\{\ell\}) = 2$ where $\{i,j\} = \{1,2\}$. The lemma now follows from the preceding one. \end{proof} \begin{lemma} \label{bb} The $2$-polymatroids $M\backslash \ell$ and $M/ \ell$ have $2$-separations $(D_1,D_2)$ and $(C_1,C_2)$, respectively, such that each of $D_2$ and $C_2$ contains at least two elements, both $D_2$ and $C_2$ are contained in $Y_1 - \ell$, and neither $D_2$ nor $C_2$ contains any points of $M$. Moreover, $Y_1 - \ell - D_2 \neq \emptyset \neq Y_1 - \ell - C_2$ and $\sqcap(D_1,\{\ell\}) = 0 = \sqcap(D_2,\{\ell\})$ while $\sqcap(C_1,\{\ell\}) = 2 = \sqcap(C_2,\{\ell\})$. \end{lemma} \begin{proof} Assume that (i) of Lemma~\ref{ab} holds. Then, as $D_2 = Y_1 - \ell$, we see that $\sqcap(Y_1 - \ell,\{\ell\}) = 1$. Thus (i) of Lemma~\ref{abdual} cannot hold. Moreover, if (ii) of Lemma~\ref{abdual} holds, then $\sqcap(C_2, \{\ell\}) = 2$. This is a contradiction\ as $\sqcap(D_2,\{\ell\}) = 1$ and $C_2 \subseteq D_2$. We conclude that (ii) of Lemma~\ref{ab} holds. 
If (i) of Lemma~\ref{abdual} holds, then $\sqcap(X_1, \{\ell\}) = 1$. But $X_1 \subseteq D_1$ and $\sqcap(D_1, \{\ell\}) = 0$; a contradiction. \end{proof} We now use the $2$-separations $(D_1,D_2)$ and $(C_1,C_2)$ of $M\backslash \ell$ and $M/ \ell$, respectively, found in the last lemma. \begin{lemma} \label{linemup} The partitions $(D_1,D_2)$ and $(C_1,C_2)$ have the following properties. \begin{itemize} \item[(i)] $\lambda_{M\backslash \ell}(C_1) = 3 = \lambda_{M\backslash \ell}(C_2)$; \item[(ii)] $(D_1,D_2)$ and $(C_1,C_2)$ cross; \item[(iii)] each of $D_1 \cap C_2, D_2 \cap C_2$, and $D_2 \cap C_1$ consists of a single line, and $C_2 \cup D_2 = Y_1 - \ell$; and \item[(iv)] $\lambda_{M\backslash \ell}(D_1 \cap C_1) = \lambda_{M}(D_1 \cap C_1) = 2.$ \end{itemize} \end{lemma} \begin{proof} We have $\sqcap(D_1,\{\ell\}) = 0 = \sqcap(D_2,\{\ell\})$ and $\sqcap(C_1,\{\ell\}) = 2 = \sqcap(C_2,\{\ell\})$. Thus neither $C_1$ nor $C_2$ is contained in $D_1$ or $D_2$, so (ii) holds. Moreover, as $r(C_1 \cup \ell) + r(C_2 \cup \ell) - r(M) = 3$, we see that (i) holds. To prove (iii) and (iv), we use an uncrossing argument. We have, for each $i$ in $\{1,2\}$, \begin{align*} 1 + 3 & = \lambda_{M\backslash \ell}(D_2) + \lambda_{M\backslash \ell}(C_i)\\ & \ge\lambda_{M\backslash \ell}(D_2\cap C_i) + \lambda_{M\backslash \ell}(D_2 \cup C_i). \end{align*} Since $D_2 \cap C_i \neq \emptyset$ and contains no points and $\ell \in {\rm cl}(C_j)$ where $j \neq i$, we deduce that $\lambda_{M\backslash \ell}(D_2 \cap C_i) = \lambda_{M}(D_2 \cap C_i) \ge 2$. Thus $\lambda_{M\backslash \ell}(D_2 \cup C_i) \le 2.$ Hence, as $\ell \in {\rm cl}(C_i)$, we see that \begin{equation} \label{addno} 2 \ge \lambda_{M\backslash \ell}(D_2 \cup C_i) = \lambda_M(D_2 \cup C_i \cup \ell). \end{equation} But $D_2 \cup C_2 \subseteq Y_1 - \ell$, so, by the definition of $Y_1$, we deduce that \begin{equation} \label{addno2} D_2 \cup C_2 = Y_1 - \ell \text{~and~} D_1 \cap C_1 = X_1. 
\end{equation} Moreover, as $\lambda_{M\backslash \ell}(D_2 \cup C_i) = 2$, we see that $\lambda_{M\backslash \ell}(D_2 \cap C_i) = 2$. Hence \begin{equation} \label{addno4} \lambda_M(D_2 \cap C_i) = 2. \end{equation} Since $D_1 \cap C_1 = X_1$ and $D_1 \cap C_2$ is non-empty containing no points, it follows from (\ref{addno}) that \begin{equation} \label{addno3} 2 = \lambda_{M\backslash \ell}(D_2 \cup C_i) = \lambda_M(D_2 \cup C_i \cup \ell) = \lambda_{M\backslash \ell}(D_1 \cap C_j) = \lambda_M(D_1 \cap C_j). \end{equation} Thus (iv) holds. By that and (\ref{addno4}), it follows, using the minimality of $Y_1$, that each of $D_1 \cap C_2, D_2 \cap C_2$, and $D_2 \cap C_1$ consists of a single line in $M$. Hence (iii) holds. \end{proof}
2,936
142,722
en
train
0.91.46
We now use the $2$-separations $(D_1,D_2)$ and $(C_1,C_2)$ of $M\backslash \ell$ and $M/ \ell$, respectively, found in the last lemma. \begin{lemma} \label{linemup} The partitions $(D_1,D_2)$ and $(C_1,C_2)$ have the following properties. \begin{itemize} \item[(i)] $\lambda_{M\backslash \ell}(C_1) = 3 = \lambda_{M\backslash \ell}(C_2)$; \item[(ii)] $(D_1,D_2)$ and $(C_1,C_2)$ cross; \item[(iii)] each of $D_1 \cap C_2, D_2 \cap C_2$, and $D_2 \cap C_1$ consists of a single line, and $C_2 \cup D_2 = Y_1 - \ell$; and \item[(iv)] $\lambda_{M\backslash \ell}(D_1 \cap C_1) = \lambda_{M}(D_1 \cap C_1) = 2.$ \end{itemize} \end{lemma} \begin{proof} We have $\sqcap(D_1,\{\ell\}) = 0 = \sqcap(D_2,\{\ell\})$ and $\sqcap(C_1,\{\ell\}) = 2 = \sqcap(C_2,\{\ell\})$. Thus neither $C_1$ nor $C_2$ is contained in $D_1$ or $D_2$, so (ii) holds. Moreover, as $r(C_1 \cup \ell) + r(C_2 \cup \ell) - r(M) = 3$, we see that (i) holds. To prove (iii) and (iv), we use an uncrossing argument. We have, for each $i$ in $\{1,2\}$, \begin{align*} 1 + 3 & = \lambda_{M\backslash \ell}(D_2) + \lambda_{M\backslash \ell}(C_i)\\ & \ge\lambda_{M\backslash \ell}(D_2\cap C_i) + \lambda_{M\backslash \ell}(D_2 \cup C_i). \end{align*} Since $D_2 \cap C_i \neq \emptyset$ and contains no points and $\ell \in {\rm cl}(C_j)$ where $j \neq i$, we deduce that $\lambda_{M\backslash \ell}(D_2 \cap C_i) = \lambda_{M}(D_2 \cap C_i) \ge 2$. Thus $\lambda_{M\backslash \ell}(D_2 \cup C_i) \le 2.$ Hence, as $\ell \in {\rm cl}(C_i)$, we see that \begin{equation} \label{addno} 2 \ge \lambda_{M\backslash \ell}(D_2 \cup C_i) = \lambda_M(D_2 \cup C_i \cup \ell). \end{equation} But $D_2 \cup C_2 \subseteq Y_1 - \ell$, so, by the definition of $Y_1$, we deduce that \begin{equation} \label{addno2} D_2 \cup C_2 = Y_1 - \ell \text{~and~} D_1 \cap C_1 = X_1. \end{equation} Moreover, as $\lambda_{M\backslash \ell}(D_2 \cup C_i) = 2$, we see that $\lambda_{M\backslash \ell}(D_2 \cap C_i) = 2$. 
Hence \begin{equation} \label{addno4} \lambda_M(D_2 \cap C_i) = 2. \end{equation} Since $D_1 \cap C_1 = X_1$ and $D_1 \cap C_2$ is non-empty containing no points, it follows from (\ref{addno}) that \begin{equation} \label{addno3} 2 = \lambda_{M\backslash \ell}(D_2 \cup C_i) = \lambda_M(D_2 \cup C_i \cup \ell) = \lambda_{M\backslash \ell}(D_1 \cap C_j) = \lambda_M(D_1 \cap C_j). \end{equation} Thus (iv) holds. By that and (\ref{addno4}), it follows, using the minimality of $Y_1$, that each of $D_1 \cap C_2, D_2 \cap C_2$, and $D_2 \cap C_1$ consists of a single line in $M$. Hence (iii) holds. \end{proof} For each $(i,j)$ in $\{(1,2),(2,2),(2,1)\}$, let $C_i \cap D_j = \{\ell_{ij}\}$. \begin{lemma} \label{ranks} The following hold. \begin{itemize} \item[(i)] $r(D_2) = 3$ so $r(D_1) = r(M) - 2$; \item[(ii)] $r(D_1) = r(X_1) + 1$; \item[(iii)] $r(Y_1) = 5$; and \item[(iv)] $r(C_2) = 4$. \end{itemize} \end{lemma} \begin{proof} Now $D_2$ consists of two lines, $\ell_{12}$ and $\ell_{22}$. Suppose first that $r(D_2) = 2$. Then both $M\backslash \ell_{12}$ and $M\backslash \ell_{22}$ are $3$-connected. Without loss of generality, $M/ \ell_{12}$ has $N$ as a c-minor. But $M/ \ell_{12}$ has $\ell_{22}$ as a loop, so $M\backslash \ell_{22}$ is $3$-connected\ having a c-minor isomorphic to $N$. Thus $r(D_2) \ge 3$. Now suppose that $r(D_2) = 4$. Then $r(D_1) = r(M) - 3$. Clearly $r(D_1 \cup \ell_{12}) \le r(M) - 1$. Now $D_1 \cup \ell_{12} \supseteq C_2$ so $r(D_1 \cup \ell_{12} \cup \ell) \le r(M) - 1$. Hence $\{\ell_{22}\}$ is 2-separating in $M$; a contradiction. Hence (i) holds. Since $C_2 \cup D_2 = Y_1 - \ell$, we see that $D_1 = X_1 \cup \ell_{21}$. Suppose $r(D_1) = r(X_1)$. As $X_1 \subseteq C_1$, we deduce that $\ell_{21} \in {\rm cl}(C_1)$. But $\ell \in {\rm cl}(C_1)$. 
Hence \begin{align*} r(M) + 3 & = r(C_1) + r(C_2)\\ & =r(C_1 \cup \ell) + r(C_2 \cup \ell)\\ & = r(C_1 \cup \ell\cup \ell_{21}) + r(C_2 \cup \ell)\\ & \ge r(C_1 \cup C_2 \cup \ell) + r(\{\ell,\ell_{21}\}). \end{align*} Thus $r(\{\ell,\ell_{21}\}) \le 3$, so $\sqcap(D_1,\{\ell\}) \ge 1$; a contradiction. Hence $r(D_1) \ge r(X_1) + 1$. Suppose $r(D_1) = r(X_1) + 2.$ Then $$r(M) + 1 = r(D_1) + r(D_2) = r(X_1) + 2 + 3,$$ so $r(X_1) = r(M) - 4$. Thus $r(Y_1) = 6$. Now $r(C_2) = r(C_2 \cup \ell)$. Thus $6 = r(Y_1) = r(Y_1 - \ell)$. Since $Y_1 - \ell$ consists of three lines, two of which are in $D_2$, we deduce that $r(D_2) = 4$; a contradiction\ to (i). We conclude that $r(D_1) = r(X_1) + 1$, that is, (ii) holds. Finally, as $r(D_2) = 3$, we see that $r(M) = r(D_1) + r(D_2) - 1 = [r(X_1) +1] + 3 - 1$. But $r(M) = r(X_1) + r(Y_1) - 2$. Thus $r(Y_1) = 5$, so (iii) holds. Moreover, $r(Y_1 - \ell) = 5$, that is, $r(C_2 \cup D_2) = 5.$ Now $C_2$ consists of two lines so $r(C_2) \le 4$. Thus \begin{align*} 4 + 3 & \ge r(C_2) + 3\\ & = r(C_2) + r(D_2)\\ & \ge r(C_2 \cup D_2) + r(C_2 \cap D_2)\\ & = 5 + r(\{\ell_{22}\})\\ & = 5 + 2. \end{align*} We deduce that $r(C_2) = 4$ so (iv) holds. \end{proof}
2,444
142,722
en
train
0.91.47
For each $(i,j)$ in $\{(1,2),(2,2),(2,1)\}$, let $C_i \cap D_j = \{\ell_{ij}\}$. \begin{lemma} \label{ranks} The following hold. \begin{itemize} \item[(i)] $r(D_2) = 3$ so $r(D_1) = r(M) - 2$; \item[(ii)] $r(D_1) = r(X_1) + 1$; \item[(iii)] $r(Y_1) = 5$; and \item[(iv)] $r(C_2) = 4$. \end{itemize} \end{lemma} \begin{proof} Now $D_2$ consists of two lines, $\ell_{12}$ and $\ell_{22}$. Suppose first that $r(D_2) = 2$. Then both $M\backslash \ell_{12}$ and $M\backslash \ell_{22}$ are $3$-connected. Without loss of generality, $M/ \ell_{12}$ has $N$ as a c-minor. But $M/ \ell_{12}$ has $\ell_{22}$ as a loop, so $M\backslash \ell_{22}$ is $3$-connected\ having a c-minor isomorphic to $N$. Thus $r(D_2) \ge 3$. Now suppose that $r(D_2) = 4$. Then $r(D_1) = r(M) - 3$. Clearly $r(D_1 \cup \ell_{12}) \le r(M) - 1$. Now $D_1 \cup \ell_{12} \supseteq C_2$ so $r(D_1 \cup \ell_{12} \cup \ell) \le r(M) - 1$. Hence $\{\ell_{22}\}$ is 2-separating in $M$; a contradiction. Hence (i) holds. Since $C_2 \cup D_2 = Y_1 - \ell$, we see that $D_1 = X_1 \cup \ell_{21}$. Suppose $r(D_1) = r(X_1)$. As $X_1 \subseteq C_1$, we deduce that $\ell_{21} \in {\rm cl}(C_1)$. But $\ell \in {\rm cl}(C_1)$. Hence \begin{align*} r(M) + 3 & = r(C_1) + r(C_2)\\ & =r(C_1 \cup \ell) + r(C_2 \cup \ell)\\ & = r(C_1 \cup \ell\cup \ell_{21}) + r(C_2 \cup \ell)\\ & \ge r(C_1 \cup C_2 \cup \ell) + r(\{\ell,\ell_{21}\}). \end{align*} Thus $r(\{\ell,\ell_{21}\}) \le 3$, so $\sqcap(D_1,\{\ell\}) \ge 1$; a contradiction. Hence $r(D_1) \ge r(X_1) + 1$. Suppose $r(D_1) = r(X_1) + 2.$ Then $$r(M) + 1 = r(D_1) + r(D_2) = r(X_1) + 2 + 3,$$ so $r(X_1) = r(M) - 4$. Thus $r(Y_1) = 6$. Now $r(C_2) = r(C_2 \cup \ell)$. Thus $6 = r(Y_1) = r(Y_1 - \ell)$. Since $Y_1 - \ell$ consists of three lines, two of which are in $D_2$, we deduce that $r(D_2) = 4$; a contradiction\ to (i). We conclude that $r(D_1) = r(X_1) + 1$, that is, (ii) holds. Finally, as $r(D_2) = 3$, we see that $r(M) = r(D_1) + r(D_2) - 1 = [r(X_1) +1] + 3 - 1$. 
But $r(M) = r(X_1) + r(Y_1) - 2$. Thus $r(Y_1) = 5$, so (iii) holds. Moreover, $r(Y_1 - \ell) = 5$, that is, $r(C_2 \cup D_2) = 5.$ Now $C_2$ consists of two lines so $r(C_2) \le 4$. Thus \begin{align*} 4 + 3 & \ge r(C_2) + 3\\ & = r(C_2) + r(D_2)\\ & \ge r(C_2 \cup D_2) + r(C_2 \cap D_2)\\ & = 5 + r(\{\ell_{22}\})\\ & = 5 + 2. \end{align*} We deduce that $r(C_2) = 4$ so (iv) holds. \end{proof} By proving the following lemma, we will establish the final contradiction\ that completes the proof of Theorem~\ref{modc}. \begin{lemma} \label{final} The $2$-polymatroid $M/ \ell_{22}$ is $3$-connected having a c-minor isomorphic to $N$. \end{lemma} \begin{proof} First we show the following. \begin{sublemma} \label{finalfirst} $\sqcap(D_1,\{\ell_{i2}\}) = 0$ for each $i$ in $\{1,2\}$. \end{sublemma} Suppose $\sqcap(D_1,\{\ell_{i2}\}) \ge 1$. Then $r(D_1 \cup \ell_{i2}) \le r(D_1) + 1$. But $\ell \in {\rm cl}(C_i) \subseteq {\rm cl}(D_1 \cup \ell_{i2})$, so $r(D_1 \cup \ell \cup \ell_{i2}) \le r(D_1) + 1$. Also $r(\{\ell_{j2}\}) = 2$ where $\{i,j\} = \{1,2\}$. Thus \begin{align*} r(D_1 \cup \ell \cup \ell_{i2})+ r(\{\ell_{j2}\}) & \le r(D_1) + 1 + 2\\ & = r(M) - 2 + 1 +2\\ & = r(M) + 1. \end{align*} Hence $\{\ell_{j2}\}$ is 2-separating in $M$; a contradiction. Hence \ref{finalfirst} holds. Now $M\backslash \ell$ has a c-minor isomorphic to $N$ and $\sqcap(D_1,D_2) = 1$. As $\sqcap(D_1,\{\ell_{i2}\}) = 0$, Lemma~\ref{obs1} implies that $\sqcap_{M/\ell_{i2}}(D_1,D_2 - \ell_{i2}) = 1$ for each $i$ in $\{1,2\}$. Thus, by Lemma~\ref{claim1}(ii), \begin{sublemma} \label{finalsecond} $M\backslash \ell/\ell_{i2}$ has a c-minor isomorphic to $N$ for each $i$ in $\{1,2\}$. \end{sublemma} It remains to show that $M/\ell_{22}$ is $3$-connected. This matroid is certainly $2$-connected. Next we show that \begin{sublemma} \label{end1} $\ell$ and $\ell_{21}$ are parallel lines in $M/\ell_{22}$. 
\end{sublemma} To see this, note that, by Lemma~\ref{ranks}(iv), $$r(C_2 \cup \ell) = r(C_2) = r(\{\ell_{21},\ell_{22}\}).$$ Also, for each $i$ in $\{1,2\}$, we have $\sqcap(\{\ell\}, \{\ell_{2i}\}) \le \sqcap(\{\ell\},D_i) = 0$, so \ref{end1} holds. Now take a fixed c-minor of $M\backslash \ell/\ell_{22}$ isomorphic to $N$; call it $N_1$. Let $(A' \cup \ell, B')$ be a $2$-separation\ of $M/\ell_{22}$ in which the non-$N_1$-side has maximum size and $\ell \not\in A'$. By Lemma~\ref{dualmu}, both $A' \cup \ell$ and $B'$ have at least three elements. \begin{sublemma} \label{ell12a'} $\ell_{21} \in A'$. \end{sublemma} To see this, note that, since $\ell$ and $\ell_{21}$ are parallel lines in $M/\ell_{22}$, if $\ell_{21} \in B'$, then $\ell \in {\rm cl}_{M/\ell_{22}}(B')$, so $\sqcap_{M/\ell_{22}}(A' \cup \ell, B') \ge 2$; a contradiction. \begin{sublemma} \label{ranksagain} $r_M(D_1 \cap C_1) = r(M) -3= r_{M/\ell_{22}}(D_1 \cap C_1)$. \end{sublemma} By Lemma~\ref{linemup}(iii), $D_1 \cap C_1 = X_1$. By Lemma~\ref{ranks}(i) and (ii), $r(D_1) = r(M) - 2$ and $r(D_1) = r(D_1 \cap C_1) + 1$, so $r_M(D_1 \cap C_1) = r(M) -3$. By \ref{finalfirst}, $\sqcap(D_1 \cap C_1, \{\ell_{22}\}) = 0$, so $r_M(D_1 \cap C_1) = r_{M/\ell_{22}}(D_1 \cap C_1)$. \begin{sublemma} \label{ranksagain2} $r_{M/\ell_{22}}((D_1 \cap C_1)\cup \ell_{12})= r(M) -2$. \end{sublemma} To see this, observe that \begin{align*} r_{M/\ell_{22}}((D_1 \cap C_1)\cup \ell_{12}) & = r((D_1 \cap C_1)\cup \ell_{12}\cup \ell_{22}) - 2\\ & = r((D_1 \cap C_1)\cup \ell_{12}\cup \ell \cup \ell_{22}) - 2 \text{~~as $\ell \in {\rm cl}(C_1)$;}\\ & = r(M\backslash \ell_{21}) - 2\\ &= r(M) - 2. \end{align*} By combining \ref{ranksagain} and \ref{ranksagain2}, we deduce that \begin{sublemma} \label{ell12not} $r_{M/\ell_{22}}(D_1 \cap C_1) = r_{M/\ell_{22}}((D_1 \cap C_1)\cup \ell_{12}) - 1$. 
\end{sublemma} Next we show that \begin{sublemma} \label{onetime} $\lambda_{M/\ell_{22}}(\{\ell_{12}, \ell_{21},\ell\}) = 1$ or $\lambda_{M/\ell_{22}}(\{\ell_{21},\ell\}) = 1$. \end{sublemma} Recall that $X_1 = D_1 \cap C_1$ and $Y_1 = \{\ell,\ell_{12},\ell_{21},\ell_{22}\}$. By uncrossing, we have \begin{align*} 1 + 2 & = \lambda_{M/\ell_{22}}(B') + \lambda_{M/\ell_{22}}(X_1)\\ & \ge \lambda_{M/\ell_{22}}(B' \cup X_1) + \lambda_{M/\ell_{22}}(B' \cap X_1). \end{align*} As $|B'| \ge 3$, it follows by \ref{ell12a'} that $|B'\cap X_1| \ge 2$. Suppose $\lambda_{M/\ell_{22}}(B' \cap X_1) = 1$. Now $B'\cap X_1 \subseteq D_1$, so $\sqcap(B' \cap X_1,\{\ell_{22}\}) \le \sqcap(D_1,\{\ell_{22}\}) \le 0$. Thus, by Lemma~\ref{obs1}, $ \lambda_{M}(B' \cap X_1) = 1$. This contradiction\ implies that $\lambda_{M/\ell_{22}}(B' \cap X_1) = 2$, so $1 = \lambda_{M/\ell_{22}}(B' \cup X_1)$. Now, by \ref{ell12a'}, $\ell_{21} \in A'$, so $E- \ell - (B' \cup X_1)$ is $\{\ell_{12}, \ell_{21},\ell\}$ or $\{\ell_{21},\ell\}$. Thus \ref{onetime} holds. Suppose $\lambda_{M/\ell_{22}}(\{\ell_{12}, \ell_{21},\ell\}) = 1$. Then, as $\ell_{22}$ is skew to $X_1$ in $M$, we deduce that $\lambda_{M}(\{\ell_{12}, \ell_{21},\ell, \ell_{22}\}) = 1$, that is, $\lambda_{M}(Y_1) = 1$; a contradiction. We conclude that $\lambda_{M/\ell_{22}}(\{\ell_{21},\ell\}) = 1$. By Lemma~\ref{obs1}, as $\lambda_{M}(D_1 \cap C_1) = 2$ and $\sqcap(D_1 \cap C_1, \{\ell_{22}\}) = 0$, we see that $\lambda_{M/\ell_{22}}(D_1 \cap C_1) = 2$. 
Thus, by \ref{ell12not} and Lemma~\ref{ranks}, \begin{align*} 2 & = r_{M/\ell_{22}}(D_1 \cap C_1) + r_{M/\ell_{22}}(\{\ell_{12},\ell_{21},\ell\}) - r(M/\ell_{22})\\ & = [r_{M/\ell_{22}}(D_1 \cap C_1) + 1] + [r_{M/\ell_{22}}(\{\ell_{12},\ell_{21},\ell\}) - 1] - r(M/\ell_{22})\\ & = r_{M/\ell_{22}}((D_1 \cap C_1) \cup \ell_{12}) + [r(Y_1) - 2 - 1] - r(M/\ell_{22})\\ & = r_{M/\ell_{22}}((D_1 \cap C_1) \cup \ell_{12}) + [r(C_2) - 2] - r(M/\ell_{22})\\ & = r_{M/\ell_{22}}((D_1 \cap C_1) \cup \ell_{12}) + r_{M/\ell_{22}}(\{\ell_{21},\ell\}) - r(M/\ell_{22})\\ & = \lambda_{M/\ell_{22}}(\{\ell_{21},\ell\}). \end{align*} This contradiction\ to \ref{onetime} completes the proof of the lemma and thereby finishes the proof of Theorem~\ref{modc}. \end{proof} \end{document}
3,989
142,722
en
train
0.92.0
\begin{document} \title{On a series of finite automata defining free transformation groups} \begin{abstract} We introduce two series of finite automata starting from the so-called Aleshin and Bellaterra automata. We prove that each automaton in the first series defines a free non-Abelian group while each automaton in the second series defines the free product of groups of order $2$. Furthermore, these properties are shared by disjoint unions of any number of distinct automata from either series. \end{abstract}
129
40,263
en
train
0.92.1
\section{Introduction}\lambdabel{main} A (Mealy) automaton over a finite alphabet $X$ is determined by the set of internal states, the state transition function and the output function. A finite (or finite-state) automaton has finitely many internal states. An initial automaton has a distinguished initial state. Any initial automaton over $X$ defines a transformation $T$ of the set $X^*$ of finite words in the alphabet $X$. That is, the automaton transduces any input word $w\in X^*$ into the output word $T(w)$. The transformation $T$ preserves the lengths of words and common beginnings. The set $X^*$ is endowed with the structure of a regular rooted tree so that $T$ is an endomorphism of the tree. A detailed account of the theory of Mealy automata is given in \cite{GNS}. The set of all endomorphisms of the regular rooted tree $X^*$ is of continuum cardinality. Any endomorphism can be defined by an automaton. However the most interesting are finite automaton transformations that constitute a countable subset. If $T_1$ and $T_2$ are mappings defined by finite initial automata over the same alphabet $X$, then their composition is also defined by a finite automaton over $X$. If a finite automaton transformation $T$ is invertible, then the inverse transformation is also defined by a finite automaton. Furthermore, there are simple algorithms to construct the corresponding composition automaton and inverse automaton. In particular, all invertible transformations defined by finite automata over $X$ constitute a transformation group $\mathcal{G}(X)$. This fact was probably first observed by Ho\v{r}ej\v{s} \cite{H}. A finite non-initial automaton $A$ over an alphabet $X$ defines a finite collection of transformations of $X^*$ corresponding to various choices of the initial state. Assuming all of them are invertible, these transformations generate a group $G(A)$, which is a finitely generated subgroup of $\mathcal{G}(X)$. 
We say that the group $G(A)$ is defined by the automaton $A$. The groups defined by finite automata were introduced by Grigorchuk \cite{G} in connection with the Grigorchuk group of intermediate growth. The finite automaton nature of this group has great impact on its properties. The formalization of these properties has resulted in the notions of a branch group (see \cite{BGS}), a fractal group (see \cite{BGN}), and, finally, the most general notion of a self-similar group \cite{N}, which covers all automaton groups. The main issue of this paper are free non-Abelian groups of finite automaton transformations. Also, we are interested in the free products of groups of order $2$ (such a product contains a free subgroup of index $2$). Brunner and Sidki \cite{BS} proved that the free group embeds into the group of finite automaton transformations over a $4$-letter alphabet. Olijnyk \cite{O1}, \cite{O2} showed that the group of finite automaton transformations over a $2$-letter alphabet contains a free group as well as free products of groups of order $2$. In the above examples, all automata are of linear algebraic origin. A harder problem is to present the free group as the group defined by a single finite non-initial automaton. This problem was solved by Glasner and Mozes \cite{GM}. They constructed infinitely many finite automata of algebraic origin that define transformation groups with various properties, in particular, free groups. A finite automaton that defines the free product of $3$ groups of order $2$ was found by Muntyan and Savchuk (see \cite{N} and Theorem \ref{main4} below). Actually, the first attempt to embed the free non-Abelian group into a group of finite automaton transformations was made by Aleshin \cite{A} a long ago. He introduced two finite initial automata over alphabet $\{0,1\}$ and claimed that two automorphisms of the rooted binary tree $\{0,1\}^*$ defined by these automata generate a free group. 
However the argument in \cite{A} seems to be incomplete. Aleshin's automata are depicted in Figure \ref{fig1} by means of Moore diagrams. The Moore diagram of an automaton is a directed graph with labeled edges. The vertices are the states of the automaton and edges are state transition routes. Each label consists of two letters from the alphabet. The left one is the input field, it is used to choose a transition route. The right one is the output generated by the automaton. Aleshin considered these automata as initial, with initial state $b$. \begin{figure} \caption{ \lambdabel{fig1} \end{figure} The Aleshin automata are examples of bi-reversible automata. This notion, which generalizes the notion of invertibility, was introduced in \cite{MNS} (see also \cite{GM}). The class of bi-reversible automata is in a sense opposite to the class of automata defining branch groups. All automata considered in this paper are bi-reversible. In this paper, we are looking for finite automata that define free non-Abelian groups of maximal rank, i.e., the free rank of the group is equal to the number of states of the automaton. Note that the automata constructed by Glasner and Mozes do not enjoy this property. For any of those automata, the transformations assigned to various internal states form a symmetric generating set so that the free rank of the group is half of the number of the states. Brunner and Sidki conjectured (see \cite{S}) that the first of two Aleshin's automata shown in Figure \ref{fig1} is the required one. The conjecture was proved in \cite{VV}. \begin{theorem}[\cite{VV}]\lambdabel{main1} The first Aleshin automaton defines a free group on $3$ generators. \end{theorem} In this paper we generalize and extend Theorem \ref{main1} in several directions. The two automata of Aleshin are related as follows. 
When the first automaton is in the state $c$, it is going to make transition to the state $a$ independently of the next input letter, which is sent directly to the output. The second automaton is obtained from the first one by inserting two additional states on the route from $c$ to $a$ (see Figure \ref{fig1}). For any integer $n\ge1$ we define a $(2n+1)$-state automaton $A^{(n)}$ of Aleshin type. Up to renaming of internal states, $A^{(n)}$ is obtained from the first Aleshin automaton by inserting $2n-2$ additional states on the route from $c$ to $a$ (for a precise definition, see Section \ref{series}); in particular, $A^{(1)}$ and $A^{(2)}$ are the Aleshin automata. The Moore diagram of the automaton $A^{(3)}$ is depicted in Figure \ref{fig6} below. Note that the number of internal states of an Aleshin type automaton is always odd. This is crucial for the proof of the following theorem. \begin{theorem}\lambdabel{main2} For any $n\ge1$ the automaton $A^{(n)}$ defines a free group on $2n+1$ generators. \end{theorem} Given a finite number of automata $Y^{(1)},\dots,Y^{(k)}$ over the same alphabet with disjoint sets of internal states $S_1,\dots,S_k$, we can regard them as a single automaton $Y$ with the set of internal states $S_1\cup\dots\cup S_k$. The automaton $Y$ is called the disjoint union of the automata $Y^{(1)},\dots,Y^{(k)}$ as its Moore diagram is the disjoint union of the Moore diagrams of $Y^{(1)},\dots,Y^{(k)}$. The group defined by $Y$ is generated by the groups $G(Y^{(1)}),\dots,G(Y^{(k)})$. We define the Aleshin type automata so that their sets of internal states are disjoint. Hence the disjoint union of any finite number of distinct automata of Aleshin type is well defined. \begin{theorem}\lambdabel{main3} Let $N$ be a nonempty set of positive integers and denote by $A^{(N)}$ the disjoint union of automata $A^{(n)}$, $n\in N$. Then the automaton $A^{(N)}$ defines a free group on $\sum_{n\in N}(2n+1)$ generators. 
\end{theorem} One consequence of Theorem \ref{main3} is that the $8$ transformations defined by the two Aleshin automata generate a free group on $8$ generators. In particular, any two of them generate a free non-Abelian group. Thus Aleshin's claim is finally justified. \begin{figure} \caption{ \lambdabel{fig2} \end{figure} The Bellaterra automaton $B$ is a $3$-state automaton over a $2$-letter alphabet. Its Moore diagram is depicted in Figure \ref{fig2}. The automaton $B$ coincides with its inverse automaton and hence all $3$ transformations defined by $B$ are involutions. Otherwise there are no more relations in the group $G(B)$. \begin{theorem}[\cite{N}]\lambdabel{main4} The Bellaterra automaton defines the free product of $3$ groups of order $2$. \end{theorem} Theorem \ref{main4} is due to Muntyan and Savchuk. It was proved during the 2004 summer school on automata groups at the Autonomous University of Barcelona and so the automaton $B$ was named after the location of the university. The Bellaterra automaton $B$ is closely related to the Aleshin automaton $A$. Namely, the two automata share the alphabet, internal states, and the state transition function while their output functions never coincide. We use this relation to define a series $B^{(1)},B^{(2)},\dots$ of automata of Bellaterra type. By definition, $B^{(n)}$ is a $(2n+1)$-state automaton obtained from $A^{(n)}$ by changing values of the output function at all elements of its domain. Also, we define a one-state automaton $B^{(0)}$ that interchanges letters $0$ and $1$ of the alphabet. All transformations defined by a Bellaterra type automaton are involutions. \begin{theorem}\lambdabel{main5} For any $n\ge0$ the automaton $B^{(n)}$ defines the free product of $2n+1$ groups of order $2$. \end{theorem} \begin{theorem}\lambdabel{main6} Let $N$ be a nonempty set of nonnegative integers and denote by $B^{(N)}$ the disjoint union of automata $B^{(n)}$, $n\in N$. 
Then the automaton $B^{(N)}$ defines the free product of $\sum_{n\in N}(2n+1)$ groups of order $2$. \end{theorem} Theorems \ref{main3} and \ref{main6} have the following obvious corollary. \begin{corollary}\lambdabel{main7} (i) Let $n$ be an integer such that $n=3$ or $n=5$ or $n\ge7$. Then there exists an $n$-state automaton over alphabet $\{0,1\}$ that define a free transformation group on $n$ generators. (ii) For any integer $n\ge3$ there exists an $n$-state automaton over alphabet $\{0,1\}$ that define a transformation group freely generated by $n$ involutions. \end{corollary} We prove Theorems \ref{main1}, \ref{main2}, and \ref{main3} using the dual automaton approach. Namely, each finite automaton $Y$ is assigned a dual automaton $Y'$ obtained from $Y$ by interchanging the alphabet with the set of internal states and the state transition function with the output function. It turns out that there is a connection between transformation groups defined by $Y$ and $Y'$. As intermediate results, we obtain some information on the dual automata of the Aleshin type automata. \begin{proposition}\lambdabel{main8} (i) The dual automaton of the Aleshin automaton defines a group that acts transitively on each level of the rooted ternary tree $\{a,b,c\}^*$. (ii) For any $n\ge1$ the dual automaton of $A^{(n)}$ defines a group that acts transitively on each level of the rooted $(2n+1)$-regular tree $Q_n^*$. \end{proposition} The proof of Theorem \ref{main4} given in \cite{N} also relies on the dual automaton approach. In particular, it involves a statement on the dual automaton $\widehat D$ of $B$. Since the group $G(B)$ is generated by involutions, it follows that the set of double letter words over the alphabet $\{a,b,c\}$ is invariant under the action of the group $G(\widehat D)$. Hence $G(\widehat D)$ does not act transitively on levels of the rooted tree $\{a,b,c\}$. 
\begin{proposition}[\cite{N}]\label{main9} The dual automaton of the Bellaterra automaton defines a transformation group that acts transitively on each level of the rooted subtree of $\{a,b,c\}^*$ formed by no-double-letter words. \end{proposition} We derive Theorems \ref{main4}, \ref{main5}, and \ref{main6} from Theorem \ref{main3}. This does not involve dual automata. Nonetheless we obtain a new proof of Proposition \ref{main9} that also works for all Bellaterra type automata. \begin{proposition}\label{main10} For any $n\ge1$ the dual automaton of $B^{(n)}$ defines a group that acts transitively on each level of the rooted subtree of $Q_n^*$ formed by no-double-letter words. \end{proposition} Finally, we establish relations between groups defined by automata of Aleshin type and of Bellaterra type. \begin{proposition}\label{main11} (i) The group $G(A)$ is an index $2$ subgroup of $G(B^{(\{0,1\})})$; (ii) for any $n\ge1$ the group $G(A^{(n)})$ is an index $2$ subgroup of $G(B^{(\{0,n\})})$; (iii) for any nonempty set $N$ of positive integers the group $G(A^{(N)})$ is an index $2$ subgroup of $G(B^{(N\cup\{0\})})$. \end{proposition} \begin{proposition}\label{main12} (i) $G(A)\cap G(B)$ is a free group on $2$ generators and an index $2$ subgroup of $G(B)$. (ii) For any $n\ge1$, $G(A^{(n)})\cap G(B^{(n)})$ is a free group on $2n$ generators and an index $2$ subgroup of $G(B^{(n)})$. (iii) For any nonempty set $N$ of positive integers, $G(A^{(N)})\cap G(B^{(N)})$ is an index $2$ subgroup of $G(B^{(N)})$. Also, $G(A^{(N)})\cap G(B^{(N)})$ is a free group of rank less by $1$ than the free rank of $G(A^{(N)})$. \end{proposition}
4,011
40,263
en
train
0.92.2
The Bellaterra automaton $B$ is closely related to the Aleshin automaton $A$. Namely, the two automata share the alphabet, internal states, and the state transition function while their output functions never coincide. We use this relation to define a series $B^{(1)},B^{(2)},\dots$ of automata of Bellaterra type. By definition, $B^{(n)}$ is a $(2n+1)$-state automaton obtained from $A^{(n)}$ by changing values of the output function at all elements of its domain. Also, we define a one-state automaton $B^{(0)}$ that interchanges letters $0$ and $1$ of the alphabet. All transformations defined by a Bellaterra type automaton are involutions. \begin{theorem}\lambdabel{main5} For any $n\ge0$ the automaton $B^{(n)}$ defines the free product of $2n+1$ groups of order $2$. \end{theorem} \begin{theorem}\lambdabel{main6} Let $N$ be a nonempty set of nonnegative integers and denote by $B^{(N)}$ the disjoint union of automata $B^{(n)}$, $n\in N$. Then the automaton $B^{(N)}$ defines the free product of $\sum_{n\in N}(2n+1)$ groups of order $2$. \end{theorem} Theorems \ref{main3} and \ref{main6} have the following obvious corollary. \begin{corollary}\lambdabel{main7} (i) Let $n$ be an integer such that $n=3$ or $n=5$ or $n\ge7$. Then there exists an $n$-state automaton over alphabet $\{0,1\}$ that define a free transformation group on $n$ generators. (ii) For any integer $n\ge3$ there exists an $n$-state automaton over alphabet $\{0,1\}$ that define a transformation group freely generated by $n$ involutions. \end{corollary} We prove Theorems \ref{main1}, \ref{main2}, and \ref{main3} using the dual automaton approach. Namely, each finite automaton $Y$ is assigned a dual automaton $Y'$ obtained from $Y$ by interchanging the alphabet with the set of internal states and the state transition function with the output function. It turns out that there is a connection between transformation groups defined by $Y$ and $Y'$. 
As intermediate results, we obtain some information on the dual automata of the Aleshin type automata. \begin{proposition}\lambdabel{main8} (i) The dual automaton of the Aleshin automaton defines a group that acts transitively on each level of the rooted ternary tree $\{a,b,c\}^*$. (ii) For any $n\ge1$ the dual automaton of $A^{(n)}$ defines a group that acts transitively on each level of the rooted $(2n+1)$-regular tree $Q_n^*$. \end{proposition} The proof of Theorem \ref{main4} given in \cite{N} also relies on the dual automaton approach. In particular, it involves a statement on the dual automaton $\widehat D$ of $B$. Since the group $G(B)$ is generated by involutions, it follows that the set of double letter words over the alphabet $\{a,b,c\}$ is invariant under the action of the group $G(\widehat D)$. Hence $G(\widehat D)$ does not act transitively on levels of the rooted tree $\{a,b,c\}$. \begin{proposition}[\cite{N}]\lambdabel{main9} The dual automaton of the Bellaterra automaton defines a transformation group that acts transitively on each level of the rooted subtree of $\{a,b,c\}^*$ formed by no-double-letter words. \end{proposition} We derive Theorems \ref{main4}, \ref{main5}, and \ref{main6} from Theorem \ref{main3}. This does not involve dual automata. Nonetheless we obtain a new proof of Proposition \ref{main9} that also works for all Bellaterra type automata. \begin{proposition}\lambdabel{main10} For any $n\ge1$ the dual automaton of $B^{(n)}$ defines a group that acts transitively on each level of the rooted subtree of $Q_n^*$ formed by no-double-letter words. \end{proposition} Finally, we establish relations between groups defined by automata of Aleshin type and of Bellaterra type. 
\begin{proposition}\lambdabel{main11} (i) The group $G(A)$ is an index $2$ subgroup of $G(B^{(\{0,1\})})$; (ii) for any $n\ge1$ the group $G(A^{(n)})$ is an index $2$ subgroup of $G(B^{(\{0,n\})})$; (iii) for any nonempty set $N$ of positive integers the group $G(A^{(N)})$ is an index $2$ subgroup of $G(B^{(N\cup\{0\})})$. \end{proposition} \begin{proposition}\lambdabel{main12} (i) $G(A)\cap G(B)$ is a free group on $2$ generators and an index $2$ subgroup of $G(B)$. (ii) For any $n\ge1$, $G(A^{(n)})\cap G(B^{(n)})$ is a free group on $2n$ generators and an index $2$ subgroup of $G(B^{(n)})$. (ii) For any nonempty set $N$ of positive integers, $G(A^{(N)})\cap G(B^{(N)})$ is an index $2$ subgroup of $G(B^{(N)})$. Also, $G(A^{(N)})\cap G(B^{(N)})$ is a free group of rank less by $1$ than the free rank of $G(A^{(N)})$. \end{proposition} The paper is organized as follows. Section \ref{auto} addresses some general constructions concerning automata and their properties. In Section \ref{a} we recall constructions and arguments of the paper \cite{VV} where Theorem \ref{main1} was proved. In Section \ref{series} they are applied to the Aleshin type automata, which results in the proof of Theorem \ref{main2} (Theorem \ref{series7}). Besides, Proposition \ref{main8} is established in Sections \ref{a} and \ref{series} (see Corollaries \ref{a5plus} and \ref{series6plus}). In Section \ref{union} we consider disjoint unions of Aleshin type automata and obtain Theorem \ref{main3} (Theorem \ref{union6}). Section \ref{b} is devoted to the study of the Bellaterra automaton, automata of Bellaterra type, and their relation to automata of Aleshin type. Here we prove Theorems \ref{main4}, \ref{main5}, and \ref{main6} (Theorems \ref{b3} and \ref{b4}), Propositions \ref{main9} and \ref{main10} (Propositions \ref{b9} and \ref{b10}), Proposition \ref{main11} (Proposition \ref{b2}), and Proposition \ref{main12} (Propositions \ref{b6}, \ref{b7}, and \ref{b8}).
1,880
40,263
en
train
0.92.3
\section{Automata}\lambdabel{auto} An {\em automaton\/} $A$ is a quadruple $(Q,X,\phi,\psi)$ formed by two nonempty sets $Q$ and $X$ along with two maps $\phi:Q\times X\to Q$ and $\psi:Q\times X\to X$. The set $X$ is to be finite, it is called the {\em (input/output) alphabet\/} of the automaton. We say that $A$ is an automaton over the alphabet $X$. $Q$ is called the set of {\em internal states\/} of $A$. The automaton $A$ is called {\em finite\/} (or {\em finite-state\/}) if the set $Q$ is finite. $\phi$ and $\psi$ are called the {\em state transition function\/} and the {\em output function}, respectively. One may regard these functions as a single map $(\phi,\psi): Q\times X\to Q\times X$. The automaton $A$ canonically defines a collection of transformations. First we introduce the set on which these transformations act. This is the set of words over the alphabet $X$, which is denoted by $X^*$. A {\em word\/} $w\in X^*$ is merely a finite sequence whose elements belong to $X$. The elements of $w$ are called {\em letters\/} and $w$ is usually written so that its elements are not separated by delimiters. The number of letters of $w$ is called its {\em length\/}. It is assumed that $X^*$ contains the empty word $\varnothing$. The set $X$ is embedded in $X^*$ as the subset of one-letter words. If $w_1=x_1\dots x_n$ and $w_2=y_1\dots y_m$ are words over the alphabet $X$ then $w_1w_2$ denotes their concatenation $x_1\dots x_ny_1\dots y_m$. The operation $(w_1,w_2)\mapsto w_1w_2$ makes $X^*$ into the free monoid generated by all elements of $X$. The unit element of the monoid $X^*$ is the empty word. Another structure on $X^*$ is that of a rooted $k$-regular tree, where $k$ is the cardinality of $X$. Namely, we consider a graph with the set of vertices $X^*$ where two vertices $w_1,w_2\in X^*$ are joined by an edge if $w_1=w_2x$ or $w_2=w_1x$ for some $x\in X$. The root of the tree is the empty word. 
For any integer $n\ge0$ the $n$-th {\em level\/} of a rooted tree is the set of vertices that are at distance $n$ from the root. Clearly, the $n$-th level of the rooted tree $X^*$ is formed by all words of length $n$ in the alphabet $X$. Now let us explain how the automaton $A$ functions. First we choose an {\em initial state\/} $q\in Q$ and prepare an {\em input word\/} $w=x_1x_2\dots x_n\in X^*$. Then we set the automaton to the state $q$ and start inputting the word $w$ into it, letter by letter. After reading a letter $x'$ in a state $q'$, the automaton produces the output letter $\psi(q',x')$ and makes transition to the state $\phi(q',x')$. Hence the automaton's job results in two sequences: a sequence of states $q_0=q,q_1,\dots,q_n$, which describes the internal work of the automaton, and the {\em output word\/} $v=y_1y_2\dots y_n\in X^*$. Here $q_i=\phi(q_{i-1},x_i)$ and $y_i=\psi(q_{i-1},x_i)$ for $1\le i\le n$. For every choice of the initial state $q\in Q$ of $A$ we get a mapping $A_q:X^*\to X^*$ that sends any input word to the corresponding output word. We say that $A_q$ is the transformation defined by the automaton $A$ with the initial state $q$. Clearly, $A_q$ preserves the length of words. Besides, $A_q$ transforms words from the left to the right, that is, the first $n$ letters of $A_q(w)$ depend only on the first $n$ letters of $w$. This implies that $A_q$ is an endomorphism of $X^*$ as a rooted tree. If $A_q$ is invertible then it belongs to the group $\mathop{\mathrm{Aut}}(X^*)$ of automorphisms of the rooted tree $X^*$. The set of transformations $A_q$, $q\in Q$ is self-similar in the following sense. For any $q\in Q$, $x\in X$, and $w\in X^*$ we have that $A_q(xw)=yA_p(w)$, where $p=\phi(q,x)$, $y=\psi(q,x)$. The semigroup of transformations of $X^*$ generated by $A_q$, $q\in Q$ is denoted by $S(A)$. The automaton $A$ is called {\em invertible\/} if $A_q$ is invertible for all $q\in Q$. 
If $A$ is invertible then $A_q$, $q\in Q$ generate a transformation group $G(A)$, which is a subgroup of $\mathop{\mathrm{Aut}}(X^*)$. We say that $S(A)$ (resp. $G(A)$) is the semigroup (resp. group) defined by the automaton $A$. \begin{lemma}[\cite{VV}]\lambdabel{auto1} Suppose the automaton $A$ is invertible. Then the actions of the semigroup $S(A)$ and the group $G(A)$ on $X^*$ have the same orbits. \end{lemma} One way to picture an automaton, which we use in this paper, is the {\em Moore diagram}. The Moore diagram of an automaton $A=(Q,X,\phi,\psi)$ is a directed graph with labeled edges defined as follows. The vertices of the graph are states of the automaton $A$. Every edge carries a label of the form $x|y$, where $x,y\in X$. The left field $x$ of the label is referred to as the {\em input field\/} while the right field $y$ is referred to as the {\em output field}. The set of edges of the graph is in a one-to-one correspondence with the set $Q\times X$. Namely, for any $q\in Q$ and $x\in X$ there is an edge that goes from the vertex $q$ to $\phi(q,x)$ and carries the label $x|\psi(q,x)$. The Moore diagram of an automaton can have loops (edges joining a vertex to itself) and multiple edges. To simplify pictures, we do not draw multiple edges in this paper. Instead, we use multiple labels. The transformations $A_q$, $q\in Q$ can be defined in terms of the Moore diagram of the automaton $A$. For any $q\in Q$ and $w\in X^*$ we find a path $\gamma$ in the Moore diagram such that $\gamma$ starts at the vertex $q$ and the word $w$ can be obtained by reading the input fields of labels along $\gamma$. Such a path exists and is unique. Then the word $A_q(w)$ is obtained by reading the output fields of labels along the path $\gamma$. Let $\Gamma$ denote the Moore diagram of the automaton $A$. We associate to $\Gamma$ two directed graphs $\Gamma_1$ and $\Gamma_2$ with labeled edges. 
$\Gamma_1$ is obtained from $\Gamma$ by interchanging the input and output fields of all labels. That is, a label $x|y$ is replaced by $y|x$. $\Gamma_2$ is obtained from $\Gamma$ by reversing all edges. The {\em inverse automaton\/} of $A$ is the automaton whose Moore diagram is $\Gamma_1$. The {\em reverse automaton\/} of $A$ is the automaton whose Moore diagram is $\Gamma_2$. The inverse and reverse automata of $A$ share the alphabet and internal states with $A$. Notice that any automaton is completely determined by its Moore diagram. However neither $\Gamma_1$ nor $\Gamma_2$ must be the Moore diagram of an automaton. So it is possible that the inverse automaton or the reverse automaton (or both) of $A$ is not well defined. \begin{lemma}[\cite{GNS}]\lambdabel{auto2} An automaton $A=(Q,X,\phi,\psi)$ is invertible if and only if for any $q\in Q$ the map $\psi(q,\cdot):X\to X$ is bijective. The inverse automaton $I$ of $A$ is well defined if and only if $A$ is invertible. If this is the case, then $I_q=A_q^{-1}$ for all $q\in Q$. \end{lemma} An automaton $A$ is called {\em reversible\/} if the reverse automaton of $A$ is well defined. \begin{lemma}[\cite{VV}]\lambdabel{auto3} An automaton $A=(Q,X,\phi,\psi)$ is reversible if and only if for any $x\in X$ the map $\phi(\cdot,x):Q\to Q$ is bijective. \end{lemma} Let $A=(Q,X,\phi,\psi)$ be an automaton. For any nonempty word $\xi=q_1q_2\dots q_n\in Q^*$ we let $A_\xi=A_{q_n}\dots A_{q_2}A_{q_1}$. Also, we let $A_\varnothing=1$ (here $1$ stands for the unit element of the group $\mathop{\mathrm{Aut}}(X^*)$, i.e., the identity mapping on $X^*$). Clearly, any element of the semigroup $S(A)$ is represented as $A_\xi$ for a nonempty word $\xi\in Q^*$. The map $X^*\times Q^*\to X^*$ given by $(w,\xi)\mapsto A_\xi(w)$ defines a right action of the monoid $Q^*$ on the rooted regular tree $X^*$. That is, $A_{\xi_1\xi_2}(w)=A_{\xi_2}(A_{\xi_1}(w))$ for all $\xi_1,\xi_2\in Q^*$ and $w\in X^*$. 
To each finite automaton $A=(Q,X,\phi,\psi)$ we associate a {\em dual automaton\/} $D$, which is obtained from $A$ by interchanging the alphabet with the set of internal states and the state transition function with the output function. To be precise, $D=(X,Q,\tilde\phi,\tilde\psi)$, where $\tilde\phi(x,q)=\psi(q,x)$ and $\tilde\psi(x,q)=\phi(q,x)$ for all $x\in X$ and $q\in Q$. Unlike the inverse and reverse automata, the dual automaton is always well defined. It is easy to see that $A$ is the dual automaton of $D$. The dual automaton $D$ defines a right action of the monoid $X^*$ on $Q^*$ given by $(\xi,w)\mapsto D_w(\xi)$. This action and the action of $Q^*$ on $X^*$ defined by the automaton $A$ are related in the following way. \begin{proposition}[\cite{VV}]\lambdabel{auto4} For any $w,u\in X^*$ and $\xi\in Q^*$, $$ A_\xi(wu)=A_\xi(w)A_{D_w(\xi)}(u). $$ \end{proposition} \begin{corollary}[\cite{VV}]\lambdabel{auto5} Suppose $A_\xi=1$ for some $\xi\in Q^*$. Then $A_{g(\xi)}=1$ for every $g\in S(D)$. \end{corollary} A finite automaton $A=(Q,X,\phi,\psi)$ is called {\em bi-reversible\/} if the map $\phi(\cdot,x):Q\to Q$ is bijective for any $x\in X$, the map $\psi(q,\cdot):X\to X$ is bijective for any $q\in Q$, and the map $(\phi,\psi):Q\times X\to Q\times X$ is bijective as well. All automata that we consider in this paper are bi-reversible. Below we formulate some basic properties of bi-reversible automata (see also \cite{N}). \begin{lemma}\lambdabel{auto6} Given a finite automaton $A$, the following are equivalent: (i) $A$ is bi-reversible; (ii) $A$ is invertible, reversible, and its reverse automaton is invertible; (iii) $A$ is invertible, reversible, and its inverse automaton is reversible; (iv) $A$ is invertible, its dual automaton is invertible, and the dual automaton of its inverse is invertible. \end{lemma} \begin{proof} Suppose $A=(Q,X,\phi,\psi)$ is a finite automaton. 
By Lemma \ref{auto2}, $A$ is invertible if and only if maps $\psi(q,\cdot):X\to X$ are bijective for all $q\in Q$. By Lemma \ref{auto3}, $A$ is reversible if and only if maps $\phi(\cdot,x):Q\to Q$ are bijective for all $x\in X$. Let $\Gamma$ be the Moore diagram of $A$ and $\Gamma'$ be the graph obtained from $\Gamma$ by reversing all edges and interchanging fields of all labels. The graph $\Gamma'$ is the Moore diagram of an automaton if for any $q\in Q$ and $x\in X$ there is exactly one edge of $\Gamma'$ that starts at the vertex $q$ and has $x$ as the input field of its label. By definition of $\Gamma'$ the number of edges with the latter property is equal to the number of pairs $(p,y)\in Q\times X$ such that $q=\phi(p,y)$ and $x=\psi(p,y)$. Therefore $\Gamma'$ is the Moore diagram of an automaton if and only if the map $(\phi,\psi):Q\times X\to Q\times X$ is bijective. Thus $A$ is bi-reversible if and only if it is invertible, reversible, and $\Gamma'$ is the Moore diagram of an automaton. Assume that the automaton $A$ is invertible and reversible. Let $I$ and $R$ be the inverse and reverse automata of $A$, respectively. If the graph $\Gamma'$ is the Moore diagram of an automaton then the automaton is both the inverse automaton of $R$ and the reverse automaton of $I$. On the other hand, if $\Gamma'$ is not the Moore diagram of an automaton then $R$ is not invertible and $I$ is not reversible. It follows that conditions (i), (ii), and (iii) are equivalent. It follows from Lemmas \ref{auto2} and \ref{auto3} that a finite automaton is reversible if and only if its dual automaton is invertible. This implies that conditions (iii) and (iv) are equivalent. \end{proof}
3,959
40,263
en
train
0.92.4
\begin{lemma}\label{auto7} If an automaton is bi-reversible then its inverse, reverse, and dual automata are also bi-reversible. \end{lemma} \begin{proof} It follows directly from definitions that an automaton is bi-reversible if and only if its dual automaton is bi-reversible. Suppose $A$ is a bi-reversible automaton. By Lemma \ref{auto6}, $A$ is invertible and reversible. Let $I$ and $R$ denote the inverse and reverse automata of $A$, respectively. By Lemma \ref{auto6}, $I$ is reversible and $R$ is invertible. It is easy to see that $A$ is both the inverse automaton of $I$ and the reverse automaton of $R$. Therefore the automata $I$ and $R$ are invertible and reversible. Moreover, the inverse automaton of $I$ is reversible and the reverse automaton of $R$ is invertible. By Lemma \ref{auto6}, the automata $I$ and $R$ are bi-reversible. \end{proof} Suppose $A^{(1)}=(Q_1,X,\phi_1,\psi_1),\dots,A^{(k)}=(Q_k,X,\phi_k,\psi_k)$ are automata over the same alphabet $X$ such that their sets of internal states $Q_1,Q_2,\dots,Q_k$ are disjoint. The {\em disjoint union\/} of automata $A^{(1)},A^{(2)},\dots,A^{(k)}$ is an automaton $U=(Q_1\cup\dots\cup Q_k,X,\phi,\psi)$, where the functions $\phi$, $\psi$ are defined so that $\phi=\phi_i$ and $\psi=\psi_i$ on $Q_i\times X$ for $1\le i\le k$. Obviously, $U_q=A^{(i)}_q$ for all $q\in Q_i$, $1\le i\le k$. The Moore diagram of the automaton $U$ is the disjoint union of the Moore diagrams of $A^{(1)},A^{(2)},\dots,A^{(k)}$. \begin{lemma}\label{auto8} The disjoint union of automata $A^{(1)},A^{(2)},\dots,A^{(k)}$ is invertible (resp. reversible, bi-reversible) if and only if each $A^{(i)}$ is invertible (resp. reversible, bi-reversible). \end{lemma} \begin{proof} Suppose that an automaton $U$ is the disjoint union of automata $A^{(1)}, \dots,A^{(k)}$. 
Note that the disjoint union of graphs $\Gamma_1,\dots, \Gamma_k$ is the Moore diagram of an automaton over an alphabet $X$ if and only if each $\Gamma_i$ is the Moore diagram of an automaton defined over $X$. Since the Moore diagram of $U$ is the disjoint union of the Moore diagrams of $A^{(1)},\dots,A^{(k)}$, it follows that $U$ is invertible (resp. reversible) if and only if each $A^{(i)}$ is invertible (resp. reversible). Moreover, if $U$ is invertible then its inverse automaton is the disjoint union of the inverse automata of $A^{(1)},\dots,A^{(k)}$. Hence the inverse automaton of $U$ is reversible if and only if the inverse automaton of each $A^{(i)}$ is reversible. Now Lemma \ref{auto6} implies that $U$ is bi-reversible if and only if each $A^{(i)}$ is bi-reversible. \end{proof}
937
40,263
en
train
0.92.5
\section{The Aleshin automaton}\label{a} In this section we recall constructions and results of the paper \cite{VV} where the Aleshin automaton was studied. Some constructions are slightly modified. The Aleshin automaton is an automaton $A$ over the alphabet $X=\{0,1\}$ with the set of internal states $Q=\{a,b,c\}$. The state transition function $\phi$ and the output function $\psi$ of $A$ are defined as follows: $\phi(a,0)=\phi(b,1)=c$, $\phi(a,1)=\phi(b,0)=b$, $\phi(c,0)= \phi(c,1)=a$; $\psi(a,0)=\psi(b,0)=\psi(c,1)=1$, $\psi(a,1)=\psi(b,1)= \psi(c,0)=0$. The Moore diagram of $A$ is depicted in Figure \ref{fig1}. It is easy to verify that the automaton $A$ is invertible and reversible. Moreover, the inverse automaton of $A$ can be obtained from $A$ by renaming letters $0$ and $1$ of the alphabet to $1$ and $0$, respectively. The reverse automaton of $A$ can be obtained from $A$ by renaming its states $a$ and $c$ to $c$ and $a$, respectively. Lemma \ref{auto6} implies that $A$ is bi-reversible. \begin{figure} \caption{\label{fig3}} \end{figure} Let $I$ denote the automaton obtained from the inverse of $A$ by renaming its states $a$, $b$, $c$ to $a^{-1}$, $b^{-1}$, $c^{-1}$, respectively. Here, $a^{-1}$, $b^{-1}$, and $c^{-1}$ are assumed to be elements of the free group on generators $a$, $b$, $c$. Further, let $U$ denote the disjoint union of automata $A$ and $I$. The automaton $U$ is defined over the alphabet $X=\{0,1\}$, with the set of internal states $Q^\pm= \{a,b,c,a^{-1},b^{-1},c^{-1}\}$. By definition, $U_a=A_a$, $U_b=A_b$, $U_c=A_c$, $U_{a^{-1}}=A_a^{-1}$, $U_{b^{-1}}=A_b^{-1}$, $U_{c^{-1}}= A_c^{-1}$. \begin{figure} \caption{\label{fig4}} \end{figure} Let $D$ denote the dual automaton of the automaton $U$. The automaton $D$ is defined over the alphabet $Q^\pm$, with two internal states $0$ and $1$. By $\phi_D$ denote its transition function. 
Then $\phi_D(0,q)=1$ and $\phi_D(1,q)=0$ for $q\in\{a,b,a^{-1},b^{-1}\}$, while $\phi_D(0,q)=0$ and $\phi_D(1,q)=1$ for $q\in\{c,c^{-1}\}$. Also, we consider an auxiliary automaton $E$ that is closely related to $D$. By definition, the automaton $E$ shares with $D$ the alphabet, the set of internal states, and the state transition function. The output function $\psi_E$ of $E$ is defined so that $\psi_E(0,q)=\sigma_0(q)$ and $\psi_E(1,q)=\sigma_1(q)$ for all $q\in Q^\pm$, where $\sigma_0=(a^{-1}b^{-1})$ and $\sigma_1=(ab)$ are permutations on the set $Q^\pm$. \begin{figure} \caption{\label{fig5}} \end{figure} Lemmas \ref{auto7} and \ref{auto8} imply that $I$, $U$, and $D$ are bi-reversible automata. As for the automaton $E$, it is easy to verify that $E$ coincides with its inverse automaton while the reverse automaton of $E$ can be obtained from $E$ by renaming its states $0$ and $1$ to $1$ and $0$, respectively. Hence $E$ is bi-reversible due to Lemma \ref{auto6}. To each permutation $\tau$ on the set $Q=\{a,b,c\}$ we assign an automorphism $\pi_\tau$ of the free monoid $(Q^\pm)^*$. The automorphism $\pi_\tau$ is uniquely defined by $\pi_\tau(q)=\tau(q)$, $\pi_\tau(q^{-1})= (\tau(q))^{-1}$ for all $q\in Q$. Let $\langle a,b,c\rangle$ denote the free group on generators $a$, $b$, and $c$, let $\delta:(Q^\pm)^*\to\langle a,b,c\rangle$ be the homomorphism that sends each element of $Q^\pm\subset (Q^\pm)^*$ to itself, and let $p_\tau$ be the automorphism of $\langle a,b,c\rangle$ defined by $p_\tau(q)=\tau(q)$, $q\in Q$. Then $\delta(\pi_\tau(\xi))=p_\tau(\delta(\xi))$ for all $\xi\in(Q^\pm)^*$. \begin{lemma}[\cite{VV}]\label{a1} (i) $E_0^2=E_1^2=1$, $E_0E_1=E_1E_0=\pi_{(ab)}$; (ii) $D_0=\pi_{(ac)}E_0=\pi_{(abc)}E_1$, $D_1=\pi_{(abc)}E_0= \pi_{(ac)}E_1$. \end{lemma} \begin{proposition}[\cite{VV}]\label{a2} The group $G(D)$ contains $E_0$, $E_1$, and all transformations of the form $\pi_\tau$. 
Moreover, $G(D)$ is generated by $E_0$, $\pi_{(ab)}$, and $\pi_{(bc)}$. \end{proposition} As shown in Section \ref{auto}, the automaton $U$ defines a right action $X^*\times(Q^\pm)^*\to X^*$ of the monoid $(Q^\pm)^*$ on the rooted binary tree $X^*$ given by $(w,\xi)\mapsto U_\xi(w)$. Let $\chi:(Q^\pm)^*\to \{-1,1\}$ be the unique homomorphism such that $\chi(a)=\chi(b)= \chi(a^{-1})=\chi(b^{-1})=-1$, $\chi(c)=\chi(c^{-1})=1$. \begin{lemma}[\cite{VV}]\label{a3} Given $\xi\in(Q^\pm)^*$, the automorphism $U_\xi$ of the rooted binary tree $\{0,1\}^*$ acts trivially on the first level of the tree (i.e., on one-letter words) if and only if $\chi(\xi)=1$. \end{lemma} Now we introduce an alphabet consisting of two symbols $*$ and $*^{-1}$. A word over the alphabet $\{*,*^{-1}\}$ is called a {\em pattern}. Every word $\xi$ over the alphabet $Q^\pm$ is assigned a pattern $v$ that is obtained from $\xi$ by substituting $*$ for each occurrence of letters $a,b,c$ and substituting $*^{-1}$ for each occurrence of letters $a^{-1},b^{-1},c^{-1}$. We say that $v$ is the pattern of $\xi$ or that $\xi$ follows the pattern $v$. A word $\xi=q_1q_2\dots q_n\in(Q^\pm)^*$ is called {\em freely irreducible\/} if none of its two-letter subwords $q_1q_2,q_2q_3,\dots, q_{n-1}q_n$ coincides with one of the following words: $aa^{-1},bb^{-1}, cc^{-1},a^{-1}a,b^{-1}b,c^{-1}c$. Otherwise $\xi$ is called {\em freely reducible}. \begin{lemma}[\cite{VV}]\label{a4} For any nonempty pattern $v$ there exist words $\xi_1,\xi_2\in(Q^\pm)^*$ such that $\xi_1$ and $\xi_2$ are freely irreducible, follow the pattern $v$, and $\chi(\xi_2)=-\chi(\xi_1)$. \end{lemma} \begin{proposition}[\cite{VV}]\label{a5} Suppose $\xi\in(Q^\pm)^*$ is a freely irreducible word. Then the orbit of $\xi$ under the action of the group $G(D)$ on $(Q^\pm)^*$ consists of all freely irreducible words following the same pattern as $\xi$. 
\end{proposition} \begin{corollary}\label{a5plus} The group defined by the dual automaton of $A$ acts transitively on each level of the rooted ternary tree $Q^*$. \end{corollary} \begin{proof} Let $D^+$ denote the dual automaton of $A$. The rooted tree $Q^*$ is a subtree of $(Q^\pm)^*$. It is easy to see that $Q^*$ is invariant under transformations $D_0$, $D_1$ and the restrictions of these transformations to $Q^*$ are $D^+_0$, $D^+_1$. In particular, the orbits of the $G(D^+)$ action on $Q^*$ are those orbits of the $G(D)$ action on $(Q^\pm)^*$ that are contained in $Q^*$. Any level of the tree $Q^*$ consists of words of a fixed length over the alphabet $Q$. As elements of $(Q^\pm)^*$, all these words are freely irreducible and follow the same pattern. Proposition \ref{a5} implies that they are in the same orbit of the $G(D^+)$ action. \end{proof} Lemmas \ref{a3}, \ref{a4} and Proposition \ref{a5} lead to the following statement. \begin{theorem}[\cite{VV}]\label{a6} The group $G(A)$ is the free non-Abelian group on generators $A_a$, $A_b$, $A_c$. \end{theorem}
2,689
40,263
en
train
0.92.6
\section{Series of finite automata of Aleshin type}\label{series} In this section we consider a series of finite automata starting from the Aleshin automaton. We use the notation of the previous section. For any integer $n\ge1$ we define an Aleshin type automaton $A^{(n)}$. This is an automaton over the alphabet $X=\{0,1\}$ with a set of states $Q_n$ of cardinality $2n+1$. The states of $A^{(n)}$ are denoted so that $Q_1=\{a_1,b_1,c_1\}$ and $Q_n=\{a_n,b_n,c_n,q_{n1},\dots,q_{n,2n-2}\}$ for $n\ge2$. The state transition function $\phi_n$ of $A^{(n)}$ is defined as follows: $\phi_n(a_n,0)=\phi_n(b_n,1)=c_n$, $\phi_n(a_n,1)=\phi_n(b_n,0)= b_n$, and $\phi_n(q_{ni},0)=\phi_n(q_{ni},1)=q_{n,i+1}$ for $0\le i\le 2n-2$, where by definition $q_{n0}=c_n$ and $q_{n,2n-1}=a_n$. The output function $\psi_n$ of $A^{(n)}$ is defined so that for any $x\in X$ we have $\psi_n(q,x)=1-x$ if $q\in\{a_n,b_n\}$ and $\psi_n(q,x)=x$ if $q\in Q_n\setminus\{a_n,b_n\}$. \begin{figure} \caption{\label{fig6}} \end{figure} Up to renaming of the internal states, $A^{(1)}$ and $A^{(2)}$ are the two automata introduced by Aleshin \cite{A} (see Figure \ref{fig1}). We shall deal with automata $A^{(n)}$ by following the framework developed in the paper \cite{VV} and described in Section \ref{a}. Let us fix a positive integer $n$. It is easy to see that the inverse automaton of the automaton $A^{(n)}$ can be obtained from $A^{(n)}$ by renaming letters $0$ and $1$ of the alphabet to $1$ and $0$, respectively. Besides, the reverse automaton of $A^{(n)}$ can be obtained from $A^{(n)}$ by renaming its states $c_n,q_{n1},\dots,q_{n,2n-2},a_n$ to $a_n,q_{n,2n-2},\dots,q_{n1},c_n$, respectively. Lemma \ref{auto6} implies that $A^{(n)}$ is bi-reversible. Let $I^{(n)}$ denote the automaton obtained from the inverse of $A^{(n)}$ by renaming each state $q\in Q_n$ to $q^{-1}$, where $q^{-1}$ is regarded as an element of the free group on generators $a_n,b_n,c_n,q_{n1},\dots, q_{n,2n-2}$. 
Further, let $U^{(n)}$ denote the disjoint union of automata $A^{(n)}$ and $I^{(n)}$. The automaton $U^{(n)}$ is defined over the alphabet $X=\{0,1\}$, with the set of internal states $Q_n^\pm= \bigcup_{q\in Q_n}\{q,q^{-1}\}$. By definition, $U^{(n)}_q=A^{(n)}_q$ and $U^{(n)}_{q^{-1}}=(A^{(n)}_q)^{-1}$ for all $q\in Q_n$. Let $D^{(n)}$ denote the dual automaton of the automaton $U^{(n)}$. The automaton $D^{(n)}$ is defined over the alphabet $Q_n^\pm$, with two internal states $0$ and $1$. By $\lambda_n$ denote its transition function. Then $\lambda_n(0,q)=1$ and $\lambda_n(1,q)=0$ if $q\in\{a_n,b_n,a_n^{-1}, b_n^{-1}\}$ while $\lambda_n(0,q)=0$ and $\lambda_n(1,q)=1$ otherwise. Also, we consider an auxiliary automaton $E^{(n)}$. By definition, the automaton $E^{(n)}$ shares with $D^{(n)}$ the alphabet, the set of internal states, and the state transition function. The output function $\mu_n$ of $E^{(n)}$ is defined so that $\mu_n(0,q)=\sigma_0(q)$ and $\mu_n(1,q)= \sigma_1(q)$ for all $q\in Q_n^\pm$, where $\sigma_0=(a_n^{-1}b_n^{-1})$ and $\sigma_1=(a_nb_n)$ are permutations on the set $Q_n^\pm$. Lemmas \ref{auto7} and \ref{auto8} imply that $I^{(n)}$, $U^{(n)}$, and $D^{(n)}$ are bi-reversible automata. Further, it is easy to see that the automaton $E^{(n)}$ coincides with its inverse automaton while the reverse automaton of $E^{(n)}$ can be obtained from $E^{(n)}$ by renaming its states $0$ and $1$ to $1$ and $0$, respectively. By Lemma \ref{auto6}, $E^{(n)}$ is bi-reversible. To each permutation $\tau$ on the set $Q_n$ we assign an automorphism $\pi^{(n)}_\tau$ of the free monoid $(Q_n^\pm)^*$ such that $\pi^{(n)}_\tau(q)=\tau(q)$, $\pi^{(n)}_\tau(q^{-1})=(\tau(q))^{-1}$ for all $q\in Q_n$. The automorphism $\pi^{(n)}_\tau$ is uniquely determined by $\tau$. 
\begin{lemma}\label{series1} (i) $(E^{(n)}_0)^2=(E^{(n)}_1)^2=1$, $E^{(n)}_0E^{(n)}_1= E^{(n)}_1E^{(n)}_0=\pi^{(n)}_{(a_nb_n)}$; (ii) $D^{(n)}_0=\pi^{(n)}_{\tau_0}E^{(n)}_0=\pi^{(n)}_{\tau_1}E^{(n)}_1$, $D^{(n)}_1=\pi^{(n)}_{\tau_1}E^{(n)}_0=\pi^{(n)}_{\tau_0}E^{(n)}_1$, where $\tau_0=(a_nc_nq_{n1}\dots q_{n,2n-2})$, $\tau_1=(a_nb_nc_nq_{n1}\dots q_{n,2n-2})$. \end{lemma} \begin{proof} Since the inverse automaton of $E^{(n)}$ coincides with $E^{(n)}$, Lemma \ref{auto2} implies that $(E^{(n)}_0)^2=(E^{(n)}_1)^2=1$. We have that $E^{(n)}=(X,Q_n^\pm,\lambda_n,\mu_n)$, where the functions $\lambda_n$ and $\mu_n$ are defined above. Note that the function $\lambda_n$ does not change when elements $0$ and $1$ of the set $X$ are renamed to $1$ and $0$, respectively. For any permutation $\sigma$ on the set $Q_n^\pm$ we define an automaton $Y^\sigma=(X,Q_n^\pm,\lambda_n,\sigma\mu_n)$. The Moore diagram of $Y^\sigma$ is obtained from the Moore diagram of $E^{(n)}$ by applying $\sigma$ to the output fields of all labels. It is easy to observe that $Y^\sigma_0= \alpha_\sigma E^{(n)}_0$ and $Y^\sigma_1=\alpha_\sigma E^{(n)}_1$, where $\alpha_\sigma$ is the unique automorphism of the monoid $(Q_n^\pm)^*$ such that $\alpha_\sigma(q)=\sigma(q)$ for all $q\in Q_n^\pm$. Let us consider the following permutations on $Q_n^\pm$: \begin{eqnarray*} & \sigma_0=(a_n^{-1}b_n^{-1}), \qquad \sigma_1=(a_nb_n), \qquad \sigma_2=(a_nb_n)(a_n^{-1}b_n^{-1}),\\ & \sigma_3=(a_nc_nq_{n1}\dots q_{n,2n-2})(a_n^{-1}b_n^{-1}c_n^{-1}q_{n1}^{-1} \dots q_{n,2n-2}^{-1}),\\ & \sigma_4=(a_nb_nc_nq_{n1}\dots q_{n,2n-2})(a_n^{-1}c_n^{-1}q_{n1}^{-1}\dots q_{n,2n-2}^{-1}),\\ & \sigma_5=(a_nc_nq_{n1}\dots q_{n,2n-2})(a_n^{-1}c_n^{-1}q_{n1}^{-1}\dots q_{n,2n-2}^{-1}),\\ & \sigma_6=(a_nb_nc_nq_{n1}\dots q_{n,2n-2}) (a_n^{-1}b_n^{-1}c_n^{-1}q_{n1}^{-1}\dots q_{n,2n-2}^{-1}). 
\end{eqnarray*} Since $\sigma_2\sigma_0=\sigma_1$ and $\sigma_2\sigma_1=\sigma_0$, it follows that the automaton $Y^{\sigma_2}$ can be obtained from $E^{(n)}$ by renaming its states $0$ and $1$ to $1$ and $0$, respectively. Therefore $E^{(n)}_0= Y^{\sigma_2}_1=\alpha_{\sigma_2}E^{(n)}_1$ and $E^{(n)}_1=Y^{\sigma_2}_0= \alpha_{\sigma_2}E^{(n)}_0$. Consequently, $E^{(n)}_0E^{(n)}_1=\alpha_{\sigma_2} (E^{(n)}_1)^2=\alpha_{\sigma_2}$ and $E^{(n)}_1E^{(n)}_0=\alpha_{\sigma_2} (E^{(n)}_0)^2=\alpha_{\sigma_2}$. Clearly, $\alpha_{\sigma_2}= \pi^{(n)}_{(a_nb_n)}$. Since $\sigma_5\sigma_0=\sigma_3$ and $\sigma_5\sigma_1=\sigma_4$, it follows that $Y^{\sigma_5}=D^{(n)}$. Hence $D^{(n)}_0=\alpha_{\sigma_5}E^{(n)}_0$ and $D^{(n)}_1=\alpha_{\sigma_5}E^{(n)}_1$. Furthermore, the equalities $\sigma_6\sigma_0=\sigma_4$ and $\sigma_6\sigma_1=\sigma_3$ imply that the automaton $Y^{\sigma_6}$ can be obtained from $D^{(n)}$ by renaming its states $0$ and $1$ to $1$ and $0$, respectively. Therefore $D^{(n)}_0=Y^{\sigma_6}_1= \alpha_{\sigma_6}E^{(n)}_1$ and $D^{(n)}_1=Y^{\sigma_6}_0=\alpha_{\sigma_6} E^{(n)}_0$. It remains to notice that $\alpha_{\sigma_5}=\pi^{(n)}_{\tau_0}$ and $\alpha_{\sigma_6}=\pi^{(n)}_{\tau_1}$. \end{proof} \begin{lemma}\label{series2} For any integer $M\ge3$ the group of permutations on the set $\{1,2,\dots, M\}$ is generated by permutations $(12)$ and $(123\dots M)$. \end{lemma} \begin{proof} Let $\tau_0=(12)$, $\tau_1=(123\dots M)$, and $\tau_2=(23\dots M)$. Then $\tau_2=\tau_0\tau_1$. For any $k$, $2\le k\le M$ we have $(1k)= \tau_2^{k-2}\tau_0\tau_2^{-(k-2)}$. Further, for any $l$ and $m$, $1\le l< m\le M$ we have $(lm)=\tau_1^{l-1}(1k)\tau_1^{-(l-1)}$, where $k=m-l+1$. Therefore the group generated by $\tau_0$ and $\tau_1$ contains all transpositions $(lm)$, $1\le l<m\le M$. It remains to notice that any permutation on $\{1,2,\dots,M\}$ is a product of transpositions. \end{proof}
3,249
40,263
en
train
0.92.7
\begin{lemma}\label{series2} For any integer $M\ge3$ the group of permutations on the set $\{1,2,\dots, M\}$ is generated by permutations $(12)$ and $(123\dots M)$. \end{lemma} \begin{proof} Let $\tau_0=(12)$, $\tau_1=(123\dots M)$, and $\tau_2=(23\dots M)$. Then $\tau_2=\tau_0\tau_1$. For any $k$, $2\le k\le M$ we have $(1k)= \tau_2^{k-2}\tau_0\tau_2^{-(k-2)}$. Further, for any $l$ and $m$, $1\le l< m\le M$ we have $(lm)=\tau_1^{l-1}(1k)\tau_1^{-(l-1)}$, where $k=m-l+1$. Therefore the group generated by $\tau_0$ and $\tau_1$ contains all transpositions $(lm)$, $1\le l<m\le M$. It remains to notice that any permutation on $\{1,2,\dots,M\}$ is a product of transpositions. \end{proof} \begin{proposition}\label{series3} The group $G(D^{(n)})$ contains $E^{(n)}_0$, $E^{(n)}_1$, and all transformations of the form $\pi^{(n)}_\tau$. Moreover, $G(D^{(n)})$ is generated by $E^{(n)}_0$, $\pi^{(n)}_{\tau_0}$, and $\pi^{(n)}_{\tau_1}$, where $\tau_0=(a_nc_nq_{n1}\dots q_{n,2n-2})$, $\tau_1= (a_nb_nc_nq_{n1}\dots q_{n,2n-2})$. \end{proposition} \begin{proof} It is easy to see that $\pi^{(n)}_{\tau\sigma}=\pi^{(n)}_\tau\pi^{(n)}_\sigma$ for any permutations $\tau$ and $\sigma$ on the set $Q_n$. It follows that $\pi^{(n)}_{\tau^{-1}}=(\pi^{(n)}_\tau)^{-1}$ for any permutation $\tau$ on $Q_n$. By Lemma \ref{series1}, the group generated by $E^{(n)}_0$, $\pi^{(n)}_{\tau_0}$, and $\pi^{(n)}_{\tau_1}$ contains $G(D^{(n)})$. Besides, $D^{(n)}_0(D^{(n)}_1)^{-1}=\pi^{(n)}_{\tau_0}E^{(n)}_0 (\pi^{(n)}_{\tau_1}E^{(n)}_0)^{-1}=\pi^{(n)}_{\tau_0} (\pi^{(n)}_{\tau_1})^{-1}$. By the above $\pi^{(n)}_{\tau_0} (\pi^{(n)}_{\tau_1})^{-1}=\pi^{(n)}_{\tau_2}$, where $\tau_2= \tau_0\tau_1^{-1}=(b_nc_n)$. Similarly, $$ (D^{(n)}_0)^{-1}D^{(n)}_1=(\pi^{(n)}_{\tau_0}E^{(n)}_0)^{-1} \pi^{(n)}_{\tau_1}E^{(n)}_0=(E^{(n)}_0)^{-1}\pi^{(n)}_{\tau_3}E^{(n)}_0, $$ where $\tau_3=\tau_0^{-1}\tau_1=(a_nb_n)$. 
Lemma \ref{series1} implies that $E^{(n)}_0$ and $\pi^{(n)}_{\tau_3}$ commute, hence $(D^{(n)}_0)^{-1}D^{(n)}_1=\pi^{(n)}_{\tau_3}$. Consider two more permutations on $Q_n$: $\tau_4=(a_nc_n)$ and $\tau_5=(c_nq_{n1}\dots q_{n,2n-2})$. Note that $\tau_4=\tau_2\tau_3\tau_2$ and $\tau_5= \tau_4\tau_0$. By the above $\pi^{(n)}_{\tau_2},\pi^{(n)}_{\tau_3}\in G(D^{(n)})$, hence $\pi^{(n)}_{\tau_4}\in G(D^{(n)})$. Then $\pi^{(n)}_{\tau_5}E^{(n)}_0=\pi^{(n)}_{\tau_4}\pi^{(n)}_{\tau_0} E^{(n)}_0=\pi^{(n)}_{\tau_4}D^{(n)}_0\in G(D^{(n)})$. Since $\tau_5(a_n)= a_n$ and $\tau_5(b_n)=b_n$, it easily follows that transformations $\pi^{(n)}_{\tau_5}$ and $E^{(n)}_0$ commute. As $\tau_5$ is a permutation of odd order $2n-1$ while $E^{(n)}_0$ is an involution, we have that $(\pi^{(n)}_{\tau_5}E^{(n)}_0)^{2n-1}=E^{(n)}_0$. In particular, $E^{(n)}_0\in G(D^{(n)})$. Now Lemma \ref{series1} implies that $\pi^{(n)}_{\tau_0},\pi^{(n)}_{\tau_1},E^{(n)}_1\in G(D^{(n)})$. By Lemma \ref{series2}, the group of all permutations on the set $Q_n$ is generated by permutations $\tau_1$ and $\tau_3$. Since $\pi^{(n)}_{\tau_1},\pi^{(n)}_{\tau_3}\in G(D^{(n)})$, it follows that $G(D^{(n)})$ contains all transformations of the form $\pi^{(n)}_\tau$. \end{proof} Recall that words over the alphabet $\{*,*^{-1}\}$ are called patterns. Every word $\xi\in(Q_n^\pm)^*$ is assigned a pattern $v$ that is obtained from $\xi$ by substituting $*$ for each occurrence of letters $a_n,b_n,c_n, q_{n1},\dots,q_{n,2n-2}$ and substituting $*^{-1}$ for each occurrence of letters $a_n^{-1},b_n^{-1},c_n^{-1},q_{n1}^{-1},\dots,q_{n,2n-2}^{-1}$. We say that $\xi$ follows the pattern $v$. A word $\xi=q_1q_2\dots q_k\in(Q_n^\pm)^*$ is called freely irreducible if none of its two-letter subwords $q_1q_2,q_2q_3,\dots,q_{k-1}q_k$ is of the form $qq^{-1}$ or $q^{-1}q$, where $q\in Q_n$. Otherwise $\xi$ is called freely reducible. 
\begin{lemma}\label{series4} For any nonempty pattern $v$ there exists a freely irreducible word $\xi\in(Q_n^\pm)^*$ such that $v$ is the pattern of $\xi$ and the transformation $U^{(n)}_\xi$ acts nontrivially on the first level of the rooted binary tree $X^*$. \end{lemma} \begin{proof} Given a nonempty pattern $v$, let us substitute $a_n$ for each occurrence of $*$ in $v$ and $b_n^{-1}$ for each occurrence of $*^{-1}$. We get a word $\xi\in(Q_n^\pm)^*$ that follows the pattern $v$. Now let us modify $\xi$ by changing its last letter. If this letter is $a_n$, we change it to $c_n$. If the last letter of $\xi$ is $b_n^{-1}$, we change it to $c_n^{-1}$. This yields another word $\eta\in(Q_n^\pm)^*$ that follows the pattern $v$. By construction, $\xi$ and $\eta$ are freely irreducible. Furthermore, $U^{(n)}_\eta=A^{(n)}_{c_n}(A^{(n)}_{a_n})^{-1}U^{(n)}_\xi$ if the last letter of $v$ is $*$ while $U^{(n)}_\eta=(A^{(n)}_{c_n})^{-1} A^{(n)}_{b_n}U^{(n)}_\xi$ if the last letter of $v$ is $*^{-1}$. Both $A^{(n)}_{c_n}(A^{(n)}_{a_n})^{-1}$ and $(A^{(n)}_{c_n})^{-1}A^{(n)}_{b_n}$ interchange one-letter words $0$ and $1$. It follows that one of the transformations $U^{(n)}_\xi$ and $U^{(n)}_\eta$ also acts nontrivially on the first level of the rooted tree $\{0,1\}^*$. \end{proof} Given a nonempty, freely irreducible word $\xi\in(Q_n^\pm)^*$, let $Z_n(\xi)$ denote the set of all freely irreducible words in $(Q_n^\pm)^*$ that follow the same pattern as $\xi$ and match $\xi$ completely or except for the last letter. Obviously, $\xi\in Z_n(\xi)$, and $\eta\in Z_n(\xi)$ if and only if $\xi\in Z_n(\eta)$. The set $Z_n(\xi)$ consists of $2n$ or $2n+1$ words. Namely, there are exactly $2n+1$ words in $(Q_n^\pm)^*$ that follow the same pattern as $\xi$ and match $\xi$ completely or except for the last letter. However if the last two letters in the pattern of $\xi$ are distinct then one of these $2n+1$ words is freely reducible. 
\begin{lemma}\label{series5} For any nonempty pattern $v$ there exists a freely irreducible word $\xi\in (Q_n^\pm)^*$ such that $v$ is the pattern of $\xi$ and the set $Z_n(\xi)$ is contained in one orbit of the $G(D^{(n)})$ action on $(Q_n^\pm)^*$. \end{lemma} \begin{proof} Let $h_n:(Q^\pm)^*\to(Q_n^\pm)^*$ be the homomorphism of monoids such that $h_n(a)=a_n$, $h_n(b)=b_n$, $h_n(c)=c_n$, $h_n(a^{-1})=a_n^{-1}$, $h_n(b^{-1})=b_n^{-1}$, $h_n(c^{-1})=c_n^{-1}$. The range of $h_n$ consists of words over alphabet $\{a_n,b_n,c_n,a_n^{-1},b_n^{-1}, c_n^{-1}\}$. For any $\zeta\in(Q^\pm)^*$ the word $h_n(\zeta)$ follows the same pattern as $\zeta$. Besides, $h_n(\zeta)$ is freely irreducible if and only if $\zeta$ is. It is easy to see that $h_n(\pi_{(ab)}(\zeta))= \pi^{(n)}_{(a_nb_n)}(h_n(\zeta))$, $h_n(\pi_{(bc)}(\zeta))= \pi^{(n)}_{(b_nc_n)}(h_n(\zeta))$, and $h_n(E_0(\zeta))= E^{(n)}_0(h_n(\zeta))$. By Proposition \ref{a2}, the group $G(D)$ is generated by $\pi_{(ab)}$, $\pi_{(bc)}$, and $E_0$. On the other hand, $\pi^{(n)}_{(a_nb_n)},\pi^{(n)}_{(b_nc_n)},E^{(n)}_0\in G(D^{(n)})$ due to Proposition \ref{series3}. It follows that for any $g_0\in G(D)$ there exists $g\in G(D^{(n)})$ such that $h_n(g_0(\zeta))=g(h_n(\zeta))$ for all $\zeta\in(Q^\pm)^*$. Now Proposition \ref{a5} implies that two words over alphabet $\{a_n,b_n,c_n,a_n^{-1},b_n^{-1},c_n^{-1}\}$ are in the same orbit of the $G(D^{(n)})$ action on $(Q_n^\pm)^*$ whenever they are freely irreducible and follow the same pattern. Let $v_0$ be the pattern obtained by deleting the last letter of $v$. We substitute $a_n$ for each occurrence of $*$ in $v_0$ and $b_n^{-1}$ for each occurrence of $*^{-1}$. This yields a word $\eta\in(Q_n^\pm)^*$ that follows the pattern $v_0$. Now let $\xi=\eta c_n$ if the last letter of $v$ is $*$ and let $\xi=\eta c_n^{-1}$ otherwise. Clearly, $\xi$ is a freely irreducible word following the pattern $v$. Take any $\zeta\in Z_n(\xi)$. 
If both $\zeta$ and $\xi$ are words over alphabet $\{a_n,b_n, c_n,a_n^{-1},b_n^{-1},c_n^{-1}\}$, then it follows from the above that $\zeta=g(\xi)$ for some $g\in G(D^{(n)})$. Otherwise the last letter of $\zeta$ is $q_{ni}$ or $q_{ni}^{-1}$, where $1\le i\le 2n-2$. In this case we have $\zeta=(\pi^{(n)}_\tau)^i(\xi)$, where $\tau=(c_nq_{n1}\dots q_{n,2n-2})$. By Proposition \ref{series3}, $\pi^{(n)}_\tau\in G(D^{(n)})$. \end{proof}
3,494
40,263
en
train
0.92.8
Given a nonempty, freely irreducible word $\xi\in(Q_n^\pm)^*$, let $Z_n(\xi)$ denote the set of all freely irreducible words in $(Q_n^\pm)^*$ that follow the same pattern as $\xi$ and match $\xi$ completely or except for the last letter. Obviously, $\xi\in Z_n(\xi)$, and $\eta\in Z_n(\xi)$ if and only if $\xi\in Z_n(\eta)$. The set $Z_n(\xi)$ consists of $2n$ or $2n+1$ words. Namely, there are exactly $2n+1$ words in $(Q_n^\pm)^*$ that follow the same pattern as $\xi$ and match $\xi$ completely or except for the last letter. However if the last two letters in the pattern of $\xi$ are distinct then one of these $2n+1$ words is freely reducible. \begin{lemma}\label{series5} For any nonempty pattern $v$ there exists a freely irreducible word $\xi\in (Q_n^\pm)^*$ such that $v$ is the pattern of $\xi$ and the set $Z_n(\xi)$ is contained in one orbit of the $G(D^{(n)})$ action on $(Q_n^\pm)^*$. \end{lemma} \begin{proof} Let $h_n:(Q^\pm)^*\to(Q_n^\pm)^*$ be the homomorphism of monoids such that $h_n(a)=a_n$, $h_n(b)=b_n$, $h_n(c)=c_n$, $h_n(a^{-1})=a_n^{-1}$, $h_n(b^{-1})=b_n^{-1}$, $h_n(c^{-1})=c_n^{-1}$. The range of $h_n$ consists of words over alphabet $\{a_n,b_n,c_n,a_n^{-1},b_n^{-1}, c_n^{-1}\}$. For any $\zeta\in(Q^\pm)^*$ the word $h_n(\zeta)$ follows the same pattern as $\zeta$. Besides, $h_n(\zeta)$ is freely irreducible if and only if $\zeta$ is. It is easy to see that $h_n(\pi_{(ab)}(\zeta))= \pi^{(n)}_{(a_nb_n)}(h_n(\zeta))$, $h_n(\pi_{(bc)}(\zeta))= \pi^{(n)}_{(b_nc_n)}(h_n(\zeta))$, and $h_n(E_0(\zeta))= E^{(n)}_0(h_n(\zeta))$. By Proposition \ref{a2}, the group $G(D)$ is generated by $\pi_{(ab)}$, $\pi_{(bc)}$, and $E_0$. On the other hand, $\pi^{(n)}_{(a_nb_n)},\pi^{(n)}_{(b_nc_n)},E^{(n)}_0\in G(D^{(n)})$ due to Proposition \ref{series3}. It follows that for any $g_0\in G(D)$ there exists $g\in G(D^{(n)})$ such that $h_n(g_0(\zeta))=g(h_n(\zeta))$ for all $\zeta\in(Q^\pm)^*$. 
Now Proposition \ref{a5} implies that two words over alphabet $\{a_n,b_n,c_n,a_n^{-1},b_n^{-1},c_n^{-1}\}$ are in the same orbit of the $G(D^{(n)})$ action on $(Q_n^\pm)^*$ whenever they are freely irreducible and follow the same pattern. Let $v_0$ be the pattern obtained by deleting the last letter of $v$. We substitute $a_n$ for each occurrence of $*$ in $v_0$ and $b_n^{-1}$ for each occurrence of $*^{-1}$. This yields a word $\eta\in(Q_n^\pm)^*$ that follows the pattern $v_0$. Now let $\xi=\eta c_n$ if the last letter of $v$ is $*$ and let $\xi=\eta c_n^{-1}$ otherwise. Clearly, $\xi$ is a freely irreducible word following the pattern $v$. Take any $\zeta\in Z_n(\xi)$. If both $\zeta$ and $\xi$ are words over alphabet $\{a_n,b_n, c_n,a_n^{-1},b_n^{-1},c_n^{-1}\}$, then it follows from the above that $\zeta=g(\xi)$ for some $g\in G(D^{(n)})$. Otherwise the last letter of $\zeta$ is $q_{ni}$ or $q_{ni}^{-1}$, where $1\le i\le 2n-2$. In this case we have $\zeta=(\pi^{(n)}_\tau)^i(\xi)$, where $\tau=(c_nq_{n1}\dots q_{n,2n-2})$. By Proposition \ref{series3}, $\pi^{(n)}_\tau\in G(D^{(n)})$. \end{proof} \begin{proposition}\label{series6} Suppose $\xi\in(Q_n^\pm)^*$ is a freely irreducible word. Then the orbit of $\xi$ under the action of the group $G(D^{(n)})$ on $(Q_n^\pm)^*$ consists of all freely irreducible words following the same pattern as $\xi$. \end{proposition} \begin{proof} First we shall show that the $G(D^{(n)})$ action on $(Q_n^\pm)^*$ preserves patterns and free irreducibility of words. Let $\phi_n^\pm$ and $\psi_n^\pm$ denote the state transition and output functions of the automaton $U^{(n)}$. By $\tilde\phi_n$ and $\tilde\psi_n$ denote the state transition and output functions of its dual $D^{(n)}$. Take any $q\in Q_n^\pm$ and $x\in X$. By definition of $U^{(n)}$ we have that $\phi_n^\pm(q,x)\in Q_n$ if and only if $q\in Q_n$. 
Since $\phi_n^\pm(q,x)=\tilde\psi_n(x,q)$, it follows that transformations $D^{(n)}_0$ and $D^{(n)}_1$ preserve patterns of words. So does any $g\in G(D^{(n)})$. Further, let $p=\phi_n^\pm(q,x)$ and $y=\psi_n^\pm(q,x)$. Then $\phi_n^\pm(q^{-1},y)=p^{-1}$ and $\psi_n^\pm(q^{-1},y)=x$. Consequently, $D^{(n)}_x(qq^{-1})=\tilde\psi_n(x,q) \tilde\psi_n(\tilde\phi_n(x,q),q^{-1})=\phi_n^\pm(q,x) \phi_n^\pm(q^{-1},\psi_n^\pm(q,x))=pp^{-1}$. It follows that the set $P=\{qq^{-1}\mid q\in Q_n^\pm\}\subset(Q_n^\pm)^*$ is invariant under $D^{(n)}_0$ and $D^{(n)}_1$. Any freely reducible word $\xi\in(Q_n^\pm)^*$ is represented as $\xi_1\xi_0\xi_2$, where $\xi_0\in P$ and $\xi_1,\xi_2\in (Q_n^\pm)^*$. For any $x\in X$ we have $D^{(n)}_x(\xi)=D^{(n)}_x(\xi_1) D^{(n)}_{x_0}(\xi_0)D^{(n)}_{x_1}(\xi_2)$, where $x_0,x_1\in X$. By the above $D^{(n)}_x(\xi)$ is freely reducible. Thus $D^{(n)}_0$ and $D^{(n)}_1$ preserve free reducibility of words. Since these transformations are invertible, they also preserve free irreducibility, and so does any $g\in G(D^{(n)})$. Now we are going to prove that for any freely irreducible words $\xi_1,\xi_2\in(Q_n^\pm)^*$ following the same pattern $v$ there exists $g\in G(D^{(n)})$ such that $\xi_2=g(\xi_1)$. The claim is proved by induction on the length of the pattern $v$. The empty pattern is followed only by the empty word. Now let $k\ge1$ and assume that the claim holds for all patterns of length less than $k$. Take any pattern $v$ of length $k$. By Lemma \ref{series5}, the pattern $v$ is followed by a freely irreducible word $\xi\in(Q_n^\pm)^*$ such that the set $Z_n(\xi)$ is contained in an orbit of the $G(D^{(n)})$ action. Suppose $\xi_1,\xi_2\in (Q_n^\pm)^*$ are freely irreducible words following the pattern $v$. Let $\eta,\eta_1,\eta_2$ be the words obtained by deleting the last letter of $\xi,\xi_1,\xi_2$, respectively. Then $\eta,\eta_1,\eta_2$ are freely irreducible and follow the same pattern of length $k-1$. 
By the inductive assumption there are $g_1,g_2\in G(D^{(n)})$ such that $\eta=g_1(\eta_1)= g_2(\eta_2)$. Since the $G(D^{(n)})$ action preserves patterns and free irreducibility, it follows that $g_1(\xi_1),g_2(\xi_2)\in Z_n(\xi)$. As $Z_n(\xi)$ is contained in an orbit, there exists $g_0\in G(D^{(n)})$ such that $g_0(g_1(\xi_1))=g_2(\xi_2)$. Then $\xi_2=g(\xi_1)$, where $g=g_2^{-1}g_0g_1\in G(D^{(n)})$. \end{proof} \begin{corollary}\label{series6plus} The group defined by the dual automaton of $A^{(n)}$ acts transitively on each level of the rooted tree $Q_n^*$. \end{corollary} Corollary \ref{series6plus} follows from Proposition \ref{series6} in the same way as Corollary \ref{a5plus} follows from Proposition \ref{a5}. We omit the proof. \begin{theorem}\label{series7} The group $G(A^{(n)})$ is the free non-Abelian group on $2n+1$ generators $A^{(n)}_q$, $q\in Q_n$. \end{theorem} \begin{proof} The group $G(A^{(n)})$ is the free non-Abelian group on generators $A^{(n)}_q$, $q\in Q_n$ if and only if $(A^{(n)}_{q_1})^{m_1}(A^{(n)}_{q_2})^{m_2}\dots (A^{(n)}_{q_k})^{m_k}\ne1$ for any pair of sequences $q_1,\dots,q_k$ and $m_1,\dots,m_k$ such that $k>0$, $q_i\in Q_n$ and $m_i\in\mathbb{Z}\setminus\{0\}$ for $1\le i\le k$, and $q_i\ne q_{i+1}$ for $1\le i\le k-1$. Since $U^{(n)}_q=A^{(n)}_q$ and $U^{(n)}_{q^{-1}}=(A^{(n)}_q)^{-1}$ for all $q\in Q_n$, an equivalent condition is that $U^{(n)}_\xi\ne1$ for any nonempty freely irreducible word $\xi\in(Q_n^\pm)^*$. Suppose $U^{(n)}_\xi=1$ for some freely irreducible word $\xi\in (Q_n^\pm)^*$. By Corollary \ref{auto5}, $U^{(n)}_{g(\xi)}=1$ for all $g\in S(D^{(n)})$. Then Proposition \ref{auto1} implies that $U^{(n)}_{g(\xi)}=1$ for all $g\in G(D^{(n)})$. Now it follows from Proposition \ref{series6} that $U^{(n)}_\eta=1$ for any freely irreducible word $\eta\in(Q_n^\pm)^*$ following the same pattern as $\xi$. In particular, $U^{(n)}_\eta$ acts trivially on the first level of the rooted binary tree $\{0,1\}^*$. 
Finally, Lemma \ref{series4} implies that $\xi$ follows the empty pattern. Then $\xi$ itself is the empty word. \end{proof}
3,248
40,263
en
train
0.92.9
\section{Disjoint unions}\lambdabel{union} In this section we consider disjoint unions of Aleshin type automata. We use the notation of Sections \ref{a} and \ref{series}. Let $N$ be a nonempty set of positive integers. We denote by $A^{(N)}$ the disjoint union of automata $A^{(n)}$, $n\in N$. Then $A^{(N)}$ is an automaton over the alphabet $X=\{0,1\}$ with the set of internal states $Q_N=\bigcup_{n\in N}Q_n$. It is bi-reversible since each $A^{(n)}$ is bi-reversible. Let $I^{(N)}$ denote the disjoint union of automata $I^{(n)}$, $n\in N$. The automaton $I^{(N)}$ can be obtained from the inverse of $A^{(N)}$ by renaming each state $q\in Q_N$ to $q^{-1}$. Further, let $U^{(N)}$ denote the disjoint union of automata $A^{(N)}$ and $I^{(N)}$. Obviously, the automaton $U^{(N)}$ is the disjoint union of automata $U^{(n)}$, $n\in N$. It is defined over the alphabet $X=\{0,1\}$, with the set of internal states $Q_N^\pm=\bigcup_{n\in N}Q_n^\pm$. Clearly, $U^{(N)}_q=A^{(N)}_q$ and $U^{(N)}_{q^{-1}}=(A^{(N)}_q)^{-1}$ for all $q\in Q_N$. Let $D^{(N)}$ denote the dual automaton of the automaton $U^{(N)}$. The automaton $D^{(N)}$ is defined over the alphabet $Q_N^\pm$, with two internal states $0$ and $1$. Also, we consider an auxiliary automaton $E^{(N)}$. By definition, the automaton $E^{(N)}$ shares with $D^{(N)}$ the alphabet, the set of internal states, and the state transition function. The output function $\mu_N$ of $E^{(N)}$ is defined so that $\mu_N(0,q)=\sigma_0(q)$ and $\mu_N(1,q)=\sigma_1(q)$ for all $q\in Q_N^\pm$, where $\sigma_0=\prod_{n\in N}(a_n^{-1}b_n^{-1})$ and $\sigma_1=\prod_{n\in N} (a_nb_n)$ are permutations on the set $Q_N^\pm$. Lemmas \ref{auto7} and \ref{auto8} imply that $I^{(N)}$, $U^{(N)}$, and $D^{(N)}$ are bi-reversible automata. 
Further, it is easy to see that the automaton $E^{(N)}$ coincides with its inverse automaton while the reverse automaton of $E^{(N)}$ can be obtained from $E^{(N)}$ by renaming its states $0$ and $1$ to $1$ and $0$, respectively. By Lemma \ref{auto6}, $E^{(N)}$ is bi-reversible. To each permutation $\tau$ on the set $Q_N$ we assign an automorphism $\pi^{(N)}_\tau$ of the free monoid $(Q_N^\pm)^*$ such that $\pi^{(N)}_\tau(q)=\tau(q)$, $\pi^{(N)}_\tau(q^{-1})=(\tau(q))^{-1}$ for all $q\in Q_N$. The automorphism $\pi^{(N)}_\tau$ is uniquely determined by $\tau$. \begin{lemma}\lambdabel{union1} (i) $(E^{(N)}_0)^2=(E^{(N)}_1)^2=1$, $E^{(N)}_0E^{(N)}_1=E^{(N)}_1 E^{(N)}_0=\pi^{(N)}_\tau$, where $\tau=\prod_{n\in N}(a_nb_n)$; (ii) $D^{(N)}_0=\pi^{(N)}_{\tau_0}E^{(N)}_0=\pi^{(N)}_{\tau_1}E^{(N)}_1$, $D^{(N)}_1=\pi^{(N)}_{\tau_1}E^{(N)}_0=\pi^{(N)}_{\tau_0}E^{(N)}_1$, where $\tau_0=\prod_{n\in N}(a_nc_nq_{n1}\dots q_{n,2n-2})$, $\tau_1=\prod_{n\in N}(a_nb_nc_nq_{n1}\dots q_{n,2n-2})$. \end{lemma} The proof of Lemma \ref{union1} is completely analogous to the proof of Lemma \ref{series1} and we omit it. \begin{proposition}\lambdabel{union2} The group $G(D^{(N)})$ contains transformations $E^{(N)}_0$, $E^{(N)}_1$, $\pi^{(N)}_{\tau_1}$, $\pi^{(N)}_{\tau_2}$, $\pi^{(N)}_{\tau_3}$, and $\pi^{(N)}_{\tau_4}$, where $\tau_1=\prod_{n\in N}(a_nb_nc_nq_{n1}\dots q_{n,2n-2})$, $\tau_2=\prod_{n\in N}(c_nq_{n1}\dots q_{n,2n-2})$, $\tau_3= \prod_{n\in N}(a_nb_n)$, and $\tau_4=\prod_{n\in N}(b_nc_n)$. \end{proposition} \begin{proof} It is easy to see that $\pi^{(N)}_{\tau\sigma}=\pi^{(N)}_\tau\pi^{(N)}_\sigma$ for any permutations $\tau$ and $\sigma$ on the set $Q_N$. It follows that $\pi^{(N)}_{\tau^{-1}}=(\pi^{(N)}_\tau)^{-1}$ for any permutation $\tau$ on $Q_N$. By Lemma \ref{union1}, $D^{(N)}_0(D^{(N)}_1)^{-1}=\pi^{(N)}_{\tau_0} E^{(N)}_0(\pi^{(N)}_{\tau_1}E^{(N)}_0)^{-1}=\pi^{(N)}_{\tau_0} (\pi^{(N)}_{\tau_1})^{-1}$, where $\tau_0=\prod_{n\in N}(a_nc_nq_{n1}\dots q_{n,2n-2})$. 
Since $\tau_0\tau_1^{-1}=\tau_4$, it follows that $D^{(N)}_0 (D^{(N)}_1)^{-1}=\pi^{(N)}_{\tau_4}$. Similarly, $$ (D^{(N)}_0)^{-1}D^{(N)}_1=(\pi^{(N)}_{\tau_0}E^{(N)}_0)^{-1} \pi^{(N)}_{\tau_1}E^{(N)}_0=(E^{(N)}_0)^{-1}\pi^{(N)}_{\tau_3}E^{(N)}_0 $$ since $\tau_3=\tau_0^{-1}\tau_1$. Lemma \ref{union1} implies that $E^{(N)}_0$ and $\pi^{(N)}_{\tau_3}$ commute, hence $(D^{(N)}_0)^{-1} D^{(N)}_1=\pi^{(N)}_{\tau_3}$. Consider the permutation $\tau_5= \prod_{n\in N}(a_nc_n)$ on $Q_N$. Notice that $\tau_5=\tau_4\tau_3\tau_4$ and $\tau_2=\tau_5\tau_0$. By the above $\pi^{(N)}_{\tau_3}, \pi^{(N)}_{\tau_4}\in G(D^{(N)})$, hence $\pi^{(N)}_{\tau_5}\in G(D^{(N)})$. Then $\pi^{(N)}_{\tau_2}E^{(N)}_0=\pi^{(N)}_{\tau_5} \pi^{(N)}_{\tau_0}E^{(N)}_0=\pi^{(N)}_{\tau_5}D^{(N)}_0\in G(D^{(N)})$. Since $\tau_2(a_n)=a_n$ and $\tau_2(b_n)=b_n$ for all $n\in N$, it easily follows that transformations $\pi^{(N)}_{\tau_2}$ and $E^{(N)}_0$ commute. As $\tau_2$ is the product of commuting permutations of odd orders $2n-1$, $n\in N$, while $E^{(N)}_0$ is an involution, we have that $(\pi^{(N)}_{\tau_2}E^{(N)}_0)^m=E^{(N)}_0$, where $m=\prod_{n\in N} (2n-1)$. In particular, $E^{(N)}_0$ and $\pi^{(N)}_{\tau_2}$ are contained in $G(D^{(N)})$. Now Lemma \ref{union1} implies that $\pi^{(N)}_{\tau_1}, E^{(N)}_1\in G(D^{(N)})$. \end{proof} Every word $\xi\in(Q_N^\pm)^*$ is assigned a pattern $v$ (i.e., a word in the alphabet $\{*,*^{-1}\}$) that is obtained from $\xi$ by substituting $*$ for each occurrence of letters $q\in Q_N$ and substituting $*^{-1}$ for each occurrence of letters $q^{-1}$, $q\in Q_N$. We say that $\xi$ follows the pattern $v$. Now we introduce an alphabet $P_N^\pm$ that consists of symbols $*_n$ and $*_n^{-1}$ for all $n\in N$. A word over the alphabet $P_N^\pm$ is called a {\em marked pattern}. Every word $\xi\in(Q_N^\pm)^*$ is assigned a marked pattern $v\in(P_N^\pm)^*$ that is obtained from $\xi$ as follows. 
For any $n\in N$ we substitute $*_n$ for each occurrence of letters $q\in Q_n$ in $\xi$ and substitute $*_n^{-1}$ for each occurrence of letters $q^{-1}$, $q\in Q_n$. We say that $\xi$ follows the marked pattern $v$. Clearly, the pattern of $\xi$ is uniquely determined by its marked pattern. Notice that each letter of the alphabet $P_N^\pm$ corresponds to a connected component of the Moore diagram of the automaton $U^{(N)}$. Since $D^{(N)}$ is the dual automaton of $U^{(N)}$, it easily follows that the $G(D^{(N)})$ action on $(Q_N^\pm)^*$ preserves marked patterns of words. A word $\xi=q_1q_2\dots q_k\in(Q_N^\pm)^*$ is called freely irreducible if none of its two-letter subwords $q_1q_2,q_2q_3,\dots,q_{k-1}q_k$ is of the form $qq^{-1}$ or $q^{-1}q$, where $q\in Q_N$. Otherwise $\xi$ is called freely reducible. \begin{lemma}\lambdabel{union3} For any nonempty word $v\in(P_N^\pm)^*$ there exists a freely irreducible word $\xi\in(Q_N^\pm)^*$ such that $v$ is the marked pattern of $\xi$ and the transformation $U^{(N)}_\xi$ acts nontrivially on the first level of the rooted binary tree $X^*$. \end{lemma} \begin{proof} For any $n\in N$ let us substitute $a_n$ for each occurrence of $*_n$ in $v$ and $b_n^{-1}$ for each occurrence of $*_n^{-1}$. We get a nonempty word $\xi\in(Q_N^\pm)^*$ that follows the marked pattern $v$. Now let us modify $\xi$ by changing its last letter. If this letter is $a_n$ ($n\in N$), we change it to $c_n$. If the last letter of $\xi$ is $b_n^{-1}$, we change it to $c_n^{-1}$. This yields another word $\eta\in(Q_N^\pm)^*$ that follows the marked pattern $v$. By construction, $\xi$ and $\eta$ are freely irreducible. Furthermore, $U^{(N)}_\eta=A^{(n)}_{c_n} (A^{(n)}_{a_n})^{-1}U^{(N)}_\xi$ if the last letter of $v$ is $*_n$, $n\in N$ while $U^{(N)}_\eta=(A^{(n)}_{c_n})^{-1}A^{(n)}_{b_n}U^{(N)}_\xi$ if the last letter of $v$ is $*_n^{-1}$. 
For any $n\in N$ both $A^{(n)}_{c_n} (A^{(n)}_{a_n})^{-1}$ and $(A^{(n)}_{c_n})^{-1}A^{(n)}_{b_n}$ interchange one-letter words $0$ and $1$. It follows that one of the transformations $U^{(N)}_\xi$ and $U^{(N)}_\eta$ also acts nontrivially on the first level of the rooted tree $\{0,1\}^*$. \end{proof}
3,310
40,263
en
train
0.92.10
Every word $\xi\in(Q_N^\pm)^*$ is assigned a pattern $v$ (i.e., a word in the alphabet $\{*,*^{-1}\}$) that is obtained from $\xi$ by substituting $*$ for each occurrence of letters $q\in Q_N$ and substituting $*^{-1}$ for each occurrence of letters $q^{-1}$, $q\in Q_N$. We say that $\xi$ follows the pattern $v$. Now we introduce an alphabet $P_N^\pm$ that consists of symbols $*_n$ and $*_n^{-1}$ for all $n\in N$. A word over the alphabet $P_N^\pm$ is called a {\em marked pattern}. Every word $\xi\in(Q_N^\pm)^*$ is assigned a marked pattern $v\in(P_N^\pm)^*$ that is obtained from $\xi$ as follows. For any $n\in N$ we substitute $*_n$ for each occurrence of letters $q\in Q_n$ in $\xi$ and substitute $*_n^{-1}$ for each occurrence of letters $q^{-1}$, $q\in Q_n$. We say that $\xi$ follows the marked pattern $v$. Clearly, the pattern of $\xi$ is uniquely determined by its marked pattern. Notice that each letter of the alphabet $P_N^\pm$ corresponds to a connected component of the Moore diagram of the automaton $U^{(N)}$. Since $D^{(N)}$ is the dual automaton of $U^{(N)}$, it easily follows that the $G(D^{(N)})$ action on $(Q_N^\pm)^*$ preserves marked patterns of words. A word $\xi=q_1q_2\dots q_k\in(Q_N^\pm)^*$ is called freely irreducible if none of its two-letter subwords $q_1q_2,q_2q_3,\dots,q_{k-1}q_k$ is of the form $qq^{-1}$ or $q^{-1}q$, where $q\in Q_N$. Otherwise $\xi$ is called freely reducible. \begin{lemma}\lambdabel{union3} For any nonempty word $v\in(P_N^\pm)^*$ there exists a freely irreducible word $\xi\in(Q_N^\pm)^*$ such that $v$ is the marked pattern of $\xi$ and the transformation $U^{(N)}_\xi$ acts nontrivially on the first level of the rooted binary tree $X^*$. \end{lemma} \begin{proof} For any $n\in N$ let us substitute $a_n$ for each occurrence of $*_n$ in $v$ and $b_n^{-1}$ for each occurrence of $*_n^{-1}$. We get a nonempty word $\xi\in(Q_N^\pm)^*$ that follows the marked pattern $v$. Now let us modify $\xi$ by changing its last letter. 
If this letter is $a_n$ ($n\in N$), we change it to $c_n$. If the last letter of $\xi$ is $b_n^{-1}$, we change it to $c_n^{-1}$. This yields another word $\eta\in(Q_N^\pm)^*$ that follows the marked pattern $v$. By construction, $\xi$ and $\eta$ are freely irreducible. Furthermore, $U^{(N)}_\eta=A^{(n)}_{c_n} (A^{(n)}_{a_n})^{-1}U^{(N)}_\xi$ if the last letter of $v$ is $*_n$, $n\in N$ while $U^{(N)}_\eta=(A^{(n)}_{c_n})^{-1}A^{(n)}_{b_n}U^{(N)}_\xi$ if the last letter of $v$ is $*_n^{-1}$. For any $n\in N$ both $A^{(n)}_{c_n} (A^{(n)}_{a_n})^{-1}$ and $(A^{(n)}_{c_n})^{-1}A^{(n)}_{b_n}$ interchange one-letter words $0$ and $1$. It follows that one of the transformations $U^{(N)}_\xi$ and $U^{(N)}_\eta$ also acts nontrivially on the first level of the rooted tree $\{0,1\}^*$. \end{proof} Given a nonempty, freely irreducible word $\xi\in(Q_N^\pm)^*$, let $Z_N(\xi)$ denote the set of all freely irreducible words in $(Q_N^\pm)^*$ that follow the same marked pattern as $\xi$ and match $\xi$ completely or except for the last letter. Obviously, $\xi\in Z_N(\xi)$, and $\eta\in Z_N(\xi)$ if and only if $\xi\in Z_N(\eta)$. \begin{lemma}\lambdabel{union4} For any nonempty word $v\in(P_N^\pm)^*$ there exists a freely irreducible word $\xi\in(Q_N^\pm)^*$ such that $v$ is the marked pattern of $\xi$ and the set $Z_N(\xi)$ is contained in one orbit of the $G(D^{(N)})$ action on $(Q_N^\pm)^*$. \end{lemma} \begin{proof} Let $\widetilde Q_N^\pm=\bigcup_{n\in N}\{a_n,b_n,c_n,a_n^{-1},b_n^{-1},c_n^{-1}\}$. The set $(\widetilde Q_N^\pm)^*$ of words in the alphabet $\widetilde Q_N^\pm$ is a submonoid of $(Q_N^\pm)^*$. Let $h_N:(\widetilde Q_N^\pm)^*\to(Q^\pm)^*$ be the homomorphism of monoids such that $h_N(a_n)=a$, $h_N(b_n)=b$, $h_N(c_n)=c$, $h_N(a_n^{-1})=a^{-1}$, $h_N(b_n^{-1})=b^{-1}$, $h_N(c_n^{-1})=c^{-1}$ for all $n\in N$. For any $\zeta\in(\widetilde Q_N^\pm)^*$ the word $h_N(\zeta)$ follows the same pattern as $\zeta$. 
The word $\zeta$ is uniquely determined by $h_N(\zeta)$ and the marked pattern of $\zeta$. If $h_N(\zeta)$ is freely irreducible then so is $\zeta$ (however $h_N(\zeta)$ can be freely reducible even if $\zeta$ is freely irreducible). It is easy to see that $E_0(h_N(\zeta))=h_N(E^{(N)}_0(\zeta))$, $\pi_{(ab)}(h_N(\zeta))=h_N(\pi^{(N)}_{\sigma_1}(\zeta))$, and $\pi_{(bc)}(h_N(\zeta))=h_N(\pi^{(N)}_{\sigma_2}(\zeta))$, where $\sigma_1= \prod_{n\in N}(a_nb_n)$ and $\sigma_2=\prod_{n\in N}(b_nc_n)$ are permutations on $Q_N$. By Proposition \ref{a2}, the group $G(D)$ is generated by $E_0$, $\pi_{(ab)}$, and $\pi_{(bc)}$. On the other hand, $E^{(N)}_0, \pi^{(N)}_{\sigma_1},\pi^{(N)}_{\sigma_2}\in G(D^{(N)})$ due to Proposition \ref{union2}. Let $\widetilde G$ denote the subgroup of $G(D^{(N)})$ generated by $E^{(N)}_0$, $\pi^{(N)}_{\sigma_1}$, and $\pi^{(N)}_{\sigma_2}$. It follows that for any $g_0\in G(D)$ there exists $g\in\widetilde G$ such that $g_0(h_N(\zeta))= h_N(g(\zeta))$ for all $\zeta\in(\widetilde Q_N^\pm)^*$. Now Proposition \ref{a5} implies that words $\zeta_1,\zeta_2\in(\widetilde Q_N^\pm)^*$ are in the same orbit of the $G(D^{(N)})$ action on $(Q_N^\pm)^*$ whenever they follow the same marked pattern and the words $h_N(\zeta_1)$, $h_N(\zeta_2)$ are freely irreducible. Given a nonempty marked pattern $v\in(P_N^\pm)^*$, let $v_0$ be the word obtained by deleting the last letter of $v$. For any $n\in N$ we substitute $a_n$ for each occurrence of $*_n$ in $v_0$ and $b_n^{-1}$ for each occurrence of $*_n^{-1}$. This yields a word $\eta\in(Q_N^\pm)^*$ that follows the marked pattern $v_0$. Now let $\xi=\eta c_n$ if the last letter of $v$ is $*_n$, $n\in N$ and let $\xi=\eta c_n^{-1}$ if the last letter of $v$ is $*_n^{-1}$. Clearly, $\xi$ is a freely irreducible word following the marked pattern $v$. Moreover, $\xi\in(\widetilde Q_N^\pm)^*$ and the word $h_N(\xi)$ is also freely irreducible. 
We shall show that the set $Z_N(\xi)$ is contained in the orbit of $\xi$ under the $G(D^{(N)})$ action on $(Q_N^\pm)^*$. Take any $\zeta\in Z_N(\xi)$. If $\zeta$ is a word over the alphabet $\widetilde Q_N^\pm$ and $h_N(\zeta)$ is freely irreducible, then it follows from the above that $\zeta=g(\xi)$ for some $g\in\widetilde G\subset G(D^{(N)})$. On the other hand, suppose that the last letter of $\zeta$ is $q_{ni}$ or $q_{ni}^{-1}$, where $n\in N$, $1\le i\le 2n-2$. In this case we have $\zeta= (\pi^{(N)}_\tau)^i(\xi)$, where $\tau=\prod_{n\in N}(c_nq_{n1}\dots q_{n,2n-2})$. By Proposition \ref{union2}, $\pi^{(N)}_\tau\in G(D^{(N)})$. It remains to consider the case when the last letter of $\zeta$ belongs to $\widetilde Q_N^\pm$ but the word $h_N(\zeta)$ is freely reducible. There is at most one $\zeta\in Z_N(\xi)$ with such properties. It exists if the last two letters of $v$ are of the form $*_l*_m^{-1}$ or $*_l^{-1}*_m$, where $l,m\in N$, $l\ne m$. Assume this is the case. Then the last letter of the word $\eta$ is either $a_l$ or $b_l^{-1}$. Let us change this letter to $c_l$ or $c_l^{-1}$, respectively. The resulting word $\eta_1$ follows the marked pattern $v_0$. Also, the words $h_N(\eta)$ and $h_N(\eta_1)$ are freely irreducible. By Proposition \ref{a5}, $h_N(\eta_1)= g_1(h_N(\eta))$ for some $g_1\in G(D)$. There exists a unique $\zeta_1\in (\widetilde Q_N^\pm)^*$ such that $h_N(\zeta_1)=g_1(h_N(\zeta))$ and $v$ is the marked pattern of $\zeta_1$. By the above there exists $\tilde g_1\in\widetilde G$ such that $\tilde g_1(\eta)=\eta_1$ and $\tilde g_1(\zeta)=\zeta_1$. Since the word $h_N(\zeta)$ is freely reducible, so is $h_N(\zeta_1)$. On the other hand, the word $h_N(\eta_1)$, which can be obtained by deleting the last letter of $h_N(\zeta_1)$, is freely irreducible. It follows that the last two letters of $h_N(\zeta_1)$ are $cc^{-1}$ or $c^{-1}c$. Then the last two letters of $\zeta_1$ are $c_lc_m^{-1}$ or $c_l^{-1}c_m$. 
If $2m-1$ does not divide $2l-1$ then the word $(\pi^{(N)}_\tau)^{2l-1}(\zeta_1)$ matches $\zeta_1$ except for the last letter. Consequently, the word $\zeta'=\tilde g_1^{-1} (\pi^{(N)}_\tau)^{2l-1}\tilde g_1(\zeta)$ matches $\zeta$ except for the last letter. Since the $G(D^{(N)})$ action preserves marked patterns, the word $\zeta'$ follows the marked pattern $v$. Hence $\zeta'\in Z_N(\xi)$. As $\zeta'\ne\zeta$, it follows from the above that $\zeta'=g(\xi)$ for some $g\in G(D^{(N)})$. Then $\zeta=g_0(\xi)$, where $g_0=\tilde g_1^{-1} (\pi^{(N)}_\tau)^{1-2l}\tilde g_1g\in G(D^{(N)})$. Now suppose that $2m-1$ divides $2l-1$. Then $(\pi^{(N)}_\tau)^{2l-1} (\zeta_1)=\zeta_1$ and the above argument does not apply. Recall that the last two letters of $h_N(\zeta_1)$ are $cc^{-1}$ or $c^{-1}c$. If these letters are preceded by $b^{-1}$, we let $\zeta_2=\pi^{(N)}_{\sigma_1} (\zeta_1)$. Otherwise they are preceded by $a$ or $h_N(\zeta_1)$ has length $2$. In this case, we let $\zeta_2=\zeta_1$. Further, consider the permutation $\tau_1=\tau^{2m-1}\sigma_2\tau^{-(2m-1)}\sigma_2\tau^{2m-1}$ on $Q_N$. Since $\pi^{(N)}_\tau,\pi^{(N)}_{\sigma_2}\in G(D^{(n)})$, we have that $\pi^{(N)}_{\tau_1}=(\pi^{(N)}_\tau)^{2m-1}\pi^{(N)}_{\sigma_2} (\pi^{(N)}_\tau)^{1-2m}\pi^{(N)}_{\sigma_2}(\pi^{(N)}_\tau)^{2m-1}\in G(D^{(N)})$. It is easy to see that $\tau_1(c_m)=c_m$ and $\tau_1(a_n)= a_n$ for all $n\in N$. Since $2m-1<2l-1$, we have $\tau_1(c_l)=b_l$. Also, for any $n\in N$ we have $\tau_1(b_n)=b_n$ if $2n-1$ divides $2m-1$ and $\tau_1(b_n)=c_n$ otherwise. It follows that $\zeta_3= \pi^{(N)}_{\tau_1}(\zeta_2)$ is a word in the alphabet $\widetilde Q_N^\pm$ such that $h_N(\zeta_3)$ is freely irreducible. Since $\zeta_3$ follows the marked pattern $v$, we obtain that $\zeta_3$ belongs to the orbit of $\xi$ under the $G(D^{(N)})$ action. So does the word $\zeta$. \end{proof}
3,913
40,263
en
train
0.92.11
\begin{proposition}\label{union5} Suppose $\xi\in(Q_N^\pm)^*$ is a freely irreducible word. Then the orbit of $\xi$ under the action of the group $G(D^{(N)})$ on $(Q_N^\pm)^*$ consists of all freely irreducible words following the same marked pattern as $\xi$. \end{proposition} \begin{theorem}\label{union6} The group $G(A^{(N)})$ is the free non-Abelian group on generators $A^{(N)}_q$, $q\in Q_N$. \end{theorem} Proposition \ref{union5} is derived from Lemma \ref{union4} in the same way as Proposition \ref{series6} was derived from Lemma \ref{series5}. Then Theorem \ref{union6} is derived from Proposition \ref{union5} and Lemma \ref{union3} in the same way as Theorem \ref{series7} was derived from Proposition \ref{series6} and Lemma \ref{series4}. We omit both proofs.
270
40,263
en
train
0.92.12
\section{The Bellaterra automaton and its series}\lambdabel{b} In this section we consider the Bellaterra automaton, a series of automata of Bellaterra type, and their disjoint unions. We use the notation of Sections \ref{a}, \ref{series}, and \ref{union}. The Bellaterra automaton $B$ is an automaton over the alphabet $X=\{0,1\}$ with the set of internal states $Q=\{a,b,c\}$. The state transition function $\widehat\phi$ and the output function $\widehat\psi$ of $B$ are defined as follows: $\widehat\phi(a,0)=\widehat\phi(b,1)=c$, $\widehat\phi(a,1)=\widehat\phi(b,0)=b$, $\widehat\phi(c,0)= \widehat\phi(c,1)=a$; $\widehat\psi(a,0)=\widehat\psi(b,0)=\widehat\psi(c,1)=0$, $\widehat\psi(a,1)= \widehat\psi(b,1)=\widehat\psi(c,0)=1$. The Moore diagram of $B$ is depicted in Figure \ref{fig2}. It is easy to verify that the inverse automaton of $B$ coincides with $B$. Besides, the reverse automaton of $B$ can be obtained from $B$ by renaming its states $a$ and $c$ to $c$ and $a$, respectively. Lemma \ref{auto6} implies that $B$ is bi-reversible. The Bellaterra automaton $B$ is closely related to the Aleshin automaton $A$. Namely, the two automata share the alphabet, the set of internal states, and the state transition function. On the other hand, the output function $\widehat\psi$ of $B$ never coincides with the output function $\psi$ of $A$, that is, $\widehat\psi(q,x)\ne\psi(q,x)$ for all $q\in Q$ and $x\in X$. For any integer $n\ge1$ we define a Bellaterra type automaton $B^{(n)}$ as the automaton that is related to the Aleshin type automaton $A^{(n)}$ in the same way as the automaton $B$ is related to $A$. To be precise, $B^{(n)}$ is an automaton over the alphabet $X=\{0,1\}$ with the set of states $Q_n$. The state transition function of $B^{(n)}$ coincides with that of $A^{(n)}$. 
The output function $\widehat\psi_n$ of $B^{(n)}$ is defined so that for any $x\in X$ we have $\widehat\psi_n(q,x)=x$ if $q\in\{a_n,b_n\}$ and $\widehat\psi_n(q,x)=1-x$ if $q\in Q_n\setminus\{a_n,b_n\}$. Then $\widehat\psi_n(q,x)= 1-\psi_n(q,x)$ for all $q\in Q_n$ and $x\in X$, where $\psi_n$ is the output function of $A^{(n)}$. Note that the automaton $B^{(1)}$ coincides with $B$ up to renaming of the internal states. In addition, we define a Bellaterra type automaton $B^{(0)}$. This is an automaton over the alphabet $X$ with the set of internal states $Q_0$ consisting of a single element $c_0$. The state transition function $\widehat\phi_0$ and the output function $\widehat\psi_0$ of $B^{(0)}$ are defined as follows: $\widehat\phi_0(c_0,0)=\widehat\phi_0(c_0,1)=c_0$; $\widehat\psi_0(c_0,0)=1$, $\widehat\psi_0(c_0,1)=0$. It is easy to see that each Bellaterra type automaton $B^{(n)}$ coincides with its inverse automaton. The reverse automaton of $B^{(0)}$ coincides with $B^{(0)}$ as well. In the case $n\ge1$, the reverse automaton of $B^{(n)}$ can be obtained from $B^{(n)}$ by renaming its states $c_n,q_{n1},\dots,q_{n,2n-2},a_n$ to $a_n,q_{n,2n-2},\dots,q_{n1},c_n$, respectively. Lemma \ref{auto6} implies that each $B^{(n)}$ is bi-reversible. \begin{figure} \centering \caption{The Moore diagram of the automaton $B^{(n)}$.} \label{fig7} \end{figure} Let $N$ be a nonempty set of nonnegative integers. We denote by $B^{(N)}$ the disjoint union of automata $B^{(n)}$, $n\in N$. Then $B^{(N)}$ is an automaton over the alphabet $X=\{0,1\}$ with the set of internal states $Q_N=\bigcup_{n\in N}Q_n$. It is bi-reversible since each $B^{(n)}$ is bi-reversible. If $0\notin N$, then the automaton $B^{(N)}$ shares its alphabet, its internal states, and its state transition function with the automaton $A^{(N)}$ while the output functions of these automata never coincide. The relation between automata of Aleshin type and of Bellaterra type induces a relation between transformations defined by automata of these two types. 
\begin{lemma}\lambdabel{b1} Let $h=B^{(0)}_{c_0}$. Then (i) $A_q=hB_q$ and $B_q=hA_q$ for any $q\in\{a,b,c\}$; (ii) $A^{(n)}_q=hB^{(n)}_q$ and $B^{(n)}_q=hA^{(n)}_q$ for any $n\ge1$ and $q\in Q_n$; (iii) $A^{(N)}_q=hB^{(N)}_q$ and $B^{(N)}_q=hA^{(N)}_q$ for any nonempty set $N$ of positive integers and any $q\in Q_N$. \end{lemma} \begin{proof} The transformation $h$ is the automorphism of the free monoid $\{0,1\}^*$ that interchanges the free generators $0$ and $1$. For any $w\in X^*$ the word $h(w)$ can be obtained from $w$ by changing all letters $0$ to $1$ and all letters $1$ to $0$. Suppose $\widetilde A$ and $\widetilde B$ are two automata over the alphabet $X$ such that their sets of internal states and state transition functions are the same but their output functions never coincide. It is easy to see that $\widetilde A_q=h\widetilde B_q$ and $\widetilde B_q= h\widetilde A_q$ for any internal state $q$ of the automata $\widetilde A$ and $\widetilde B$. The lemma follows. \end{proof} \begin{proposition}\lambdabel{b2} (i) The group $G(A)$ is an index $2$ subgroup of $G(B^{(\{0,1\})})$; (ii) for any $n\ge1$ the group $G(A^{(n)})$ is an index $2$ subgroup of $G(B^{(\{0,n\})})$; (iii) for any nonempty set $N$ of positive integers the group $G(A^{(N)})$ is an index $2$ subgroup of $G(B^{(N\cup\{0\})})$. \end{proposition} \begin{proof} Note that the statement (i) is a particular case of the statement (ii) as $G(A)=G(A^{(1)})$. Furthermore, the statement (ii) is a particular case of the statement (iii) since $A^{(n)}=A^{(\{n\})}$ for any integer $n\ge1$. Suppose $N$ is a nonempty set of positive integers. The group $G(A^{(N)})$ is generated by transformations $A^{(N)}_q$, $q\in Q_N$. The group $G(B^{(N\cup\{0\})})$ is generated by transformations $h=B^{(0)}_{c_0}$ and $B^{(N)}_q$, $q\in Q_N$. By Lemma \ref{b1}, $A^{(N)}_q=hB^{(N)}_q$ and $B^{(N)}_q=hA^{(N)}_q$ for any $q\in Q_N$. 
It follows that the group $G(B^{(N\cup\{0\})})$ is generated by transformations $h$ and $A^{(N)}_q$, $q\in Q_N$. In particular, $G(A^{(N)})\subset G(B^{(N\cup\{0\})})$. For any $n\ge0$ the automaton $B^{(n)}$ coincides with its inverse. Lemma \ref{auto2} implies that $h^2=1$ and $(B^{(N)}_q)^2=1$, $q\in Q_N$. Then $hA^{(N)}_qh^{-1}=B^{(N)}_qh=(A^{(N)}_q)^{-1}$ for any $q\in Q_N$. It follows that $G(A^{(N)})$ is a normal subgroup of $G(B^{(N\cup\{0\})})$. Since $h^2=1$, the index of the group $G(A^{(N)})$ in $G(B^{(N\cup\{0\})})$ is at most $2$. On the other hand, $G(A^{(N)})\ne G(B^{(N\cup\{0\})})$ as $G(B^{(N\cup\{0\})})$ contains a nontrivial involution $h$ while $G(A^{(N)})$ is a free group due to Theorem \ref{union6}. Thus $G(A^{(N)})$ is an index $2$ subgroup of $G(B^{(N\cup\{0\})})$. \end{proof} The relation between groups defined by automata of Aleshin type and of Bellaterra type allows us to establish the structure of the groups defined by automata of the latter type. As the following two theorems show, these groups are free products of groups of order $2$. \begin{theorem}[\cite{N}]\lambdabel{b3} The group $G(B)$ is freely generated by involutions $B_a$, $B_b$, $B_c$. \end{theorem} \begin{theorem}\lambdabel{b4} (i) For any $n\ge1$ the group $G(B^{(n)})$ is freely generated by $2n+1$ involutions $B^{(n)}_q$, $q\in Q_n$; (ii) for any nonempty set $N$ of nonnegative integers the group $G(B^{(N)})$ is freely generated by involutions $B^{(N)}_q$, $q\in Q_N$. \end{theorem} To prove Theorems \ref{b3} and \ref{b4}, we need the following lemma. \begin{lemma}\lambdabel{b5} Suppose that a group $G$ is generated by elements $g_0,g_1,\dots,g_k$ ($k\ge1$) of order at most $2$. Let $H$ be the subgroup of $G$ generated by elements $h_i=g_0g_i$, $1\le i\le k$. Then $G$ is freely generated by $k+1$ involutions $g_0,g_1,\dots,g_k$ if and only if $H$ is the free group on $k$ generators $h_1,\dots,h_k$. 
\end{lemma} \begin{proof} Consider an element $h=h_{i_1}^{\varepsilon_1}h_{i_2}^{\varepsilon_2}\dots h_{i_l}^{\varepsilon_l}$, where $l\ge1$, $1\le i_j\le k$, $\varepsilon_j\in\{-1,1\}$, and $\varepsilon_j=\varepsilon_{j+1}$ whenever $i_j=i_{j+1}$. Since $h_i=g_0g_i$ and $h_i^{-1}=g_ig_0$ for $1\le i\le k$, and $g_0^2=1$, we obtain that $h=g'_0g_{i_1}g'_1\dots g_{i_l}g'_l$, where each $g'_j$ is equal to $g_0$ or $1$. Moreover, $g'_j=g_0$ whenever $\varepsilon_j=\varepsilon_{j+1}$. In particular, $h\ne1$ if $G$ is freely generated by involutions $g_0,g_1,\dots,g_k$. It follows that $H$ is the free group on generators $h_1,\dots,h_k$ if $G$ is freely generated by involutions $g_0,g_1,\dots,g_k$. Now assume that $H$ is the free group on generators $h_1,\dots,h_k$. Then each $h_i$ has infinite order. Since $h_i=g_0g_i$ and $g_0^2=g_i^2=1$, it follows that $g_0\ne1$ and $g_i\ne1$. Hence each of the elements $g_0,g_1,\dots,g_k$ has order $2$. In particular, none of these elements belongs to the free group $H$. The group $G$ is freely generated by involutions $g_0,g_1,\dots,g_k$ if $g\ne1$ for any $g=g_{i_1}\dots g_{i_l}$ such that $l\ge1$, $0\le i_j\le k$, and $i_j\ne i_{j+1}$. First consider the case when $l$ is even. Note that $g_ig_j=h_i^{-1}h_j$ for $0\le i,j\le n$, where by definition $h_0=1$. Therefore $g=h_{i_1}^{-1}h_{i_2}\dots h_{i_{l-1}}^{-1}h_{i_l}\in H$. Since $h_0=1$, the sequence $h_{i_1}^{-1},h_{i_2},\dots,h_{i_{l-1}}^{-1},h_{i_l}$ can contain the unit elements. After removing all of them, we obtain a nonempty sequence in which neighboring elements are not inverses of each other. Since $h_1,\dots,h_k$ are free generators, we conclude that $g\ne1$. In the case when $l$ is odd, it follows from the above that $g=g_{i_1}h$, where $h\in H$. Since $g_{i_1}\notin H$, we have that $g\notin H$, in particular, $g\ne1$. \end{proof}
3,772
40,263
en
train
0.92.13
The relation between groups defined by automata of Aleshin type and of Bellaterra type allows us to establish the structure of the groups defined by automata of the latter type. As the following two theorems show, these groups are free products of groups of order $2$. \begin{theorem}[\cite{N}]\lambdabel{b3} The group $G(B)$ is freely generated by involutions $B_a$, $B_b$, $B_c$. \end{theorem} \begin{theorem}\lambdabel{b4} (i) For any $n\ge1$ the group $G(B^{(n)})$ is freely generated by $2n+1$ involutions $B^{(n)}_q$, $q\in Q_n$; (ii) for any nonempty set $N$ of nonnegative integers the group $G(B^{(N)})$ is freely generated by involutions $B^{(N)}_q$, $q\in Q_N$. \end{theorem} To prove Theorems \ref{b3} and \ref{b4}, we need the following lemma. \begin{lemma}\lambdabel{b5} Suppose that a group $G$ is generated by elements $g_0,g_1,\dots,g_k$ ($k\ge1$) of order at most $2$. Let $H$ be the subgroup of $G$ generated by elements $h_i=g_0g_i$, $1\le i\le k$. Then $G$ is freely generated by $k+1$ involutions $g_0,g_1,\dots,g_k$ if and only if $H$ is the free group on $k$ generators $h_1,\dots,h_k$. \end{lemma} \begin{proof} Consider an element $h=h_{i_1}^{\varepsilon_1}h_{i_2}^{\varepsilon_2}\dots h_{i_l}^{\varepsilon_l}$, where $l\ge1$, $1\le i_j\le k$, $\varepsilon_j\in\{-1,1\}$, and $\varepsilon_j=\varepsilon_{j+1}$ whenever $i_j=i_{j+1}$. Since $h_i=g_0g_i$ and $h_i^{-1}=g_ig_0$ for $1\le i\le k$, and $g_0^2=1$, we obtain that $h=g'_0g_{i_1}g'_1\dots g_{i_l}g'_l$, where each $g'_j$ is equal to $g_0$ or $1$. Moreover, $g'_j=g_0$ whenever $\varepsilon_j=\varepsilon_{j+1}$. In particular, $h\ne1$ if $G$ is freely generated by involutions $g_0,g_1,\dots,g_k$. It follows that $H$ is the free group on generators $h_1,\dots,h_k$ if $G$ is freely generated by involutions $g_0,g_1,\dots,g_k$. Now assume that $H$ is the free group on generators $h_1,\dots,h_k$. Then each $h_i$ has infinite order. Since $h_i=g_0g_i$ and $g_0^2=g_i^2=1$, it follows that $g_0\ne1$ and $g_i\ne1$. 
Hence each of the elements $g_0,g_1,\dots,g_k$ has order $2$. In particular, none of these elements belongs to the free group $H$. The group $G$ is freely generated by involutions $g_0,g_1,\dots,g_k$ if $g\ne1$ for any $g=g_{i_1}\dots g_{i_l}$ such that $l\ge1$, $0\le i_j\le k$, and $i_j\ne i_{j+1}$. First consider the case when $l$ is even. Note that $g_ig_j=h_i^{-1}h_j$ for $0\le i,j\le k$, where by definition $h_0=1$. Therefore $g=h_{i_1}^{-1}h_{i_2}\dots h_{i_{l-1}}^{-1}h_{i_l}\in H$. Since $h_0=1$, the sequence $h_{i_1}^{-1},h_{i_2},\dots,h_{i_{l-1}}^{-1},h_{i_l}$ can contain the unit elements. After removing all of them, we obtain a nonempty sequence in which neighboring elements are not inverses of each other. Since $h_1,\dots,h_k$ are free generators, we conclude that $g\ne1$. In the case when $l$ is odd, it follows from the above that $g=g_{i_1}h$, where $h\in H$. Since $g_{i_1}\notin H$, we have that $g\notin H$, in particular, $g\ne1$. \end{proof} \begin{proofof}{Theorems \ref{b3} and \ref{b4}} First we observe that Theorem \ref{b3} is a particular case of Theorem \ref{b4} since the automata $B$ and $B^{(1)}$ coincide up to renaming of their internal states. Further, the statement (i) of Theorem \ref{b4} is a particular case of the statement (ii) since $B^{(n)}=B^{(\{n\})}$ for any $n\ge1$. Suppose $N$ is a nonempty set of nonnegative integers such that $0\in N$. For any $n\in N$ the automaton $B^{(n)}$ coincides with its inverse. Lemma \ref{auto2} implies that $(B^{(N)}_q)^2=1$ for all $q\in Q_N$. If $N=\{0\}$ then $Q_N=\{c_0\}$ and $G(B^{(N)})$ is a group of order $2$ generated by the involution $h=B^{(0)}_{c_0}$. Now assume that $N\ne \{0\}$. Then $K=N\setminus\{0\}$ is a nonempty set of positive integers. The group $G(B^{(N)})$ is generated by transformations $h$ and $B^{(K)}_q$, $q\in Q_K$. All generators are of order at most $2$. The group $G(A^{(K)})$ is the free group on generators $A^{(K)}_q$, $q\in Q_K$ due to Theorem \ref{union6}. 
By Lemma \ref{b1}, $A^{(K)}_q=hB^{(K)}_q$ for any $q\in Q_K$. Then Lemma \ref{b5} implies that $G(B^{(N)})$ is freely generated by involutions $h$ and $B^{(K)}_q$, $q\in Q_K$. Now consider the case when $N$ is a nonempty set of positive integers. By the above the group $G(B^{(N\cup\{0\})})$ is freely generated by involutions $h$ and $B^{(N)}_q$, $q\in Q_N$. Clearly, this implies that the group $G(B^{(N)})$ is freely generated by involutions $B^{(N)}_q$, $q\in Q_N$. \end{proofof} Now we shall establish a relation between transformation groups defined by the Aleshin type and the Bellaterra type automata with the same set of internal states. Since $G(A)$ is the free group on generators $A_a$, $A_b$, $A_c$, there is a unique homomorphism $\Delta:G(A)\to G(B)$ such that $\Delta(A_a)=B_a$, $\Delta(A_b)=B_b$, $\Delta(A_c)=B_c$. Likewise, for any $n\ge1$ there is a unique homomorphism $\Delta_n:G(A^{(n)})\to G(B^{(n)})$ such that $\Delta_n(A^{(n)}_q)=B^{(n)}_q$ for all $q\in Q_n$. Also, for any nonempty set $N$ of positive integers there is a unique homomorphism $\Delta_N: G(A^{(N)})\to G(B^{(N)})$ such that $\Delta_N(A^{(N)}_q)=B^{(N)}_q$ for all $q\in Q_N$. \begin{proposition}\lambdabel{b6} (i) $G(A)\cap G(B)=\{g\in G(A)\mid \Delta(g)=g\}$; (ii) $G(A)\cap G(B)$ is the free group on generators $B_aB_b$ and $B_aB_c$; (iii) $G(A)\cap G(B)$ is an index $2$ subgroup of $G(B)$; (iv) $A_p^{-1}A_q=B_pB_q$ for all $p,q\in\{a,b,c\}$. \end{proposition} \begin{proof} Let $h=B^{(0)}_{c_0}$. By Lemma \ref{b1}, $A_q=hB_q$ for all $q\in \{a,b,c\}$. Since the inverse automaton of $B$ coincides with $B$, Lemma \ref{auto2} implies that $B_a^2=B_b^2=B_c^2=1$. Then for any $p,q\in \{a,b,c\}$ we have $A_p^{-1}A_q=(hB_p)^{-1}hB_q=B_p^{-1}B_q=B_pB_q$. It is easy to see that $\{g\in G(A)\mid \Delta(g)=g\}$ is a subgroup of $G(A)\cap G(B)$. Let $\widetilde G$ be the group generated by transformations $B_aB_b$ and $B_aB_c$. 
By the above $\Delta(A_a^{-1}A_b)=B_a^{-1}B_b= B_aB_b=A_a^{-1}A_b$ and $\Delta(A_a^{-1}A_c)=B_a^{-1}B_c=B_aB_c= A_a^{-1}A_c$. It follows that $\widetilde G$ is a subgroup of $\{g\in G(A)\mid \Delta(g)=g\}$. By Theorem \ref{b3}, the group $G(B)$ is freely generated by involutions $B_a$, $B_b$, $B_c$. Then Lemma \ref{b5} implies that $\widetilde G$ is the free group on generators $B_aB_b$ and $B_aB_c$. Note that $B_aB_q\in\widetilde G$ for all $q\in Q$. Then for any $p,q\in Q$ we have $B_pB_q=(B_aB_p)^{-1} B_aB_q\in\widetilde G$. It follows that for any $g\in G(B)$ at least one of the transformations $g$ and $B_ag$ belongs to $\widetilde G$. Therefore the index of $\widetilde G$ in $G(B)$ is at most $2$. Note that $B_a\notin G(A)$ as $B_a$ is a nontrivial involution while $G(A)$ is a free group. Hence $G(A)\cap G(B)\ne G(B)$. Now it follows from the above that $\widetilde G=\{g\in G(A)\mid \Delta(g)=g\}=G(A)\cap G(B)$ and this is an index $2$ subgroup of $G(B)$. \end{proof}
2,948
40,263
en
train
0.92.14
\begin{proposition}\label{b7} Let $n$ be a positive integer. Then (i) $G(A^{(n)})\cap G(B^{(n)})=\{g\in G(A^{(n)})\mid \Delta_n(g)=g\}$; (ii) $G(A^{(n)})\cap G(B^{(n)})$ is the free group on $2n$ generators $B^{(n)}_{a_n}B^{(n)}_q$, $q\in Q_n\setminus\{a_n\}$; (iii) $G(A^{(n)})\cap G(B^{(n)})$ is an index $2$ subgroup of $G(B^{(n)})$; (iv) $(A^{(n)}_p)^{-1}A^{(n)}_q=B^{(n)}_pB^{(n)}_q$ for all $p,q\in Q_n$. \end{proposition} \begin{proposition}\label{b8} Let $N$ be a nonempty set of positive integers. Then (i) $G(A^{(N)})\cap G(B^{(N)})=\{g\in G(A^{(N)})\mid \Delta_N(g)=g\}$; (ii) for any $n\in N$ the group $G(A^{(N)})\cap G(B^{(N)})$ is the free group on generators $B^{(N)}_{a_n}B^{(N)}_q$, $q\in Q_N\setminus\{a_n\}$; (iii) $G(A^{(N)})\cap G(B^{(N)})$ is an index $2$ subgroup of $G(B^{(N)})$; (iv) $(A^{(N)}_p)^{-1}A^{(N)}_q=B^{(N)}_pB^{(N)}_q$ for all $p,q\in Q_N$. \end{proposition} The proofs of Propositions \ref{b7} and \ref{b8} are completely analogous to the proof of Proposition \ref{b6} and we omit them. Now let us consider the dual automata of the Bellaterra automaton and automata of Bellaterra type. Let $\widehat D$ denote the dual automaton of the Bellaterra automaton $B$. The automaton $\widehat D$ is defined over the alphabet $Q=\{a,b,c\}$, with two internal states $0$ and $1$. The Moore diagram of $\widehat D$ is depicted in Figure \ref{fig8}. The automaton $\widehat D$ is bi-reversible since $B$ is bi-reversible. \begin{figure} \centering \caption{The Moore diagram of the automaton $\widehat D$.} \label{fig8} \end{figure} A word $\xi$ over an arbitrary alphabet is called a {\em double letter word\/} if there are two adjacent letters in $\xi$ that coincide. Otherwise we call $\xi$ a {\em no-double-letter word}. The set of no-double-letter words over the alphabet $Q$ forms a subtree of the rooted ternary tree $Q^*$. As an unrooted tree, this subtree is $3$-regular. However it is not regular as a rooted tree. 
The following proposition shows that the group $G(\widehat D)$ acts transitively on each level of the subtree. \begin{proposition}[\cite{N}]\lambdabel{b9} Suppose $\xi\in Q^*$ is a no-double-letter word. Then the orbit of $\xi$ under the action of the group $G(\widehat D)$ on $Q^*$ consists of all no-double-letter words of the same length as $\xi$. \end{proposition} \begin{proof} Let $\lambda$ and $\mu$ denote the state transition and output functions of the automaton $B$. By $\tilde\lambda$ and $\tilde\mu$ denote the state transition and output functions of its dual $\widehat D$. Take any $q\in Q$ and $x\in X$. Let $p=\lambda(q,x)$ and $y=\mu(q,x)$. Since $B$ coincides with its inverse automaton, it follows that $p=\lambda(q,y)$. Consequently, $\widehat D_x(qq)= \tilde\mu(x,q)\tilde\mu(\tilde\lambda(x,q),q)=\lambda(q,x)\lambda(q,\mu(q,x))=pp$. It follows that the set $P=\{qq\mid q\in Q\}\subset Q^*$ is invariant under $\widehat D_0$ and $\widehat D_1$. Any double letter word $\xi\in Q^*$ is represented as $\xi_1\xi_0\xi_2$, where $\xi_0\in P$ and $\xi_1,\xi_2\in Q^*$. For any $x\in X$ we have $\widehat D_x(\xi)=\widehat D_x(\xi_1)\widehat D_{x_0}(\xi_0)\widehat D_{x_1}(\xi_2)$, where $x_0,x_1\in X$. By the above $\widehat D_x(\xi)$ is a double letter word. Thus $\widehat D_0$ and $\widehat D_1$ map double letter words to double letter words. Since these transformations are invertible, they also map no-double-letter words to no-double-letter words, and so does any $g\in G(\widehat D)$. Now we are going to prove that for any no-double-letter words $\xi_1,\xi_2\in Q^*$ of the same length $l$ there exists $g\in G(\widehat D)$ such that $\xi_2=g(\xi_1)$. The empty word is the only word of length $0$ so it is no loss to assume that $l>0$. First consider the case when $l$ is even. We have $\xi_1=q_1q_2\dots q_{l-1}q_l$ and $\xi_2=p_1p_2\dots p_{l-1}p_l$ for some $q_i,p_i\in Q$, $1\le i\le l$. 
Consider two words $\eta_1= q_1q_2^{-1}\dots q_{l-1}q_l^{-1}$ and $\eta_2=p_1p_2^{-1}\dots p_{l-1}p_l^{-1}$ over the alphabet $Q^\pm$. Clearly, $\eta_1$ and $\eta_2$ follow the same pattern. Furthermore, they are freely irreducible since $\xi_1$ and $\xi_2$ are no-double-letter words. By Proposition \ref{a5}, $\eta_2=g_0(\eta_1)$ for some $g_0\in G(D)$. By Lemma \ref{auto1}, we can assume that $g_0\in S(D)$. Then $g_0=D_w$ for some word $w\in X^*$. Proposition \ref{auto4} implies that $U_{\eta_1}(wu)=U_{\eta_1}(w) U_{\eta_2}(u)$ for any $u\in X^*$. By Proposition \ref{b6}, $A_p^{-1}A_q= B_pB_q$ for all $p,q\in Q$. It follows that $U_{\eta_1}=B_{\xi_1}$ and $U_{\eta_2}=B_{\xi_2}$. In particular, $B_{\xi_1}(wu)=B_{\xi_1}(w) B_{\xi_2}(u)$ for any $u\in X^*$. Now Proposition \ref{auto4} implies that $B_{\xi_2}=B_{g(\xi_1)}$, where $g=\widehat D_w\in G(\widehat D)$. By the above $g(\xi_1)$ is a no-double-letter word. By Theorem \ref{b3}, the group $G(B)$ is freely generated by involutions $B_q$, $q\in Q$. Since $\xi_2$ and $g(\xi_1)$ are no-double-letter words in the alphabet $Q$, the equality $B_{\xi_2}=B_{g(\xi_1)}$ implies that $\xi_2=g(\xi_1)$. Now consider the case when $\xi_1$ and $\xi_2$ have odd length. Obviously, there exist letters $q_0,p_0\in Q$ such that $\xi_1q_0$ and $\xi_2p_0$ are no-double-letter words. Since $\xi_1q_0$ and $\xi_2p_0$ are of the same even length, it follows from the above that $\xi_2p_0=g(\xi_1q_0)$ for some $g\in G(\widehat D)$. Then $\xi_2=g(\xi_1)$. \end{proof} For any integer $n\ge0$ let $\widehat D^{(n)}$ denote the dual automaton of the automaton $B^{(n)}$. The automaton $\widehat D^{(n)}$ is defined over the alphabet $Q_n$, with two internal states $0$ and $1$. It is bi-reversible since $B^{(n)}$ is bi-reversible. \begin{proposition}\lambdabel{b10} Let $n\ge1$ and suppose $\xi\in Q_n^*$ is a no-double-letter word. 
Then the orbit of $\xi$ under the action of the group $G(\widehat D^{(n)})$ on $Q_n^*$ consists of all no-double-letter words of the same length as $\xi$. \end{proposition} The proof of Proposition \ref{b10} is completely analogous to the above proof of Proposition \ref{b9} and we omit it. {\sc \begin{raggedright} Department of Mathematics\\ Texas A\&M University\\ College Station, TX 77843--3368 \end{raggedright} } \end{document}
2,454
40,263
en
train
0.93.0
\begin{document} \title{On the Partition Set Cover Problem} \begin{abstract} Several algorithms with an approximation guarantee of $O(\log n)$ are known for the Set Cover problem, where $n$ is the number of elements. We study a generalization of the Set Cover problem, called the Partition Set Cover problem. Here, the elements are partitioned into $r$ \emph{color classes}, and we are required to cover at least $k_t$ elements from each color class $\mathcal{C}_t$, using the minimum number of sets. We give a randomized \textsf{LP}\xspace-rounding algorithm that is an $O(\beta + \log r)$ approximation for the Partition Set Cover problem. Here $\beta$ denotes the approximation guarantee for a related Set Cover instance obtained by rounding the standard \textsf{LP}\xspace. As a corollary, we obtain improved approximation guarantees for various set systems for which $\beta$ is known to be sublogarithmic in $n$. We also extend the \textsf{LP}\xspace rounding algorithm to obtain $O(\log r)$ approximations for similar generalizations of the Facility Location type problems. Finally, we show that many of these results are essentially tight, by showing that it is \mathcal{N}P-hard to obtain an $o(\log r)$-approximation for any of these problems. \end{abstract} \section{Introduction} We first consider the Set Cover problem. The input of the Set Cover problem consists of a set system $(X, \mathcal{R})$, where $X$ is a set of $n$ elements and $\mathcal{R}$ is a collection of subsets of $X$. Each set $S_i \in \mathcal{R}$ has a non-negative weight $w_i$. The goal of the set cover problem is to find a minimum-weight sub-collection of sets from $\mathcal{R}$, that \emph{covers} $X$. In the unweighted version, all weights are assumed to be $1$. We state the standard \textsf{LP}\xspace relaxation for the Set Cover problem. 
\begin{mdframed}[backgroundcolor=gray!9] (Set Cover \textsf{LP}\xspace) \begin{alignat}{3} \text{minimize} \displaystyle&\sum\limits_{S_i \in \mathcal{R}} w_{i}x_{i} & \nonumber \\ \text{subject to} \displaystyle&\sum\limits_{i:e_{j} \in S_{i}} x_{i} \geq 1, \quad & \forall e_j \in X \label[constr]{constr:sc-cover-ej}\\ \displaystyle &x_i \in [0, 1], & \forall S_i \in \mathcal{R} \label[constr]{constr:sc-fractional-x} \end{alignat} \end{mdframed} It is well-known that a simple greedy algorithm, or an \textsf{LP}\xspace rounding algorithm gives an $O(\log n)$ approximation. This can be improved to $O(\log \Delta)$, where $\Delta$ is the maximum size of a set -- see \cite{VaziraniBook} for further details and references. It is also known that it is not possible to obtain an approximation guarantee that is asymptotically smaller than $O(\log n)$ in general, under certain standard complexity theoretic assumptions (\cite{Feige1998,DS2014}). A simple \textsf{LP}\xspace rounding algorithm is also known to give an $f$ approximation (see \cite{VaziraniBook}), where $f$ is the maximum \emph{frequency} of an element, i.e., the maximum number of sets any element is contained in. For several set systems such as geometric set systems, however, sublogarithmic\,---\,or even constant\,---\,approximation guarantees are known. For a detailed discussion of such results, see \cite{Inamdar2018partial}. Partial Set Cover problem (\textsf{PSC}\xspace) is a generalization of the Set Cover problem. Here, along with the set system $(X, \mathcal{R})$, we are also given the coverage requirement $1 \le k \le n$. The objective of \textsf{PSC}\xspace is to find a minimum-weight cover for at least $k$ of the given $n$ elements. It is easy to see that when the coverage requirement $k$ equals $n$, \textsf{PSC}\xspace reduces to the Set Cover problem. 
However, for a general $k$, \textsf{PSC}\xspace introduces the additional difficulty of discovering a subset with $k$ of the $n$ elements that one must aim to cover in order to obtain a least expensive solution. Despite this, approximation guarantees matching that for the standard Set Cover are known in many cases. For example, a slight modification of the greedy algorithm can be shown to be an $O(\log \mathsf{D}elta)$ approximation (\cite{Kearns1990,Slavik1997}). Algorithms achieving the approximation guarantee of $f$ are known via various techniques -- see \cite{Fujito04} and the references therein. For several instances of \textsf{PSC}\xspace, an $O(\beta)$ approximation algorithm was described in \cite{Inamdar2018partial}, where $\beta$ is the integrality gap of the standard set cover \textsf{LP}\xspace for a related set system. As a corollary, they give improved approximation guarantees for the geometric instances of \textsf{PSC}\xspace, for which $\beta$ is known to be sublogarithmic (or even a constant). \paragraph{Partition Set Cover Problem.} Now we consider a further generalization of the Partial Set Cover problem, called the Partition Set Cover problem. Again, the input contains a set system $(X, \mathcal{R})$, with weights on the sets, where $X = \{e_1, \ldots, e_n\}$ and $\mathcal{R} = \{S_1, \ldots, S_m\}$. We are also given $r$ non-empty subsets of $X$: $\mathcal{C}_1, \ldots, \mathcal{C}_r$, where each $\mathcal{C}_t$ is referred to as a \emph{color class}. These $r$ color classes form a partition of $X$. Each color class $\mathcal{C}_t$ has a coverage requirement $1 \le k_t \le |\mathcal{C}_t|$ that is also a part of the input. The objective of the Partition Set Cover problem is to find a minimum-weight sub-collection $\mathcal{R}' \subseteq \mathcal{R}$, such that it meets the coverage requirement of each color class, i.e., for each color class $\mathcal{C}_t$, we have that $|(\bigcup \mathcal{R}') \cap \mathcal{C}_t| \ge k_t$. 
Here, for any $\mathcal{R}' \subseteq \mathcal{R}$, we use the shorthand $\bigcup \mathcal{R}'$ for referring to the union of all sets in $\mathcal{R}'$, i.e., $\bigcup \mathcal{R}' \coloneqq \bigcup_{S_i \in \mathcal{R}'} S_i$. \citet{bera2014approximation} give an $O(\log r)$-approximation for an analogous version of the Vertex Cover problem, called the Partition Vertex Cover problem. This is a special case of the Partition Set Cover problem where each element is contained in exactly two sets. The Vertex Cover and Partial Vertex Cover problems are, respectively, special cases of the Set Cover and Partial Set Cover problems. Various $2$ approximations are known for Vertex Cover (see references in \cite{VaziraniBook}) as well as Partial Vertex Cover (\cite{BshoutyL1998,Hochbaum98,Gandhi2004}). \citet{bera2014approximation} note that for the Partition Set Cover problem, an extension of the greedy algorithm of \citet{Slavik1997} gives an $O(\log (\sum_{t = 1}^r k_t))$ approximation. On the negative side, they (\cite{bera2014approximation}) show that it is \mathcal{N}P-hard to obtain an approximation guarantee asymptotically better than $O(\log r)$ for the Partition Vertex Cover problem. Since Partition Vertex Cover is a special case of Partition Set Cover, the same hardness result holds for the Partition Set Cover problem as well. \citet{Har2018few} consider a problem concerned with breaking up a collection of point sets using a minimum number of hyperplanes. They reduce this problem to an instance of the Partition Set Cover problem, where the color classes are no longer required to form a partition of $X$. For this problem, which they call `Partial Cover for Multiple Sets', they describe an $O(\log (nr))$ approximation. We note that the algorithm of \citet{bera2014approximation} as well as our algorithm easily extends to this more general setting. 
\subsection{Natural \textsf{LP}\xspace Relaxation and Its Integrality Gap} \label{subsec:nat-lp} Given the success of algorithms based on the natural \textsf{LP}\xspace relaxation for \textsf{PSC}\xspace, let us first consider the natural \textsf{LP}\xspace for the Partition Set Cover problem. We first state this natural \textsf{LP}\xspace relaxation. \begin{mdframed}[backgroundcolor=gray!9] (Natural \textsf{LP}\xspace) \begin{alignat}{3} \text{minimize} \displaystyle&\sum\limits_{S_i \in \mathcal{R}} w_{i}x_{i} & \nonumber \\ \text{subject to} \displaystyle&\sum\limits_{i:e_{j} \in S_{i}} x_{i} \geq z_j, \quad & \forall e_j \in X \label[constr]{constr:cover-ej}\\ \displaystyle&\sum_{e_j \in \mathcal{C}_r}z_j \ge k_t, & \forall \mathcal{C}_t \in \{\mathcal{C}_1, \ldots, \mathcal{C}_r\} \label[constr]{constr:cover-ci}\\ \displaystyle &z_j \in [0, 1], & \forall e_j \in X \label[constr]{constr:fractional-z}\\ \displaystyle &x_i \in [0, 1], & \forall S_i \in \mathcal{R} \label[constr]{constr:fractional-x} \end{alignat} \end{mdframed} In the corresponding integer program, the variable $x_i$ denotes whether or not the set $S_i \in \mathcal{R}$ is included in the solution. Similarly, the variable $z_j$ denotes whether or not an element $e_j \in X$ is covered in the solution. Both types of variables are restricted to $\{0, 1\}$ in the integer program. The \textsf{LP}\xspace relaxation stated above relaxes this condition by allowing those variables to take any value from $[0, 1]$.
2,698
26,543
en
train
0.93.1
Various $2$ approximations are known for Vertex Cover (see references in \cite{VaziraniBook}) as well as Partial Vertex Cover (\cite{BshoutyL1998,Hochbaum98,Gandhi2004}). \citet{bera2014approximation} note that for the Partition Set Cover problem, an extension of the greedy algorithm of \citet{Slavik1997} gives an $O(\log (\sum_{t = 1}^r k_t))$ approximation. On the negative side, they (\cite{bera2014approximation}) show that it is \mathcal{N}P-hard to obtain an approximation guarantee asymptotically better than $O(\log r)$ for the Partition Vertex Cover problem. Since Partition Vertex Cover is a special case of Partition Set Cover, the same hardness result holds for the Partition Set Cover problem as well. \citet{Har2018few} consider a problem concerned with breaking up a collection of point sets using a minimum number of hyperplanes. They reduce this problem to an instance of the Partition Set Cover problem, where the color classes are no longer required to form a partition of $X$. For this problem, which they call `Partial Cover for Multiple Sets', they describe an $O(\log (nr))$ approximation. We note that the algorithm of \citet{bera2014approximation} as well as our algorithm easily extends to this more general setting. \subsection{Natural \textsf{LP}\xspace Relaxation and Its Integrality Gap} \label{subsec:nat-lp} Given the success of algorithms based on the natural \textsf{LP}\xspace relaxation for \textsf{PSC}\xspace, let us first consider the natural \textsf{LP}\xspace for the Partition Set Cover problem. We first state this natural \textsf{LP}\xspace relaxation. 
\begin{mdframed}[backgroundcolor=gray!9] (Natural \textsf{LP}\xspace) \begin{alignat}{3} \text{minimize} \displaystyle&\sum\limits_{S_i \in \mathcal{R}} w_{i}x_{i} & \nonumber \\ \text{subject to} \displaystyle&\sum\limits_{i:e_{j} \in S_{i}} x_{i} \geq z_j, \quad & \forall e_j \in X \label[constr]{constr:cover-ej}\\ \displaystyle&\sum_{e_j \in \mathcal{C}_r}z_j \ge k_t, & \forall \mathcal{C}_t \in \{\mathcal{C}_1, \ldots, \mathcal{C}_r\} \label[constr]{constr:cover-ci}\\ \displaystyle &z_j \in [0, 1], & \forall e_j \in X \label[constr]{constr:fractional-z}\\ \displaystyle &x_i \in [0, 1], & \forall S_i \in \mathcal{R} \label[constr]{constr:fractional-x} \end{alignat} \end{mdframed} In the corresponding integer program, the variable $x_i$ denotes whether or not the set $S_i \in \mathcal{R}$ is included in the solution. Similarly, the variable $z_j$ denotes whether or not an element $e_j \in X$ is covered in the solution. Both types of variables are restricted to $\{0, 1\}$ in the integer program. The \textsf{LP}\xspace relaxation stated above relaxes this condition by allowing those variables to take any value from $[0, 1]$. One important way the Standard \textsf{LP}\xspace differs from the Partial Cover \textsf{LP}\xspace is that we have a coverage constraint (\mathcal{C}ref{constr:cover-ci}) for each color class $\mathcal{C}_1, \ldots, \mathcal{C}_r$. \iffalse If an element $e_j \in X$ belongs to multiple color classes, then its $z_j$ appears in multiple such constraints.\fi Unfortunately, this \textsf{LP}\xspace has a large integrality gap as demonstrated by the following simple construction. \paragraph{Integrality Gap.} Let $(X, \mathcal{R})$ be the given set system, where $X = \{e_1, e_2, \ldots, e_n\}$ and $\mathcal{R} = \{S_1, S_2, \ldots, S_{\sqrt{n}}\}$ -- assuming $n$ is a perfect square. The sets $S_i$ form a partition of $X$, such that each $S_i$ contains exactly $\sqrt{n}$ elements. 
For any $1 \le i \le \sqrt{n}$, the color class $\mathcal{C}_i$ equals $S_i$, and its coverage requirement, $k_i = 1$. Also, for each set $S_i$, $w_i = 1$. Clearly any integral optimal solution must choose all sets $S_1, \ldots, S_{\sqrt{n}}$, with cost $\sqrt{n}$. On the other hand, consider a fractional solution $(x, z)$, where for any $S_i \in \mathcal{R}$, $x_i = \frac{1}{\sqrt{n}}$; and for any $e_j \in X$, $z_j = \frac{1}{\sqrt{n}}$. It is easy to see that this solution satisfies all constraints, and has cost $1$. We emphasize here that even the natural \textsf{PSC}\xspace \textsf{LP}\xspace has a large integrality gap, but it can be easily circumvented via parametric search; see \cite{Gandhi2004,Inamdar2018partial} for examples. For the Partition Set Cover problem, however, similar techniques do not seem to work. \subsection{Our Results and Techniques} For any subset $Y \subseteq X$, let $(Y, \mathcal{R}_{|Y})$ denote the projection of $(X, \mathcal{R})$ on $Y$, where $\mathcal{R}_{|Y} = \{S_i \cap Y \mid S_i \in \mathcal{R} \}$. Suppose there exists an algorithm that can round a feasible \emph{Set Cover} \textsf{LP}\xspace solution for any projection $(Y, \mathcal{R}_{|Y})$, within a factor of $\beta$. Then, we show that there exists an $O(\beta + \log r)$ approximation for the Partition Set Cover problem on the original set system $(X, \mathcal{R})$. Given the integrality gap of the natural \textsf{LP}\xspace for the Partition Set Cover problem, we strengthen it by adding the knapsack cover inequalities (first introduced by \citet{carrStrengthening}) to the \textsf{LP}\xspace relaxation -- the details are given in the following section. This approach is similar to that used for the Partition Vertex Cover problem considered in \citet{bera2014approximation}, and for the Partial Set Cover problem in \citet{Fujito04}. A similar technique was also used for a scheduling with outliers problem by \citet{gupta2009scheduling}. 
Once we have a solution to this strengthened \textsf{LP}\xspace, we partition the elements of $X$ as follows. The elements that are covered to an extent of at least a positive constant in this \textsf{LP}\xspace solution are said to be \emph{heavy} (the precise definition is given in the following section), and the rest of the elements are \emph{light}. For the heavy elements, we obtain a standard \emph{Set Cover} \textsf{LP}\xspace solution, and round it using a black-box rounding algorithm with a guarantee $\beta$. To meet the residual coverage requirements of the color classes, we use a randomized algorithm that covers some of the light elements. This randomized rounding consists of $O(\log r)$ independent iterations of a simple randomized rounding process. Let $\Sigma_\ell$ be a random collection of sets, which is the outcome of an iteration $\ell$. The cover for the heavy elements from previous step, plus $\bigcup_\ell \Sigma_\ell$, which is the result of randomized rounding, make up the final output of our algorithm. The analysis of this solution hinges on showing the following two properties of $\Sigma_\ell$. \begin{enumerate} \item Expected cost of $\Sigma_\ell$ is no more than some constant times the optimal cost, and \item For any color class $\mathcal{C}_t$, the sets in $\Sigma_\ell$ satisfy the residual coverage of $\mathcal{C}_t$ with at least a constant probability. \end{enumerate} The first property of $\Sigma_\ell$ follows easily, given the description of the randomized rounding process. Much of the analysis is devoted to showing the second property. The randomized rounding algorithm ensures that in each iteration, the residual requirement of each color class is satisfied in expectation. However, showing the second property, i.e., the residual coverage is satisfied with at least a constant probability, is rather tricky for the Partition Set Cover problem. 
The analogous claim about the Partition Vertex Cover problem in \cite{bera2014approximation} is easier to prove, because each edge is incident on exactly two vertices. Despite the fact that here, an element can belong to any number of sets, we are able to show that the second property holds, using a careful analysis of the randomized rounding process. From these two properties, it is straightforward to show that the algorithm is an $O(\beta + \log r)$ approximation for the Partition Set Cover problem (for details, see \mathcal{C}ref{thm:main-theorem}). As shown in \cite{bera2014approximation}, $\Omega(\log r)$ is necessary, even for the Partition Vertex Cover problem, and we extend this result to the Partition Set Cover problem induced by various geometric set systems (see \mathcal{C}ref{sec:hardness}). Also $\beta$ is the integrality gap of the standard Set Cover \textsf{LP}\xspace for this set system. Our approximation guarantee of $O(\beta + \log r)$ should be viewed in the context of these facts. We also note here that an extension of the \textsf{LP}\xspace rounding algorithm of \cite{Inamdar2018partial} for the \textsf{PSC}\xspace can be shown to be an $O(\beta + r)$ approximation. In particular, this extension involves doing a separate rounding for the light elements in each color class, which does not take advantage of the fact that a set in $\mathcal{R}$ may cover elements from multiple color classes. When $\beta$ is a constant, our guarantee is an exponential improvement over $O(\beta + r)$. Our analysis of the randomized rounding process may be of independent interest, and at its core establishes the following type of claim. Suppose we have a set system $(X, \mathcal{R})$, where each set $S_i \in \mathcal{R}$ has at most $k$ elements; here $k$ is a parameter that can be much smaller than $|X|$. 
Suppose that we construct a collection of subsets $\mathcal{R}igma \subseteq \mathcal{R}$ by independently picking each set $S_i \in \mathcal{R}$ with a certain probability $0 < \mu_i < 1$. Assume that the set system and the $\mu_i$ ensure the following properties: \begin{enumerate} \item The expected number of elements that $\mathcal{R}igma$ covers is large in terms of $k$, say at least $6k$. \item For any element in $X$, the probability that it is covered by $\mathcal{R}igma$ is at most a constant strictly smaller than $1$. \end{enumerate} Given these conditions, our analysis shows that $\mathcal{R}igma$ covers at least $k$ elements with probability at least a positive constant. \paragraph{Applications.} As described earlier, for several geometric set systems, the approximation guarantee $\beta$ for Set Cover based on the natural LP relaxation is sublogarithmic. For example, when $X$ is a set of points in $\mathbb{R}^2$ (resp. $\mathbb{R}^3$), and each set in $\mathcal{R}$ consists of the points in $X$ contained in some input disk (resp. halfspace), an \textsf{LP}\xspace rounding algorithm with a guarantee $\beta = O(1)$ is known. Therefore, for the corresponding Partition Cover instances, we get an $O(\log r)$ approximation. If $X$ is a set of $n$ rectangles in $\mathbb{R}^3$, and each set in $\mathcal{R}$ is the subset of rectangles that are stabbed by an input point, then an \textsf{LP}\xspace rounding algorithm with a guarantee $\beta = O(\log \log n)$ is known. In the corresponding Partition Set Cover instance, we get an $O(\log \log n + \log r)$ approximation. We summarize some of these results in the following table. We remind the reader that it is \mathcal{N}P-hard to obtain an $o(\log r)$ approximation for all of these set systems, as shown in \mathcal{C}ref{sec:hardness}. Therefore, when $\beta = O(\log r)$, improving on the $O(\beta + \log r)$ approximation is \mathcal{N}P-hard. 
Otherwise, when $\beta = \omega(\log r)$, improving $O(\beta + \log r)$ also involves improving the approximation guarantee for the corresponding Set Cover problem, for example, hitting rectangles in $\mathbb{R}^3$, or covering by fat triangles in $\mathbb{R}^2$.
3,255
26,543
en
train
0.93.2
Our analysis of the randomized rounding process may be of independent interest, and at its core establishes the following type of claim. Suppose we have a set system $(X, \mathcal{R})$, where each set $S_i \in \mathcal{R}$ has at most $k$ elements; here $k$ is a parameter that can be much smaller than $|X|$. Suppose that we construct a collection of subsets $\mathcal{R}igma \subseteq \mathcal{R}$ by independently picking each set $S_i \in \mathcal{R}$ with a certain probability $0 < \mu_i < 1$. Assume that the set system and the $\mu_i$ ensure the following properties: \begin{enumerate} \item The expected number of elements that $\mathcal{R}igma$ covers is large in terms of $k$, say at least $6k$. \item For any element in $X$, the probability that it is covered by $\mathcal{R}igma$ is at most a constant strictly smaller than $1$. \end{enumerate} Given these conditions, our analysis shows that $\mathcal{R}igma$ covers at least $k$ elements with probability at least a positive constant. \paragraph{Applications.} As described earlier, for several geometric set systems, the approximation guarantee $\beta$ for Set Cover based on the natural LP relaxation is sublogarithmic. For example, when $X$ is a set of points in $\mathbb{R}^2$ (resp. $\mathbb{R}^3$), and each set in $\mathcal{R}$ consists of the points in $X$ contained in some input disk (resp. halfspace), an \textsf{LP}\xspace rounding algorithm with a guarantee $\beta = O(1)$ is known. Therefore, for the corresponding Partition Cover instances, we get an $O(\log r)$ approximation. If $X$ is a set of $n$ rectangles in $\mathbb{R}^3$, and each set in $\mathcal{R}$ is the subset of rectangles that are stabbed by an input point, then an \textsf{LP}\xspace rounding algorithm with a guarantee $\beta = O(\log \log n)$ is known. In the corresponding Partition Set Cover instance, we get an $O(\log \log n + \log r)$ approximation. We summarize some of these results in the following table. 
We remind the reader that it is \textsf{NP}-hard to obtain an $o(\log r)$ approximation for all of these set systems, as shown in \Cref{sec:hardness}. Therefore, when $\beta = O(\log r)$, improving on the $O(\beta + \log r)$ approximation is \textsf{NP}-hard. Otherwise, when $\beta = \omega(\log r)$, improving $O(\beta + \log r)$ also involves improving the approximation guarantee for the corresponding Set Cover problem, for example, hitting rectangles in $\mathbb{R}^3$, or covering by fat triangles in $\mathbb{R}^2$. \begin{table}[H] \centering \caption{Some of the approximation guarantees for Partition Set Cover (last column). In the third column, we have $\beta$, the \textsf{LP}\xspace-based approximation guarantee for the corresponding Set Cover instance. See \cite{ClarksonV2007,AronovES2010,VaradarajanWGSC2010, EASFat, ChanGKS12,ElbTerrain} for the references establishing these bounds on $\beta$.} \begin{tabular}{|c|c|c|c|} \hline $X$ & Geometric objects inducing $\mathcal{R}$ & $\beta$ & Our guarantee\\ \hline \multirow{2}{*}{Points in $\mathbb{R}^2$} & Disks (via containment) & $O(1)$ & $O(\log r)$ \\ \hhline{|~|---} & Fat triangles (containment) & $O(\log \log^* n)$ & $O(\log \log^* n + \log r)$ \\ \hline \multirow{2}{*}{Points in $\mathbb{R}^3$} & Unit cubes (containment) & $O(1)$ & $O(\log r)$ \\ \hhline{|~|---} & Halfspaces (containment) & $O(1)$ & $O(\log r)$ \\ \hline Rectangles in $\mathbb{R}^3$ & Points (via stabbing) & $O(\log \log n)$ & $O(\log \log n + \log r)$ \\ \hline Points on 1.5D terrain & Points on terrain (via visibility) & $O(1)$ & $O(\log r)$ \\ \hline \end{tabular} \end{table} As a combinatorial application, consider the combinatorial version of the Partition Set Cover problem, where each element is contained in at most $f$ sets of $\mathcal{R}$. We note that the algorithm of \citet{bera2014approximation} can be extended in a straightforward way to obtain an $O(f \log r)$ approximation in this case. 
However, recall that the Set Cover \textsf{LP}\xspace can be rounded to give an $f$ approximation. Therefore, using our result, we can get an $O(f + \log r)$ approximation for the Partition Set Cover problem, which is an improvement over the earlier result. Finally, in \Cref{sec:fl-mcc}, we consider analogous generalizations of the (Metric Uncapacitated) Facility Location Problem, and the so-called Minimum Cost Covering Problem (\cite{CharikarP04}). Various \textsf{LP}\xspace-based $O(1)$ approximations are known for these problems (\cite{JainVazirani2001,ByrkaFLLP,Li2013}, and \cite{CharikarP04} respectively), i.e., for these problems, $\beta = O(1)$. We show how to adapt the algorithm from \Cref{sec:LPRounding} to obtain $O(\log r)$ approximations for the generalizations of these problems with $r$ color classes. \paragraph{Organization.} In the following section, we first describe the strengthened \textsf{LP}\xspace and then discuss the randomized rounding algorithm. In \Cref{subsec:coverage}, we prove the second property of $\Sigma_\ell$ as stated above. In \Cref{subsec:expectation}, we prove a technical lemma that is required in \Cref{subsec:coverage}. In \Cref{subsec:ellipsoid}, we show how to obtain a feasible solution to the strengthened \textsf{LP}\xspace, despite exponentially many constraints in the \textsf{LP}\xspace. In \Cref{sec:fl-mcc}, we give the $O(\log r)$ approximations for the generalizations of the Facility Location and the Minimum Cost Covering problems. Finally, in \Cref{sec:hardness}, we show how to extend the $\Omega(\log r)$ hardness result from \cite{bera2014approximation}, for all these problems. 
\iffalse \section{Problem Definition and the Standard \textsf{LP}\xspace} We first consider the following \textsf{LP}\xspace relaxation for Partition Set Cover problem, which is a simple extension of the standard \textsf{LP}\xspace relaxation for the Partial Set Cover problem. \begin{mdframed}[backgroundcolor=gray!9] (Standard \textsf{LP}\xspace) \begin{alignat}{3} \text{minimize} \displaystyle&\sum\limits_{S_i \in \mathcal{R}} w_{i}x_{i} & \nonumber \\ \text{subject to} \displaystyle&\sum\limits_{i:e_{j} \in S_{i}} x_{i} \geq z_j, \quad & e_j \in X \label[constr]{constr:cover-ej}\\ \displaystyle&\sum_{e_j \in \mathcal{C}_r}z_j \ge k_t, & \mathcal{C}_t \in \{\mathcal{C}_1, \ldots, \mathcal{C}_r\} \label[constr]{constr:cover-ci}\\ \displaystyle &z_j \in [0, 1], & e_j \in X \label[constr]{constr:fractional-z}\\ \displaystyle &x_i \in [0, 1], & S_i \in \mathcal{R} \label[constr]{constr:fractional-x} \end{alignat} \end{mdframed} One important way the Standard \textsf{LP}\xspace differs from the Partial Cover \textsf{LP}\xspace is that we have a coverage constraint (\mathcal{C}ref{constr:cover-ci}) for each color class $\mathcal{C}_1, \ldots, \mathcal{C}_r$. If an element $e_j \in X$ belongs to multiple color classes, then its $z_j$ appears in multiple such constraints. Unfortunately, this \textsf{LP}\xspace has a large integrality gap as demonstrated by the following simple construction. \subsection{Integrality Gap} Let $(X, \mathcal{R})$ be the given set system, where $X = \{e_1, e_2, \ldots, e_n\}$ and $\mathcal{R} = \{S_1, S_2, \ldots, S_{\sqrt{n}}\}$ -- assuming $n$ is a perfect square. The sets $S_i$ form a partition of $X$, such that each $S_i$ contains exactly $\sqrt{n}$ elements. For any $1 \le i \le \sqrt{n}$, the color class $\mathcal{C}_i$ equals $S_i$, and its coverage requirement, $k_i = 1$. Also, for each set $S_i$, $w_i = 1$. Clearly any integral optimal solution must choose all sets $S_1, \ldots, S_{\sqrt{n}}$, with cost $\sqrt{n}$. 
However, consider a fractional solution $(x, z)$, where for any $S_i \in \mathcal{R}$, $x_i = \frac{1}{\sqrt{n}}$; and for any $e_j \in X$, $z_j = \frac{1}{\sqrt{n}}$. It is easy to see that this solution satisfies all constraints, and has cost $1$. Note that the sub-instance induced by each color class is equivalent to a Partial Set Cover instance, and hence such an integrality gap can be overcome if we knew the heaviest set from each color class. However, the standard technique of guessing the heaviest set from each color class will take time that is exponential in the number of color classes. Therefore, we strengthen this \textsf{LP}\xspace by adding extra constraints. \fi \section{Strengthened \textsf{LP}\xspace and Randomized Rounding} \label{sec:LPRounding} Recall that the input contains a set system $(X, \mathcal{R})$, with weights on the sets, where $X = \{e_1, \ldots, e_n\}$ and $\mathcal{R} = \{S_1, \ldots, S_m\}$. We are also given $r$ non-empty subsets of $X$: $\mathcal{C}_1, \ldots, \mathcal{C}_r$, where each $\mathcal{C}_t$ is referred to as a \emph{color class}. Here, we consider a generalization of the Partition Set Cover problem, where the color classes cover $X$ but are no longer required to form a partition of $X$, i.e., an element $e_j$ can belong to multiple color classes. Each color class $\mathcal{C}_t$ has a coverage requirement $1 \le k_t \le |\mathcal{C}_t|$. The objective of the Partition Set Cover problem is to find a minimum-weight sub-collection $\mathcal{R}' \subseteq \mathcal{R}$, such that it meets the coverage requirement of each color class, i.e., for each color class $\mathcal{C}_t$, we have that $|(\bigcup \mathcal{R}') \cap \mathcal{C}_t| \ge k_t$. Let $OPT$ denote the cost of an optimal solution for this problem. Now, we describe the strengthened \textsf{LP}\xspace. Imagine $\mathcal{A} \subseteq \mathcal{R}$ is a collection of sets that we have decided to add to our solution. 
The sets in $\mathcal{A}$ may cover some elements from each color class $\mathcal{C}_t$, potentially reducing the remaining coverage requirement. For a color class $\mathcal{C}_t$, define $\mathcal{C}_t(\mathcal{A}) \coloneqq (\bigcup \mathcal{A}) \cap \mathcal{C}_t$ to be the set of elements of color $\mathcal{C}_t$, covered by the sets in $\mathcal{A}$. Then, let $k_t(\mathcal{A}) \coloneqq \max\{0, k_t - |\mathcal{C}_t(\mathcal{A})| \}$ be the residual coverage requirement of $\mathcal{C}_t$ with respect to the collection $\mathcal{A}$. Finally, for a set $S_i \not\in \mathcal{A}$, and a color class $\mathcal{C}_t$, define $\textsf{deg}_t(S_i, \mathcal{A}) \coloneqq |S_i \cap (\mathcal{C}_t \setminus \mathcal{C}_t(\mathcal{A}))|$ to be the additional number of elements from $\mathcal{C}_t$ covered by $S_i$, provided that $\mathcal{A}$ is already a part of the solution. For any $\mathcal{C}_t$ and for any collection $\mathcal{A} \subseteq \mathcal{R}$, the following constraint is satisfied by any feasible integral solution: $$\sum_{S_i \not\in \mathcal{A}} x_i \cdot \min\{\textsf{deg}_t(S_i, \mathcal{A}), k_t(\mathcal{A})\} \ge k_t(\mathcal{A})$$ For a detailed explanation of why this constraint holds, we refer the reader to the discussion in \citet{bera2014approximation}. Adding such constraints for each $\mathcal{A} \subseteq \mathcal{R}$ and for each $\mathcal{C}_t$, gives the following strengthened \textsf{LP}\xspace. 
\begin{mdframed}[backgroundcolor=gray!9] (Strengthened \textsf{LP}\xspace) \begin{alignat}{3} \text{minimize} \displaystyle&\sum\limits_{S_i \in \mathcal{R}} w_{i}x_{i} & \nonumber \\ \text{subject to\quad } &\text{\Cref{constr:cover-ej,constr:cover-ci,constr:fractional-z,constr:fractional-x}, and } \nonumber \\\displaystyle&\sum_{S_i \not\in \mathcal{A}} x_i \cdot \min\{\textsf{deg}_t(S_i, \mathcal{A}), k_t(\mathcal{A})\} \ge k_t(\mathcal{A}), &\quad \forall \mathcal{C}_t \in \{\mathcal{C}_1, \ldots, \mathcal{C}_r\} \text{ and } \forall \mathcal{A} \subseteq \mathcal{R} \label[constr]{constr:s-cover-ci-a} \end{alignat} \end{mdframed}
3,784
26,543
en
train
0.93.3
(Strengthened \textsf{LP}\xspace) \begin{alignat}{3} \text{minimize} \displaystyle&\sum\limits_{S_i \in \mathcal{R}} w_{i}x_{i} & \nonumber \\ \text{subject to\quad } &\text{\mathcal{C}ref{constr:cover-ej,constr:cover-ci,constr:fractional-z,constr:fractional-x}, and } \nonumber \\\displaystyle&\sum_{S_i \not\in \mathcal{A}} x_i \cdot \min\{\textsf{deg}_t(S_i, \mathcal{A}), k_t(\mathcal{A})\} \ge k_t(\mathcal{A}), &\quad \forall \mathcal{C}_t \in \{\mathcal{C}_1, \ldots, \mathcal{C}_r\} \text{ and } \forall \mathcal{A} \subseteq \mathcal{R} \label[constr]{constr:s-cover-ci-a} \end{alignat} \end{mdframed} Let $(x, z)$ be an \textsf{LP}\xspace solution to the natural \textsf{LP}\xspace\,---\, i.e., $(x, z)$ satisfies \mathcal{C}ref{constr:cover-ej,constr:cover-ci,constr:fractional-z,constr:fractional-x}. Let $H = \{e_j \in X \mid \sum_{ S_i \ni e_j} x_i \ge \frac{1}{6 \alpha} \}$ be the set of \emph{heavy} elements, where $\alpha > 1$ is some constant to be fixed later. Let $(\widetilde{x})$ be a solution defined as $\widetilde{x}_i = \min\{6\alpha \cdot x_i, 1\}$, for all $S_i \in \mathcal{R}$. By definition, for any heavy element $e_j \in H$, we have that $\sum_{S_i \ni e_j} \widetilde{x}_i \ge 1$, so $(\widetilde{x})$ is a feasible Set Cover \textsf{LP}\xspace solution for the projected set system $(H, \mathcal{R}_{|H})$, with cost at most $6\alpha \cdot \sum_{S_i \in \mathcal{R}} x_i$. Let $\mathcal{A}'$ be the collection of sets returned by a $\beta$-approximate Set Cover \textsf{LP}\xspace rounding algorithm. Starting from an empty solution, we add the sets from $\mathcal{A}'$ to the solution. Let $\mathcal{A} = \mathcal{A}' \cup \{S_i \in \mathcal{R} \mid x_i \ge \frac{1}{6\alpha} \}$. However, notice that if $x_i \ge \frac{1}{6\alpha}$ for some set $S_i$, then all elements in $S_i$ are heavy by definition, and hence are covered by $\mathcal{A}'$. 
Therefore, $\mathcal{A}$ and $\mathcal{A}'$ cover the same set of elements from $X$, and we may pretend that we have added the sets in $\mathcal{A}$ to our solution. In \Cref{subsec:ellipsoid}, we discuss how to obtain a fractional \textsf{LP}\xspace solution $(x, z)$ satisfying the following properties, in polynomial time. \begin{enumerate} \item $(x, z)$ satisfies \Cref{constr:cover-ej,constr:cover-ci,constr:fractional-z,constr:fractional-x}. \item $\sum_{S_i \in \mathcal{R}} w_i x_i \le 2 \cdot OPT$ \item $\sum_{S_i \not\in \mathcal{A}} x_i \cdot \min\{ \textsf{deg}_{t}(S_i, \mathcal{A}), k_t(\mathcal{A}) \} \ge k_t(\mathcal{A}) \qquad \forall \mathcal{C}_t \in \{ \mathcal{C}_1, \ldots, \mathcal{C}_r \}$, where $\mathcal{A}$ is obtained from $(x, z)$ as described above. \end{enumerate} We assume that we have such a fractional \textsf{LP}\xspace solution $(x, z)$. Now, for any uncovered element $e_j \in X \setminus \bigcup \mathcal{A}$, we have that $\sum_{ S_i \ni e_j} x_i < \frac{1}{6\alpha}$. Furthermore, for any set $S_i \in \mathcal{R} \setminus \mathcal{A}$, we have $x_i < \frac{1}{6\alpha}$. However, even after adding $\mathcal{A}$ to the solution, each color class $\mathcal{C}_t$ is left with some residual coverage $k_t(\mathcal{A})$. In order to satisfy this residual coverage requirement, we use the following randomized rounding algorithm. Consider an iteration $\ell$ of the algorithm. In this iteration, for each set $S_i \in \mathcal{R} \setminus \mathcal{A}$, we independently add $S_i$ to the solution with probability $6x_i$. Let $\Sigma_\ell$ be the collection of sets added during this iteration in this manner. We perform $c \log r$ independent iterations of this algorithm for some constant $c$, and let $\Sigma = \bigcup_{\ell = 1}^{c \log r} \Sigma_\ell$. In \Cref{subsec:coverage}, we prove the following property about $\Sigma_\ell$. 
\begin{restatable}{lemma}{coverage} \label{lem:constant-prob} For any color class $\mathcal{C}_t$, the solution $\Sigma_\ell$ covers at least $k_t(\mathcal{A})$ elements from $\mathcal{C}_t \setminus \mathcal{C}_t(\mathcal{A})$ with probability at least a positive constant. \end{restatable} This lemma implies the following result. \begin{theorem} \label{thm:main-theorem} Suppose there exists a polynomial time \textsf{LP}\xspace rounding algorithm that rounds a given Set Cover \textsf{LP}\xspace solution on any projection of $(X, \mathcal{R})$, within a $\beta$ factor. Then, there exists a polynomial time randomized algorithm that returns a solution with approximation guarantee of $O(\beta + \log r)$ for the Partition Set Cover problem, with at least a constant probability. \end{theorem} \begin{proof} First, we argue about the running time. As described earlier, the discussion of how to obtain the required \textsf{LP}\xspace solution $(x, z)$ in polynomial time is deferred to \Cref{subsec:ellipsoid}. It is easy to see that the rest of the steps take polynomial time, and hence the overall time taken by this algorithm is polynomial. Now, we argue about the feasibility of this solution. The sets in $\mathcal{A}'$ form a cover for the heavy elements by assumption. Furthermore, from \Cref{lem:constant-prob}, in any iteration $\ell$, the sets in $\Sigma_\ell$ satisfy the remaining coverage requirement of any color class $\mathcal{C}_t$, with at least a constant probability. It follows from the union bound that the probability that there exists a color class with unmet coverage requirement after $c \log r$ independent iterations is at most $1/(2r)$, for appropriately chosen constant $c$. Therefore, the solution $\mathcal{A}' \cup \Sigma$ is feasible with probability at least $1-1/(2r).$ As argued earlier, $w(\mathcal{A}') \le 6\alpha \beta \cdot \sum_{S_i \in \mathcal{R}} w_i x_i = O(\beta) \cdot OPT$, since $\alpha$ is a constant. 
It is also easy to see that for any iteration $\ell$ of the randomized rounding, $\operatorname{E}[w(\Sigma_\ell)] \le 6 \cdot \sum_{S_i \in \mathcal{R}} w_i x_i = O(1) \cdot OPT$. Therefore, $\operatorname{E}[w(\Sigma)] \le c' \log r \cdot OPT$, for some constant $c'$. Therefore, $\Pr[w(\Sigma) \le 3c' \log r \cdot OPT] \ge \frac{2}{3}$, using Markov's inequality. The theorem follows from an application of union bound over the events concerning the feasibility and the approximation guarantee of the solution. \end{proof}
2,115
26,543
en
train
0.93.4
\iffalse Let $\mathcal{R}igma_\ell$ be the collection of sets added during this iteration. We show that with at least a constant probability, $\mathcal{R}igma_\ell$ covers at least $k_t(\mathcal{A})$ elements from $\mathcal{C}_t$, for any color class $\mathcal{C}_t$. Let $\mathcal{R}igma$ denote the collection of sets added throughout $O(\log r)$ independent iterations. It then follows from the standard arguments that, the probability that there exists a color class with unmet coverage requirement is at most $1/r$. Furthermore, the expected weight of the sets added to the solution during any iteration, $\operatorname{E}[w(\mathcal{R}igma_\ell)] \le 6 \cdot w(x, z)$. Again, using standard arguments involving Chernoff Bounds, it follows that $w(\mathcal{R}igma) \le O(\log r) \cdot w(x, z)$ with high probability. Therefore, $\mathcal{R}igma \cup \mathcal{A}'$ is an $O(\beta + \log r)$-approximation. In the rest of the section, we prove the preceding claim about $\mathcal{R}igma_\ell$. \fi \iffalse \section{Randomized Rounding Algorithm} \begin{algorithm} \caption{RandomizedRounding($x$)} \begin{algorithmic}[1] \mathcal{R}tate Let $H = \{e_j \in X \mid \sum_{ S_i \ni e_j} x_i \ge \frac{1}{6 \alpha} \}$ for some constant $\alpha$. \mathcal{R}tate Let $\widetilde{x}$ be a Set Cover \textsf{LP}\xspace solution, where $\widetilde{x}_i \gets \min\{1, 6\alpha \cdot x_i\}$ for each $S_i \in \mathcal{R}$. \mathcal{R}tatex Let $\mathcal{A}'$ be the solution returned by the Set Cover \textsf{LP}\xspace rounding algorithm. \mathcal{R}tate Let $\mathcal{A} \gets \mathcal{A}' \cup \{S_i \in \mathcal{R} \mid x_i \ge \frac{1}{6\alpha} \}$ \mathcal{R}tate $\mathcal{R}igma \gets \emptyset$ \For{$\ell = 1$ \textbf{to} $O(\log r)$} \mathcal{R}tate $\mathcal{R}igma_\ell \gets \emptyset$ \For{Each $S_i \in \mathcal{R} \setminus \mathcal{A}$} \mathcal{R}tate Add $S_i$ to $\mathcal{R}igma_\ell$ with probability $6\cdot x_i$. 
\operatorname{E}ndFor \mathcal{R}tate $\mathcal{R}igma \gets \mathcal{R}igma \cup \mathcal{R}igma_\ell$. \operatorname{E}ndFor \mathcal{R}tate \mathcal{R}eturn $\mathcal{R}igma \cup \mathcal{A}'$ \end{algorithmic} \end{algorithm} Here, $H$ is a set of ``heavy'' elements that are covered to an extent of at least $\frac{1}{\alpha}$. We create a standard Set Cover \textsf{LP}\xspace solution on the set system $(H, S_{|H})$, and use a known Set Cover \textsf{LP}\xspace rounding algorithm. Let $\mathcal{A}'$ be a $\beta$-approximate solution returned by this algorithm. Note that if $x_i \ge \frac{1}{6\alpha}$, then $S_i \subseteq H$, and so the elements in $S_i$ are covered by $\mathcal{A}'$. Therefore, we can safely remove any such set from consideration without any effect. Let $X' \coloneqq X \setminus \bigcup \mathcal{A}$. Notice that for any $e_j \in X'$, $\sum_{S_i \ni e_j} x_i < \frac{1}{6\alpha}$. Furthermore, for any set $S_i \in \mathcal{R} \setminus \mathcal{A}$, we have that $x_i < \frac{1}{6\alpha}$. We perform $O(\log r)$ iterations of randomized rounding for the sets in $\mathcal{R} \setminus \mathcal{A}$. In each such iteration, we add a set $S_i \in \mathcal{R} \setminus \mathcal{A}$ to the solution with probability $6 \cdot x_i$ -- it is easy to see that $6 x_i < 1$. Let $\mathcal{R}igma_\ell$ be the collection of sets added in an arbitrary iteration $\ell$. We show in the following that $\mathcal{R}igma_\ell$ covers at least $k_t(\mathcal{A})$ elements from the color class $\mathcal{C}_t$, with at least a constant probability. Using standard arguments, this implies that, after $O(\log r)$ iterations, $\mathcal{R}igma = \bigcup \mathcal{R}igma_\ell$ will cover at least $k_t(\mathcal{A})$ elements from each color class $\mathcal{C}_1, \ldots, \mathcal{C}_r$, with high probability. 
Combining with the fact that $\operatorname{E}[w(\Sigma_\ell)] \le 6 \cdot OPT$, this readily implies that $\Sigma \cup \mathcal{A}'$ is an $O(\beta + \log r)$-approximation. In the rest of the section, we prove the preceding claim about each $\Sigma_\ell$. \fi \subsection{Analyzing Coverage of \texorpdfstring{$\Sigma_\ell$}{Σ\_l}} \label{subsec:coverage} As stated earlier, we show that for any color class $\mathcal{C}_t$, the probability that $\Sigma_\ell$ covers at least $k_t(\mathcal{A})$ elements is at least a positive constant. Let $\mathcal{A}$ be the collection of sets as defined earlier. Recall that for any color class $\mathcal{C}_t$, the solution $(x, z)$ satisfies: \[\sum_{S_i \not\in \mathcal{A}} x_i \cdot \min\{\textsf{deg}_t(S_i, \mathcal{A}), k_t(\mathcal{A})\} \ge k_t(\mathcal{A}).\] Henceforth, fix a color class $\mathcal{C}_t$. To simplify notation, let us use the following shorthands: $\mathcal{C} \coloneqq \mathcal{C}_t \setminus \mathcal{C}_t(\mathcal{A})$ is the set of uncovered elements from $\mathcal{C}_t$. $k \coloneqq k_t(\mathcal{A})$ is the residual coverage requirement. For any set $S_i \in \mathcal{R}$, we restrict it to its projection on $\mathcal{C}_t \setminus \mathcal{C}_t(\mathcal{A})$. Similarly, for a collection of sets $\mathcal{R}' \subseteq \mathcal{R} \setminus \mathcal{A}$, we restrict $\bigcup \mathcal{R}'$ to mean the set of ``uncovered elements'' from this color class $\mathcal{C}_t$, i.e., $\bigcup \mathcal{R}' = \bigcup_{S_i \in \mathcal{R}'} S_i$ (where each $S_i \in \mathcal{R}'$ is the projection of the original set, as in the previous sentence). Finally, let $\delta_i \coloneqq \frac{\min\{\textsf{deg}_t(S_i, \mathcal{A}), k\}}{k}$. 
Notice that using this notation, the preceding constraint is equivalent to $$\sum_{S_i \not\in \mathcal{A}} \delta_i x_i \ge 1.$$ For a set $S_i$, let $\hat{x}_i$ be an indicator random variable that denotes whether or not $S_i$ was added to $\mathcal{R}igma_\ell$. It is easy to see that $\operatorname{E}[\hat{x}_i] = \Pr[S_i \text{ is added}] = 6x_i$. For an element $e_j \in \mathcal{C}$, let $Z_j \coloneqq \sum_{S_i \ni e_j} \hat{x}_i$ be a random variable that denotes the number of sets containing $e_j$ that are added to $\mathcal{R}igma_\ell$. Notice that $e_j$ is covered by $\mathcal{R}igma_\ell$ iff $Z_j \ge 1$. Note that, $\operatorname{E}[Z_j] = \sum_{ S_i \ni e_j} \operatorname{E}[\hat{x}_i] = \sum_{S_i \ni e_j} 6 x_i < 6 \cdot \frac{1}{6\alpha} = \frac{1}{\alpha}$. Let $Z \coloneqq \sum_{S_i \not\in \mathcal{A}} \delta_i \cdot \hat{x}_i $ be a random variable. Notice that \begin{equation} Z = \sum_{S_i \not\in \mathcal{A}} \delta_i \cdot \hat{x}_i \le \frac{1}{k} \sum_{S_i \not\in \mathcal{A}} \hat{x}_i \cdot \textsf{deg}_{t}(S_i, \mathcal{A}) = \frac{1}{k} \sum_{S_i \not\in \mathcal{A}} \sum_{e_j \in S_i} \hat{x}_i = \frac{1}{k} \sum_{e_j \in \mathcal{C}} Z_j\ . \label[ineq]{ineq:Z} \end{equation} Here, the second equality follows from the fact that $\textsf{deg}_t(S_i, \mathcal{A})$ is exactly the number of elements in $S_i$ (that are not covered by $\mathcal{A}$). Now following \citet{bera2014approximation}, we show the following fact about $Z$. \begin{claim} \label{lem:Z-var} $\Pr[Z < 2] \le \frac{3}{8}$. \end{claim} \begin{proof} First, notice that $$\operatorname{E}[Z] = \sum_{S_i \not\in \mathcal{A}} \delta_i \cdot \operatorname{E}[\hat{x_i}] = 6 \cdot \sum_{S_i \not\in \mathcal{A}} \delta_i x_i \ge 6.$$ Now, consider \begin{align*} \mathrm{Var}[Z] = \sum_{S_i \not\in \mathcal{A}} \delta_i^2 \cdot \mathrm{Var}[\hat{x}_i] = \sum_{S_i \not\in \mathcal{A}} \delta_i^2 \cdot 6x_i (1-6x_i) \le 6 \sum_{S_i \not\in \mathcal{A}} \delta_i x_i\ . 
\tag{$\because$ $0 \le \delta_i \le 1$.} \end{align*} Using Chebyshev's inequality, \begin{align*} \Pr[Z < 2] \le \Pr\left[\big|Z - \operatorname{E}[Z]\big| \ge \frac{2\operatorname{E}[Z]}{3} \right] \le \frac{9}{4} \cdot \frac{\mathrm{Var}[Z]}{\operatorname{E}[Z]^2} \le \frac{9}{4} \cdot \frac{6 \sum_{S_i \not\in \mathcal{A}} \delta_i x_i}{(6 \sum_{S_i \not\in \mathcal{A}} \delta_i x_i)^2} \le \frac{3}{8}\ . \tag{$\because \sum_{S_i \not\in \mathcal{A}} \delta_i x_i \ge 1$.} \end{align*} \end{proof} For convenience, let us use the following notation for some events of interest: \begin{align*} \mathcal{K} &\equiv \mathcal{R}igma_\ell \text{ covers at most $k-1$ elements from $\mathcal{C}$} \\\mathcal{M} &\equiv Z < 2 \end{align*} Recall that the objective is to show that $\Pr[\mathcal{K}]$ is upper bounded by a constant less than $1$. To this end, we first analyze $\Pr[\mathcal{M}|\mathcal{K}]$. \begin{claim} \label{lem:p-given-q} $\Pr[\mathcal{M}|\mathcal{K}] \ge \frac{2}{5}$ \end{claim} \begin{proof} For an element $e_j \in \mathcal{C}$, define an event $$\mathcal{L}_j \equiv Z_j \ge 1 \text{ (i.e., $e_j$ is covered)}.$$ First, consider the following conditional expectation: \begin{align} \operatorname{E}[Z|\mathcal{K}] &\le \frac{1}{k}\sum_{e_j \in \mathcal{C}} \operatorname{E}[Z_j | \mathcal{K}] \tag{From \ref{ineq:Z}} \\&= \frac{1}{k} \sum_{e_j \in \mathcal{C}} \Pr[\bar{\mathcal{L}_j} | \mathcal{K}] \cdot \operatorname{E}[Z_j | \mathcal{K} \cap \bar{\mathcal{L}_j}] + \Pr[\mathcal{L}_j | \mathcal{K}] \cdot \operatorname{E}[Z_j | \mathcal{K} \cap \mathcal{L}_j] \nonumber \\&= \frac{1}{k} \sum_{e_j \in \mathcal{C}} \Pr[\mathcal{L}_j | \mathcal{K}] \cdot \operatorname{E}[Z_j | \mathcal{K} \cap \mathcal{L}_j ]\ . \tag{$\because \operatorname{E}[Z_j | \mathcal{K} \cap \bar{\mathcal{L}_j}] = 0.$} \end{align} In \mathcal{C}ref{subsec:expectation}, we show that the conditional expectation $E[Z_j | \mathcal{K} \cap \mathcal{L}_j]$ is upper bounded by $\frac{6}{5}$. 
Then, it follows that, \begin{align*} \operatorname{E}[Z|\mathcal{K}] &\le \frac{6}{5k} \sum_{e_j \in \mathcal{C}} \Pr[Z_j \ge 1 | \mathcal{K}] \\&= \frac{6}{5k} \sum_{\mathcal{R}' \subseteq \mathcal{R} \setminus \mathcal{A}} \Pr[\mathcal{R}igma_\ell = \mathcal{R}' | \mathcal{K}] \cdot \left|\bigcup \mathcal{R}'\right| \tag{Where we sum over collections $\mathcal{R}' \subseteq \mathcal{R} \setminus \mathcal{A}$ s.t. $|\bigcup \mathcal{R}'| \le k-1$} \\&\le \frac{6(k-1)}{5k} \sum_{\mathcal{R}' \subseteq \mathcal{R} \setminus \mathcal{A}} \Pr[\mathcal{R}igma_\ell = \mathcal{R}' | \mathcal{K}] \\&< \frac{6}{5}\ . \end{align*} Now, using Markov's inequality, we have that $$\Pr[\mathcal{M}|\mathcal{K}] = \Pr[Z < 2 | \mathcal{K}] \ge 1 - \frac{\operatorname{E}[Z|\mathcal{K}]}{2} \ge 1- \frac{3}{5} = \frac{2}{5}\ .$$ \end{proof}
3,966
26,543
en
train
0.93.5
For convenience, let us use the following notation for some events of interest: \begin{align*} \mathcal{K} &\equiv \mathcal{R}igma_\ell \text{ covers at most $k-1$ elements from $\mathcal{C}$} \\\mathcal{M} &\equiv Z < 2 \end{align*} Recall that the objective is to show that $\Pr[\mathcal{K}]$ is upper bounded by a constant less than $1$. To this end, we first analyze $\Pr[\mathcal{M}|\mathcal{K}]$. \begin{claim} \label{lem:p-given-q} $\Pr[\mathcal{M}|\mathcal{K}] \ge \frac{2}{5}$ \end{claim} \begin{proof} For an element $e_j \in \mathcal{C}$, define an event $$\mathcal{L}_j \equiv Z_j \ge 1 \text{ (i.e., $e_j$ is covered)}.$$ First, consider the following conditional expectation: \begin{align} \operatorname{E}[Z|\mathcal{K}] &\le \frac{1}{k}\sum_{e_j \in \mathcal{C}} \operatorname{E}[Z_j | \mathcal{K}] \tag{From \ref{ineq:Z}} \\&= \frac{1}{k} \sum_{e_j \in \mathcal{C}} \Pr[\bar{\mathcal{L}_j} | \mathcal{K}] \cdot \operatorname{E}[Z_j | \mathcal{K} \cap \bar{\mathcal{L}_j}] + \Pr[\mathcal{L}_j | \mathcal{K}] \cdot \operatorname{E}[Z_j | \mathcal{K} \cap \mathcal{L}_j] \nonumber \\&= \frac{1}{k} \sum_{e_j \in \mathcal{C}} \Pr[\mathcal{L}_j | \mathcal{K}] \cdot \operatorname{E}[Z_j | \mathcal{K} \cap \mathcal{L}_j ]\ . \tag{$\because \operatorname{E}[Z_j | \mathcal{K} \cap \bar{\mathcal{L}_j}] = 0.$} \end{align} In \mathcal{C}ref{subsec:expectation}, we show that the conditional expectation $E[Z_j | \mathcal{K} \cap \mathcal{L}_j]$ is upper bounded by $\frac{6}{5}$. Then, it follows that, \begin{align*} \operatorname{E}[Z|\mathcal{K}] &\le \frac{6}{5k} \sum_{e_j \in \mathcal{C}} \Pr[Z_j \ge 1 | \mathcal{K}] \\&= \frac{6}{5k} \sum_{\mathcal{R}' \subseteq \mathcal{R} \setminus \mathcal{A}} \Pr[\mathcal{R}igma_\ell = \mathcal{R}' | \mathcal{K}] \cdot \left|\bigcup \mathcal{R}'\right| \tag{Where we sum over collections $\mathcal{R}' \subseteq \mathcal{R} \setminus \mathcal{A}$ s.t. 
$|\bigcup \mathcal{R}'| \le k-1$} \\&\le \frac{6(k-1)}{5k} \sum_{\mathcal{R}' \subseteq \mathcal{R} \setminus \mathcal{A}} \Pr[\mathcal{R}igma_\ell = \mathcal{R}' | \mathcal{K}] \\&< \frac{6}{5}\ . \end{align*} Now, using Markov's inequality, we have that $$\Pr[\mathcal{M}|\mathcal{K}] = \Pr[Z < 2 | \mathcal{K}] \ge 1 - \frac{\operatorname{E}[Z|\mathcal{K}]}{2} \ge 1- \frac{3}{5} = \frac{2}{5}\ .$$ \end{proof} We conclude with proving the main result of the section, which follows from \mathcal{C}ref{lem:Z-var} and \mathcal{C}ref{lem:p-given-q}. \coverage* \begin{proof} Consider $$\Pr[\mathcal{K}] = \frac{\Pr[\mathcal{K}|\mathcal{M}] \cdot \Pr[\mathcal{M}]}{\Pr[\mathcal{M}|\mathcal{K}]} \le \frac{\Pr[\mathcal{M}]}{\Pr[\mathcal{M}|\mathcal{K}]} \le \frac{3/8}{2/5} = \frac{15}{16}\ .$$ Therefore, $\Pr[\mathcal{R}igma_\ell \text{ covers at least $k_t(\mathcal{A})$ elements from }\mathcal{C}_t \setminus \mathcal{C}_t(\mathcal{A}) ] = 1 - \Pr[\mathcal{K}] \ge \frac{1}{16}\ .$ \end{proof} \iffalse \begin{theorem} \label{thm:main} $\mathcal{R}igma \cup \mathcal{A}'$ is an $O(\beta + \log r)$-approximation with probability at least $1-1/r$. \end{theorem} \begin{proof} By assumption, $\mathcal{A}'$ is a cover for $H$ with cost at most $6\alpha\beta \cdot OPT$, where $\alpha$ is a constant. Therefore, we argue about $\mathcal{R}igma = \bigcup_{\ell = 1}^{c \log r} \mathcal{R}igma_\ell$. From \mathcal{C}ref{lem:constant-prob}, we have that, for each color class $\mathcal{C}_t$, $\mathcal{R}igma_\ell$ covers at least $k_t$ elements, with constant probability. Therefore, for appropriately chosen constant $c$, we have $$\Pr[\mathcal{R}igma \text{ covers $k_t$ elements from $\mathcal{C}_t$}] \ge 1 - \lr{\frac{15}{16}}^{c \log r} = 1 - \frac{1}{r^{3}}$$ By union bound, $$\Pr[\mathcal{R}igma \cup \mathcal{A} \text{ is a feasible solution }] \ge 1 - \frac{1}{r^2}.$$ This shows that $\mathcal{R}igma \cup \mathcal{A}$ is a feasible solution with probability at least $1 - \frac{1}{r}$. 
As argued earlier, $E[w(\mathcal{R}igma_\ell)] \le 6 \cdot OPT$. By using standard techniques and Chernoff Bounds, we can show that the approximation guarantee of the algorithm is $O(\beta + \log r)$. Using union bound on both the events, the statement of the theorem follows. \end{proof}
1,675
26,543
en
train
0.93.6
\fi \subsection{Analyzing \texorpdfstring{$\operatorname{E}[Z_j | \mathcal{K} \cap \mathcal{L}_j]$}{E[Z\_j | K ∩ L\_j]}} \label{subsec:expectation} In this section, we show that for any $e_j \in \mathcal{C}$, the conditional expectation $\operatorname{E}[Z_j | \mathcal{K} \cap \mathcal{L}_j]$ is bounded by $\frac{6}{5}$. Recall that $\mathcal{L}_j$ denotes the event $Z_j \ge 1$ (equivalently, $e_j$ is covered by $\mathcal{R}igma_\ell$), and that $\mathcal{K}$ denotes the event that $\mathcal{R}igma_\ell$ covers at most $k-1$ elements. For notational convenience, we shorten $\mathcal{L}_j$ to $\mathcal{L}$. Partition the sets $\mathcal{R} \setminus \mathcal{A}$ into disjoint collections $\mathcal{R}_1, \mathcal{R}_2$, where $\mathcal{R}_1$ consists of sets that do not contain $e_j$, and $\mathcal{R}_2$ consists of sets that contain $e_j$. Fix an arbitrary ordering $\sigma$ of sets in $\mathcal{R} \setminus \mathcal{A}$, where the sets in $\mathcal{R}_1$ appear before the sets in $\mathcal{R}_2$. We view the algorithm for choosing $\mathcal{R}igma_\ell$, as considering the sets in $\mathcal{R} \setminus \mathcal{A}$ according to this ordering $\sigma$, and making a random decision of whether to add each set in $\mathcal{R}igma_\ell$. Let $\mathcal{R}igma'_\ell$ be the random collection of sets added according to the ordering $\sigma$, until the first set containing $e_j$, say $S_i$, is added to $\mathcal{R}igma_\ell$. Note that if we condition on the event $\mathcal{L}$, such an $S_i$ must exist. Let \ensuremath{\langle S_i, \mathcal{R}igma'_\ell \rangle}\xspace denote the event that (i) $S_i$ is the first set containing $e_j$ that is added by the algorithm, and (ii) $\mathcal{R}igma'_\ell$ is the collection added by the algorithm, just after $S_i$ was added. Note that \ensuremath{\langle S_i, \mathcal{R}igma'_\ell \rangle}\xspace contains the history of the choices made by the algorithm, until the point just after $S_i$ is considered. 
For a history \ensuremath{\langle S_i, \mathcal{R}igma'_\ell \rangle}\xspace, $k-1-|\bigcup \mathcal{R}igma'_\ell|$ is the maximum number of additional elements that can still be covered, without violating the condition $\mathcal{K}$ (which says that $\mathcal{R}igma_\ell$ covers at most $k-1$ elements). We say that a history \ensuremath{\langle S_i, \mathcal{R}igma'_\ell \rangle}\xspace is relevant, if $k-1-|\bigcup \mathcal{R}igma'_\ell| \ge 0$. Thus, \begin{equation} \label{eqn:exp-1} \operatorname{E}[Z_j \mid \mathcal{K} \cap \mathcal{L}] = \sum_{\ensuremath{\langle S_i, \mathcal{R}igma'_\ell \rangle}\xspace} \Pr[\ensuremath{\langle S_i, \mathcal{R}igma'_\ell \rangle}\xspace \mid \mathcal{K} \cap \mathcal{L}] \cdot \operatorname{E}[Z_j \mid \mathcal{K} \cap \mathcal{L} \cap \ensuremath{\langle S_i, \mathcal{R}igma'_\ell \rangle}\xspace], \end{equation} where we only sum over the relevant histories. Now, once $S_i$ has been added to the solution, $e_j$ is covered, thereby satisfying the condition $\mathcal{L}$. That is, the event \ensuremath{\langle S_i, \mathcal{R}igma'_\ell \rangle}\xspace implies the event $\mathcal{L}$. It follows that, \begin{equation} \label{eqn:exp-2} \operatorname{E}[Z_j \mid \mathcal{K} \cap \mathcal{L} \cap \ensuremath{\langle S_i, \mathcal{R}igma'_\ell \rangle}\xspace] = \operatorname{E}[Z_j \mid \mathcal{K} \cap \ensuremath{\langle S_i, \mathcal{R}igma'_\ell \rangle}\xspace]. \end{equation} Let $\mathcal{K}'$ denote the event that $\mathcal{R}igma_\ell \setminus \mathcal{R}igma'_\ell$ covers at most $p \coloneqq k-1-|\bigcup \mathcal{R}igma'_\ell|$ elements. Then, \begin{equation} \label{eqn:exp-3} \operatorname{E}[Z_j \mid \mathcal{K} \cap \ensuremath{\langle S_i, \mathcal{R}igma'_\ell \rangle}\xspace] = \operatorname{E}[Z_j \mid \mathcal{K}' \cap \ensuremath{\langle S_i, \mathcal{R}igma'_\ell \rangle}\xspace]. 
\end{equation} Now, let $\bar{Z_j}$ be the sum of the indicator random variables $\hat{x}_{i'}$, over the sets $S_{i'} \in \mathcal{R}_2$, that occur after $S_i$ in the ordering $\sigma$. Clearly, $\operatorname{E}[\bar{Z_j}] \le \operatorname{E}[Z_j]$. We also have, \begin{equation} \label{eqn:exp-4} \operatorname{E}[Z_j \mid \mathcal{K}' \cap \ensuremath{\langle S_i, \Sigma'_\ell \rangle}\xspace] = 1 + \operatorname{E}[\bar{Z_j} \mid \mathcal{K}' \cap \ensuremath{\langle S_i, \Sigma'_\ell \rangle}\xspace]. \end{equation} This is because $Z_j$ denotes the number of sets containing $e_j$ that are added to $\Sigma_\ell$, including $S_i$; whereas $\bar{Z_j}$ does not count $S_i$. Now, $\bar{Z_j}$ and $\mathcal{K}'$ are concerned with the sets after $S_i$ according to $\sigma$, whereas \ensuremath{\langle S_i, \Sigma'_\ell \rangle}\xspace concerns the history up to $S_i$. Therefore, \begin{align} \operatorname{E}[\bar{Z_j} \mid \mathcal{K}' \cap \ensuremath{\langle S_i, \Sigma'_\ell \rangle}\xspace] &= \operatorname{E}[\bar{Z_j} \mid \mathcal{K}'] \nonumber \\&= \operatorname{E}[\bar{Z_j} \mid \Sigma_\ell \setminus \Sigma'_\ell \text{ covers at most } p \text{ additional elements}] \nonumber \\&\le \frac{\operatorname{E}[\bar{Z_j}]}{\Pr[\Sigma_\ell \setminus \Sigma'_\ell \text{ covers at most }p \text{ additional elements}]} \nonumber \\&\le \frac{\operatorname{E}[\bar{Z_j}]}{\Pr[\Sigma_\ell \setminus \Sigma'_\ell = \emptyset]} \tag{$\because$ ``$\Sigma_\ell \setminus \Sigma'_\ell$ covers at most $p$ additional elements'' $\supseteq$ ``$\Sigma_\ell \setminus \Sigma'_\ell = \emptyset$'' } \nonumber \\&\le \frac{1}{\alpha - 1}\ . 
\label{eqn:exp-5} \end{align} This follows from (i) $\operatorname{E}[\bar{Z_j}] \le \operatorname{E}[Z_j] \le \frac{1}{\alpha}$ as argued earlier, and (ii) $\Pr[\Sigma_\ell \setminus \Sigma'_\ell = \emptyset] \ge \prod_{S_{i'} \ni e_j }(1 - 6x_{i'}) \ge 1 - \sum_{S_{i'} \ni e_j} 6x_{i'} \ge \frac{\alpha - 1}{\alpha}$, where we use the Weierstrass product inequality in the second step. Now, combining \Cref{eqn:exp-2,eqn:exp-3,eqn:exp-4,eqn:exp-5}, we conclude that \[\operatorname{E}[Z_j \mid \mathcal{K} \cap \mathcal{L} \cap \ensuremath{\langle S_i, \Sigma'_\ell \rangle}\xspace] \le 1 + \frac{1}{\alpha-1} = \frac{\alpha}{\alpha -1}.\] Plugging this into \Cref{eqn:exp-1}, we get that, \[\operatorname{E}[Z_j \mid \mathcal{L}\cap \mathcal{K}] \le \sum_{\ensuremath{\langle S_i, \Sigma'_\ell \rangle}\xspace} \frac{\alpha}{\alpha - 1} \cdot \Pr[\ensuremath{\langle S_i, \Sigma'_\ell \rangle}\xspace \mid \mathcal{K}\cap \mathcal{L}] = \frac{\alpha}{\alpha - 1} \sum_{\ensuremath{\langle S_i, \Sigma'_\ell \rangle}\xspace} \Pr[\ensuremath{\langle S_i, \Sigma'_\ell \rangle}\xspace \mid \mathcal{K}\cap \mathcal{L}] = \frac{\alpha}{\alpha-1}.\] \iffalse Now we look at an iteration of the algorithm as first making the random decisions for the sets $\mathcal{R}_1$ (considered in an arbitrary order), and then for the sets in $\mathcal{R}_2$. Let $\Sigma'_\ell$ be the random collection of sets added according to this ordering until the first set containing $e_j$, say $S_i$, is added to $\Sigma_\ell$. Note that because of the condition $\mathcal{L}$, such an $S_i$ must exist. Now, let $p \coloneqq k-1 - \left| \bigcup \Sigma'_\ell \right|$ denote the maximum number of elements that can still be covered, without violating the condition $\mathcal{K}$. 
Without loss of generality, we assume that we only consider the collections $\mathcal{R}igma'_\ell$, where $p \ge 0$ (since otherwise the condition $\mathcal{K}$ is violated). Note that $S_i \ni e_j$ is a random set, and hence we can write the original conditional expectation as follows: \begin{align} \operatorname{E}[Z_j | \mathcal{K} \cap \mathcal{L} ] &= \sum_{S_i \ni e_j} \Pr[S_i \text{ is the first set} | \mathcal{K} \cap \mathcal{L}] \cdot \operatorname{E}[Z_j | \mathcal{K}' \cap \mathcal{L} \cap S_i \text{ is the first set} ] \label{eqn:lcapn} \end{align} Where $\mathcal{K}'$ denotes the event that $\mathcal{R}igma_\ell \setminus \mathcal{R}igma'_\ell$ covers at most $p$ new elements. Note that since $S_i$ has already been added to the solution, we can see that $E[Z_j | \mathcal{K}' \cap \mathcal{L} \cap S_i \text{ is the first set}] = 1 + \operatorname{E}[\bar{Z_j} | \mathcal{R}igma_\ell \setminus \mathcal{R}igma'_\ell \text{ covers at most } p \text{ new elements}]$, where $\bar{Z_j} \coloneqq \sum_{S_{i'} \in \mathcal{R}igma_\ell \setminus \mathcal{R}igma'_\ell} \hat{x}_{i'}$. This is because, once $S_i$ has been added to the solution, $e_j$ is covered, satisfying condition $\mathcal{L}$. It is also easy to see that $\operatorname{E}[\bar{Z_j}] \le \operatorname{E}[Z_j]$. Therefore, we have,
3,064
26,543
en
train
0.93.7
\begin{align} \operatorname{E}[Z_j | \mathcal{K}' \cap \mathcal{L} \cap S_i \text{ is the first set}] &= 1 + \operatorname{E}[\bar{Z_j} |\mathcal{R}igma_\ell \setminus \mathcal{R}igma'_\ell \text{ covers at most } p \text{ new elements}] \nonumber \\&\le 1 +\frac{\operatorname{E}[\bar{Z_j}]}{\Pr[\mathcal{R}igma_\ell \setminus \mathcal{R}igma'_\ell \text{ covers at most }p \text{ new elements}]} \nonumber \\&\le 1 + \frac{\operatorname{E}[\bar{Z_j]}}{\Pr[\mathcal{R}igma_\ell \setminus \mathcal{R}igma'_\ell = \emptyset]} \tag{$\because$ ``$\mathcal{R}igma_\ell \setminus \mathcal{R}igma'_\ell$ covers at most $p$ new elements'' $\subseteq$ ``$\mathcal{R}igma_\ell \setminus \mathcal{R}igma'_\ell = \emptyset$'' } \nonumber \\&\le 1 + \frac{1}{\alpha - 1} \label{ineq:lcapncapsi} \end{align} This follows from (i) $\operatorname{E}[\bar{Z_j}] \le \operatorname{E}[Z_j] \le \frac{1}{\alpha}$ as argued earlier, and (ii) $\Pr[\mathcal{R}igma_\ell \setminus \mathcal{R}igma'_\ell = \emptyset] \ge \mathsf{pr}od_{S_{i'} \ni e_j }(1 - 6x_{i'}) \ge 1 - \sum_{S_{i'} \ni e_j} 6x_{i'} \ge \frac{\alpha - 1}{\alpha}$, where we use Weierstrass product inequality in the second step. Combining \ref{eqn:lcapn} and \ref{ineq:lcapncapsi}, we get that, \begin{align*} E[Z_j | \mathcal{L}\cap \mathcal{K}] &\le \sum_{S_i \ni e_j} \frac{\alpha}{\alpha-1} \cdot \Pr[S_i \text{ is the first set} | \mathcal{K} \cap \mathcal{L}] = \frac{\alpha}{\alpha - 1} \end{align*} \fi Choosing $\alpha = 6$, it follows that $E[Z_j | \mathcal{L}\cap \mathcal{K}] \le \frac{6}{5}$, as claimed. \subsection{Solving the \textsf{LP}\xspace} \label{subsec:ellipsoid} Recall that the strengthened \textsf{LP}\xspace has exponentially many constraints, and hence we cannot use a standard \textsf{LP}\xspace algorithm directly. We guess the cost of the integral optimal solution up to a factor of $2$ (this can be done by binary search), say $\mathsf{D}elta$. 
Then, we convert the \textsf{LP}\xspace into a feasibility \textsf{LP}\xspace by removing the objective function, and adding a constraint $\sum_{S_i \in \mathcal{R}} w_i x_i \le \mathsf{D}elta$. We then use Ellipsoid algorithm to find a feasible solution to this \textsf{LP}\xspace and let $(x, z)$ be a candidate solution returned by the Ellipsoid algorithm. If it does not satisfy the preceding constraint, we report it as a violated constraint. We also check \mathcal{C}ref{constr:cover-ej,constr:cover-ci,constr:fractional-z,constr:fractional-x} (the number of these constraints is polynomial in the input size), and report if any of these constraints is violated. Otherwise, let $H$ be the set of heavy elements with respect to $(x, z)$ (as defined earlier), and let $\mathcal{A} = \mathcal{A}' \cup \{S_i \in \mathcal{R} \mid x_i \ge \frac{1}{6\alpha}\}$ be the collection of sets as defined in \mathcal{C}ref{sec:LPRounding} (where $\mathcal{A}'$ is the Set Cover solution for the heavy elements $H$ returned by the rounding algorithm). Then, we check if the following constraint is satisfied with respect to this $\mathcal{A}$, for all color classes $\mathcal{C}_t$: $$\sum_{S_i \not\in \mathcal{A}} x_i \min\{ \textsf{deg}_{t}(S_i, \mathcal{A}), k_t(\mathcal{A}) \} \ge k_t(\mathcal{A})\ .$$ If this constraint is not satisfied for some color class $\mathcal{C}_t$, we report it as a violated constraint. Otherwise we stop the Ellipsoid algorithm and proceed with the randomized rounding algorithm with the current \textsf{LP}\xspace solution $(x, z)$, as described in \mathcal{C}ref{sec:LPRounding}. Note that $(x, z)$ may not satisfy \mathcal{C}ref{constr:s-cover-ci-a} with respect to all collections $\mathcal{A}$. However, our randomized rounding algorithm requires that it is satisfied with respect to the specific collection $\mathcal{A}$ as defined earlier. Therefore, we do not need to check the feasibility of $(x, z)$ with respect to exponentially many constraints. 
\section{Facility Location and Minimum Cost Covering with Multiple Outliers} \label{sec:fl-mcc} We consider generalizations of the Facility Location and Minimum Cost Covering problems. These generalizations are analogous to the Partition Set Cover problem considered in the previous section. That is, the set of ``clients'' (which are the objects to be covered) is partitioned into $r$ color classes, and each color class has a coverage requirement. We note that for the (standard) Facility Location and Minimum Cost Covering problems, \textsf{LP}\xspace-based $O(1)$ approximation algorithms are known. In the following, we first state the generalizations formally, and then show how the Randomized Rounding framework from the previous section can be adapted to obtain $O(\log r)$ approximations for these problems. These guarantees are asymptotically tight, in light of the hardness results given in \Cref{sec:hardness}. \subsection{Facility Location with Multiple Outliers} In the Facility Location with Multiple Outliers problem, we are given a set of facilities $F$ and a set of clients $C$, belonging to a metric space $(F \cup C, d)$. Each facility $i \in F$ has a non-negative opening cost $f_i$. We are given $r$ non-empty subsets of clients (or ``color classes'') $\mathcal{C}_1, \ldots, \mathcal{C}_r$, that partition the set of clients. Each color class $\mathcal{C}_t$ has a connection requirement $1 \le k_t \le |\mathcal{C}_t|$. The objective of the Facility Location with Multiple Outliers problem is to find a feasible solution $(F^*, C^*)$ that minimizes the cost $\sum_{i \in F'} f_i + \sum_{j \in C'} d(j, F')$ over all feasible solutions $(F', C')$. A solution $(F', C')$ is feasible if (i) $|F'| \ge 1$ and (ii) for all color classes $\mathcal{C}_t$, $|\mathcal{C}_t \cap C'| \ge k_t$. Note that this is a generalization of the Robust Facility Location problem, first considered by \citet{Charikar2001FLwO}. A natural \textsf{LP}\xspace formulation of this problem is as follows. 
\begin{mdframed}[backgroundcolor=gray!9]
1,907
26,543
en
train
0.93.8
(Natural \textsf{LP}\xspace for Facility Location with Multiple Outliers) \begin{alignat}{3} \text{minimize} &\ \sum\limits_{i \in F} f_{i}x_{i} + \sum_{i \in F, j \in C} y_{ij} \cdot d(i, j) & \nonumber \\ \text{subject to} \displaystyle&\sum\limits_{i \in F} y_{ij} \geq z_j, \quad & \forall j \in C \label[constr]{constr:fl-cover-j}\\ \displaystyle&\sum_{j \in \mathcal{C}_t}z_j \ge k_t, & \forall \mathcal{C}_t \in \{\mathcal{C}_1, \ldots, \mathcal{C}_r\} \label[constr]{constr:fl-cover-ci}\\ \displaystyle&0 \le y_{ij} \le x_i \le 1, & \forall i \in F, \forall j \in C \label[constr]{constr:fl-atmost-ci}\\ \displaystyle &z_j \in [0, 1], & \forall j \in C \label[constr]{constr:fl-fractional-z} \end{alignat} \end{mdframed} We note that the integrality gap example from \Cref{subsec:nat-lp} can be easily converted to show a similar gap for the Facility Location with Multiple Outliers problem. Therefore, we strengthen the \textsf{LP}\xspace in a manner similar to the previous section. First, we convert the \textsf{LP}\xspace to a feasibility \textsf{LP}\xspace by guessing the optimal cost up to a factor of $2$, say $\Delta$, and by adding a constraint $\sum\limits_{i \in F} f_{i}x_{i} + \sum_{i \in F, j \in C} y_{ij} \cdot d(i, j) \le \Delta$. Similar to \Cref{subsec:ellipsoid}, we use the Ellipsoid algorithm to find a feasible \textsf{LP}\xspace solution that satisfies this constraint, as well as \Crefrange{constr:fl-cover-j}{constr:fl-fractional-z}. Let $H = \{j \in C \mid \sum_{i \in F} y_{ij} \ge \frac{1}{6\alpha} \}$ be the set of \emph{heavy} clients. For any $i \in F$, let $\widetilde{x}_i \coloneqq \min \{1, 6\alpha \cdot x_i \}$, and for any $i \in F, j \in H$, let $\widetilde{y}_{ij} \coloneqq \min\{1, 6\alpha \cdot y_{ij}\}$. It is easy to see that $(\widetilde x, \widetilde y)$ is a feasible Facility Location (without outliers) solution for the instance induced by the heavy clients, and its cost is at most $6\alpha \Delta$. 
We use an \textsf{LP}\xspace-based algorithm (such as \cite{ByrkaFLLP}) with a constant approximation guarantee to round this solution to an integral solution $(F_H, H)$, where $F_H \subseteq F$. Let $L = C \setminus H$ be the set of \emph{light} clients. Note that for any light client $j \in L$, $z_j \le \sum_{i \in F} y_{ij} < \frac{1}{6\alpha}$. Also, for a color class $\mathcal{C}_t$, let $\mathcal{C}_t(H) \coloneqq \mathcal{C}_t \setminus H$ denote the uncovered (light) elements from $\mathcal{C}_t$, and let $k_t(H) \coloneqq k_t - |\mathcal{C}_t \cap H|$ denote its residual coverage requirement. Wlog, we assume that $k_t(H)$ is positive, otherwise we can ignore the color class $\mathcal{C}_t$ from consideration in the remaining part. Now, we check whether the following constraint holds for all color classes $\mathcal{C}_t$: \begin{equation} \sum_{i \in F} \min\bigg\{x_i \cdot k_t(H), \sum_{j \in \mathcal{C}_t(H)} y_{ij} \bigg\} \ge k_t(H) \label[constr]{eqn:fl-constraint} \end{equation} First, note that this can be easily formulated as an \textsf{LP}\xspace constraint by introducing auxiliary variables. If this constraint is not satisfied for some color class $\mathcal{C}_t$, we report it as a violated constraint. Consider the integral \textsf{LP}\xspace solution $(x', y', z')$ corresponding to a feasible integral solution $(F', C')$. We argue that $(x', y', z')$ satisfies this constraint. Note that at most $|\mathcal{C}_t \cap H|$ clients are connected from the set $\mathcal{C}_t \cap H$. Therefore, by feasibility of the solution, at least $k_t(H)$ clients must be connected from $\mathcal{C}_t(H)$. For a facility $i \in F'$, the quantity $\sum_{j \in \mathcal{C}_t(H)} y'_{ij}$ denotes the number of clients connected to $i$. However, even if the number of clients connected to $i$ is more than $k_t(H)$, only $k_t(H)$ of them count towards satisfying the residual connection requirement. 
Therefore, $(x', y', z')$ satisfies this constraint for all color classes, and hence it is a valid constraint. Now, suppose we have an \textsf{LP}\xspace solution $(x, y, z)$ that satisfies \mathcal{C}refrange{constr:fl-cover-j}{eqn:fl-constraint}, and has cost at most $\mathsf{D}elta$. By ``splitting'' the facilities into multiple co-located copies if necessary, we ensure the following two conditions hold: \begin{enumerate} \item For any facility $i \in F$, $x_i < \frac{1}{6\alpha}$. \item For any client $j \in L$ and any facility $i \in F$, $y_{ij} > 0\ \implies\ y_{ij} = x_i$. \end{enumerate} This has to be done in a careful manner, since we also want to maintain \mathcal{C}ref{eqn:fl-constraint} after the facilities have been split. This procedure results in a feasible \textsf{LP}\xspace solution of the same cost. Henceforth, we treat all co-located copies of a facility as distinct facilities for the sake of the analysis. We now show that the rounding for the light clients can be reduced to the Randomized Rounding algorithm from the previous section. For any facility $i \in F$, let $S_i \coloneqq \{j \in L \mid x_{i} = y_{ij} \}$ denote the set of light clients that are fractionally connected to $i$. The cost of opening facility $i$ and connecting all $j \in S_i$ to $i$ is equal to $w_i \coloneqq f_i + \sum_{j \in S_i} d(i, j)$. Consider an instance $(L, \mathcal{R})$ of the Partition Set Cover problem, where $\mathcal{R} = \{S_i \mid i \in F\}$ with weights $w_i$, and residual coverage requirement $k_t(H)$ for each color class $\mathcal{C}_t(H)$, and consider the corresponding \textsf{LP}\xspace solution $(x, z)$. The following properties are satisfied by the \textsf{LP}\xspace solution. \begin{enumerate} \item All the elements are light, and all the sets $S_i \in \mathcal{R}$ have $x_i < \frac{1}{6\alpha}$. 
\item The costs of the two \textsf{LP}\xspace solutions are equal: $$\sum_{S_i \in \mathcal{R}} w_i x_i = \sum_{i \in F} x_i \cdot \bigg(f_i + \sum_{j \in S_i} d(i, j)\bigg) = \sum_{i \in F} f_i x_i + \sum_{i \in F,\ j \in L} y_{ij} \cdot d(i, j).$$ \item \mathcal{C}ref{eqn:fl-constraint} is equivalent to: $$\sum_{S_i \in \mathcal{R}} x_i \cdot \min\left\{ k_t(H), |S_i \cap \mathcal{C}_t| \right\} \ge k_t(H) \quad \forall \mathcal{C}_t.$$ \end{enumerate} Therefore, we can use the Randomized Rounding algorithm from the previous section to obtain a solution $\mathcal{R}igma = \bigcup_{\ell = 1}^{O(\log r)} \mathcal{R}igma_\ell$. It has cost at most $O(\log r) \cdot \mathsf{D}elta$, and for each color class $\mathcal{C}_t(H)$, it covers at least $k_t(H)$ clients, with at least a constant probability. To obtain a solution for the Facility Location with Multiple Outliers problem, we open any facility $i \in F$, if its corresponding set $S_i$ is selected in $\mathcal{R}igma$. Furthermore, we connect $k_t(H)$ clients from $\mathcal{C}_t(H)$ to the set of opened facilities. Note that the cost of this solution is upper bounded by $w(\mathcal{R}igma) \le O(\log r) \cdot \mathsf{D}elta$. Combining this with the solution $(F_H, H)$ for the heavy clients with cost at most $O(1) \cdot \mathsf{D}elta$, we obtain our overall solution for the given instance. It is easy to see that this is an $O(\log r)$ approximation. \subsection{Minimum Cost Covering with Multiple Outliers} Here, we are given a set of Facilities $F$, a set of Clients $C$, belonging to a metric space $(F \cup C, d)$. Each facility $i \in F$ has a non-negative opening cost $f_i$. We are given $r$ subsets of clients (or ``color classes'') $\mathcal{C}_1, \ldots, \mathcal{C}_r$, where any client $j \in C$ belongs to at least one color class. Each color class $\mathcal{C}_t$ has a coverage requirement $1 \le k_t \le |\mathcal{C}_t|$. 
A ball centered at a facility $i \in F$ of radius $r \ge 0$ is the set $B(i, r) \coloneqq \{j \in C \mid d(i, j) \le r \}$. The goal is to select a set of balls $\mathcal{B} = \{B_i = B(i, r_i) \mid i \in F' \subseteq F \}$ centered at some subset of facilities $F' \subseteq F$, such that (i) The set of balls $\mathcal{B}$ satisfies the coverage requirement of each color class and (ii) the sum $\sum_{i \in F'} (f_i + r_i^\gamma)$ is minimized. Here, $\gamma \ge 1$ is a constant, and is a parameter of the problem. Note that even though the radius of a ball centered at $i \in F$ is allowed to be any non-negative real number, it can be restricted to the following set of ``relevant'' radii: $R_i \coloneqq \{d(i, j)\mid j \in C\}$. Now, define a set system $(C, \mathcal{R})$. Here, $C$ is the set of clients, and $\mathcal{R} = \{ B(i, r) \mid i \in F, r \in R_i \}$, with weight of the set corresponding to a ball $B(i, r)$ being defined as $f_i + r^\gamma$. Now, we use the algorithm from the previous section for this set system. Let $H$ be the set of heavy clients (or elements) as defined in \mathcal{C}ref{sec:LPRounding}. We use the Primal-Dual algorithm of \citet{CharikarP04} \footnote{\citet{CharikarP04} consider the special case of $\gamma = 1$, however their algorithm easily generalizes to arbitrary $\gamma$.} with an approximation guarantee of $\beta = 3^\gamma$ (which is a constant) to obtain a cover for the heavy clients. For the remaining light clients, we use the Randomized Rounding algorithm as is. Note that this reduction from the Minimum Cost Covering with Multiple Outliers Problem to the Partition Set Cover Problem is not exact, since the solution thus obtained may select sets corresponding to concentric balls in the original instance. However, from each set of concentric balls, we can choose the largest radius ball. This pruning process does not affect the coverage, and can only decrease the cost of the solution. 
Therefore, it is easy to see that the resulting solution is an $O(\log r)$ approximation. \section{\texorpdfstring{$\Omega(\log r)$}{Ω(log r)} Hardness Results} \label{sec:hardness} In this section, we show that it is $\mathsf{NP}$-hard to obtain approximation guarantees better than $O(\log r)$ for the Partition Set Cover problem for several geometric set systems, as well as the problems considered in \Cref{sec:fl-mcc}. The reductions are from (unweighted) Set Cover, and are straightforward extensions of a similar hardness result shown in \cite{bera2014approximation}. \subsection*{Geometric Set Systems} Suppose we are given an instance of Set Cover $(X, \mathcal{R})$, where $X = \{e_1, \ldots, e_n\}$. For each set $S_i \in \mathcal{R}$, add a unit interval $I_i$ in $\mathbb{R}$, such that all intervals are disjoint. Add a point $p_{ij}$ (of color class $\mathcal{C}_j$) inside an interval $I_i$, corresponding to an element $e_j \in X$, and a set $S_i \ni e_j$. Thus, there are $n$ disjoint color classes, partitioning the set of points. The coverage requirement of each color class is $1$. It is easy to see that a feasible solution to the Partition Set Cover instance corresponds to a feasible solution to the original Set Cover instance, of the same cost. The $\Omega(\log r)$ hardness follows from the $\Omega(\log n)$ hardness for Set Cover (\cite{DS2014}), since the number of color classes is $r = n$. Therefore, the $\Omega(\log r)$ hardness holds for the Partition Set Cover problem even when the sets are unit intervals in $\mathbb{R}$. This is easily generalized to any other type of geometric objects, as all that is needed is the disjointness of the geometric objects. \subsection*{Facility Location and Minimum Cost Covering} First, consider the Facility Location with Multiple Outliers problem. 
Given an instance $(X, \mathcal{R})$ of the unweighted Set Cover problem, we add facilities $i \in F$, corresponding to sets $S_i \in \mathcal{R}$, uniformly separated on the real line, such that the distance between the facilities is at least $|X| \cdot |\mathcal{R}|$. The opening cost of each facility is $1$. Similar to the reduction above, we add a client $c_{ij}$ co-located with facility $i \in F$, corresponding to an element $e_j \in X$, and a set $S_i \ni e_j$. The coverage requirement of each color class is set to $1$. It is easy to see the one-to-one correspondence between optimal solutions to both of these problems. Now, we tweak the above instance for obtaining the same result for the Minimum Cost Covering with Multiple Outliers problem on a line, even when all the opening costs are $0$ (otherwise, we can use the reduction from the paragraph above as is). For each facility $i \in F$, add the clients $c_{ij}$ (corresponding to the elements in $S_i$) at a distance of $1$ from $i$ (instead of being co-located, as in the previous reduction). The coverage requirement of each color class is $1$, as before. Note that the facility $i$ and the clients $\{c_{ij} \mid e_j \in S_i \}$ form a ``cluster'', and the inter-cluster distance is large enough to ensure that an optimal solution to the resulting instance consists of disjoint clusters, which then exactly corresponds to an optimal solution to the Set Cover problem. \subsection*{Acknowledgment} We thank Sariel Har-Peled and Timothy M. Chan for preliminary discussions on this problem. \end{document}
4,079
26,543
en
train
0.94.0
\begin{document} \title{Identification and well-posedness in nonparametric models with independence conditions} \author{Victoria Zinde-Walsh\thanks{ The support of the Social Sciences and Humanities Research Council of Canada (SSHRC) and the \textit{Fonds\ qu\'{e}becois de la recherche sur la soci\'{e} t\'{e} et la culture} (FRQSC) is gratefully acknowledged. } \\ \\ McGill University and CIREQ\\ [email protected]} \maketitle \date{} \begin{center} \pagebreak {\LARGE Abstract} \end{center} This paper provides a nonparametric analysis for several classes of models, with cases such as classical measurement error, regression with errors in variables, and other models that may be represented in a form involving convolution equations. The focus here is on conditions for existence of solutions, nonparametric identification and well-posedness in the space $ S^{\ast }$ of generalized functions (tempered distributions). This space provides advantages over working in function spaces by relaxing assumptions and extending the results to include a wider variety of models, for example by not requiring existence of density. Classes of (generalized) functions for which solutions exist are defined; identification conditions, partial identification and its implications are discussed. Conditions for well-posedness are given and the related issues of plug-in estimation and regularization are examined. \section{Introduction} Many statistical and econometric models involve independence (or conditional independence) conditions that can be expressed via convolution. Examples are independent errors, classical measurement error and Berkson error, regressions involving data measured with these types of errors, common factor models and models that conditionally on some variables can be represented in similar forms, such as a nonparametric panel data model with errors conditionally on observables independent of the idiosyncratic component. 
Although the convolution operator is well known, this paper explicitly provides convolution equations for a wide range of models for the first time. In many cases the analysis in the literature takes Fourier transforms as the starting point, e.g. characteristic functions for distributions of random vectors (as in the famous Kotlarski lemma, 1967). The emphasis here on convolution equations for the models provides the opportunity to explicitly state nonparametric classes of functions defined by the model for which such equations hold, in particular, for densities, conditional densities and regression functions. The statistical model may give rise to different systems of convolution equations and may be over-identified in terms of convolution equations; some choices may be better suited to different situations, for example, here in Section 2 two sets of convolution equations (4 and 4a in Table 1) are provided for the same classical measurement error model with two measurements; it turns out that one of those makes it possible to relax some independence conditions, while the other makes it possible to relax a support assumption in identification. Many of the convolution equations derived here are based on density-weighted conditional averages of the observables. The main distinguishing feature is that here all the functions defined by the model are considered within the space of generalized functions $S^{\ast },$ the space of so-called tempered distributions (they will be referred to as generalized functions). This is the dual space, the space of linear continuous functionals, on the space $S$ of well-behaved functions: the functions in $S$ are infinitely differentiable and all the derivatives go to zero at infinity faster than any power. 
An important advantage of assuming the functions are in the space of generalized functions is that in that space any distribution function has a density (generalized function) that continuously depends on the distribution function, so that distributions with mass points and fractal measures have well-defined generalized densities. Any regular function majorized by a polynomial belongs to $S^{\ast }$; this includes polynomially growing regression functions and binary choice regression as well as many conditional density functions. Another advantage is that the Fourier transform is an isomorphism of this space, and thus the usual approaches in the literature that employ characteristic functions are also included. Details about the space $S^{\ast }$ are in Schwartz (1966) and are summarized in Zinde-Walsh (2012). The model classes examined here lead to convolution equations that are similar to each other in form; the main focus of this paper is on existence, identification, partial identification and well-posedness conditions. Existence and uniqueness of solutions to some systems of convolution equations in the space $S^{\ast }$ were established in Zinde-Walsh (2012). Those results are used here to state identification in each of the models. Identification requires examining support of the functions and generalized functions that enter into the models; if support excludes an open set then identification at least for some unknown functions in the model fails; however, isolated points or lower-dimensional manifolds where, e.g., the characteristic function takes zero values (an example is the uniform distribution) do not preclude identification in some of the models. This point was made in e.g. Carrasco and Florens (2010), Evdokimov and White (2011) and is expressed here in the context of operating in $S^{\ast }.$ Support restriction for the solution may imply that only partial identification will be provided. 
However, even in partially identified models some features of interest (see, e.g. Matzkin, 2007) could be identified thus some questions could be addressed even in the absence of full identification. A common example of incomplete identification which nevertheless provides important information is Gaussian deconvolution of a blurred image of a car obtained from a traffic camera; the filtered image is still not very good, but the licence plate number is visible for forensics. Well-posedness conditions are emphasized here. The well-known definition by Hadamard (1923) defines well-posedness via three conditions: existence of a solution, uniqueness of the solution and continuity in some suitable topology. The first two are essentially identification. Since here we shall be defining the functions in subclasses of $S^{\ast }$ we shall consider continuity in the topology of this generalized functions space. This topology is weaker than the topologies in functions spaces, such as the uniform or $L_{p}$ topologies; thus differentiating the distribution function to obtain a density is a well-posed problem in $S^{\ast },$ by contrast, even in the class of absolutely continuous distributions with uniform metric where identification for density in the space $L_{1}$ holds, well-posedness however does not obtain (see discussion in Zinde-Walsh, 2011). But even though in the weaker topology of $S^{\ast }$ well-posedness obtains more widely, for the problems considered here some additional restrictions may be required for well-posedness. Well-posedness is important for plug-in estimation since if the estimators are in a class where the problem is well-posed they are consistent, and conversely, if well-posedness does not hold consistency will fail for some cases. Lack of well-posedness can be remedied by regularization, but the price is often more extensive requirements on the model and slower convergence. For example, in deconvolution (see e.g. 
Fan, 1991, and most other papers cited here) spectral cut-off regularization is utilized; it crucially depends on knowing the rate of the decay at infinity of the density. Often non-parametric identification is used to justify parametric or semi-parametric estimation; the claim here is that well-posedness should be an important part of this justification. The reason for that is that in estimating a possibly misspecified parametric model, the misspecified functions of the observables belong in a nonparametric neighborhood of the true functions; if the model is non-parametrically identified, the unique solution to the true model exists, but without well-posedness the solution to the parametric model and to the true one may be far apart. For deconvolution An and Hu (2012) demonstrate well-posedness in spaces of integrable density functions when the measurement error has a mass point; this may happen in surveys when probability of truthful reporting is non-zero. The conditions for well-posedness here are provided in $S^{\ast }$ ; this then additionally does not exclude mass points in the distribution of the mismeasured variable itself; there is some empirical evidence of mass points in earnings and income. The results here show that in $S^{\ast }$ well-posedness holds more generally: as long as the error distribution is not super-smooth. The solutions for the systems of convolution equations can be used in plug-in estimation. Properties of nonparametric plug-in estimators are based on results on stochastic convergence in $S^{\ast }$ for the solutions that are stochastic functions expressed via the estimators of the known functions of the observables. Section 2 of the paper enumerates the classes of models considered here. They are divided into three groups: 1. measurement error models with classical and Berkson errors and possibly an additional measurement, and common factor models that transform into those models; 2. 
nonparametric regression models with classical measurement and Berkson errors in variables; 3. measurement error and regression models with conditional independence. The corresponding convolution equations and systems of equations are provided and discussed. Section 3 is devoted to describing the solutions to the convolution equations of the models. The main mathematical aspect of the different models is that they require solving equations of a similar form. Section 4 provides a table of identified solutions and discusses partial identification and well-posedness. Section 5 examines plug-in estimation. A brief conclusion follows.
2,380
26,749
en
train
0.94.1
\section{Convolution equations in classes of models with independence or conditional independence} This section derives systems of convolution equations for some important classes of models. The first class of model is measurement error models with some independence (classical or Berkson error) and possibly a second measurement; the second class is regression models with classical or Berkson type error; the third is models with conditional independence. For the first two classes the distributional assumptions for each model and the corresponding convolution equations are summarized in tables; it is indicated which of the functions are known and which unknown; a brief discussion of each model and derivation of the convolution equations follows. The last part of this section discusses convolution equations for two specific models with conditional independence; one is a panel data model studied by Evdokimov (2011), the other a regression model where independence of measurement error of some regressors obtains conditionally on a covariate. The general assumption made here is that all the functions in the convolution equations belong to the space of generalized functions $S^{\ast }.$ \textbf{Assumption 1. }\textit{All the functions defined by the statistical model are in the space of generalized functions }$S^{\ast }.$ This space of generalized function includes functions from most of the function classes that are usually considered, but allows for some useful generalizations. The next subsection provides the necessary definitions and some of the implications of working in the space $S^{\ast }.$ \subsection{The space of generalized functions $S^{\ast }.$} The space $S^{\ast }$ is the dual space, i.e. the space of continuous linear functionals on the space $S$ of functions. The theory of generalized functions is in Schwartz (1966); relevant details are summarized in Zinde-Walsh (2012). In this subsection the main definitions and properties are reproduced. 
Recall the definition of $S.$ For any vector of non-negative integers $m=(m_{1},...m_{d})$ and vector $ t\in R^{d}$ denote by $t^{m}$ the product $t_{1}^{m_{1}}...t_{d}^{m_{d}}$ and by $\partial ^{m}$ the differentiation operator $\frac{\partial ^{m_{1}} }{\partial x_{1}^{m_{1}}}...\frac{\partial ^{m_{d}}}{\partial x_{d}^{m_{d}}} ; $ $C_{\infty }$ is the space of infinitely differentiable (real or complex-valued) functions on $R^{d}.$ The space $S\subset C_{\infty }$ of test functions is defined as: \begin{equation*} S=\left\{ \psi \in C_{\infty }(R^{d}):|t^{l}\partial ^{k}\psi (t)|=o(1)\text{ as }t\rightarrow \infty \right\} , \end{equation*} for any $k=(k_{1},...k_{d}),l=(l_{1},...l_{d}),$ where $k=(0,...0)$ corresponds to the function itself, $t\rightarrow \infty $ coordinate-wise; thus \ the functions in $S$ go to zero at infinity faster than any power as do their derivatives; they are rapidly decreasing functions. A sequence in $ S $ converges if in every bounded region each $\left\vert t^{l}\partial ^{k}\psi (t)\right\vert $ converges uniformly. Then in the dual space $S^{\ast }$ any $b\in S^{\ast }$ represents a linear functional on $S;$ the value of this functional for $\psi \in S$ is denoted by $\left( b,\psi \right) .$ When $b$ is an ordinary (point-wise defined) real-valued function, such as a density of an absolutely continuous distribution or a regression function, the value of the functional on real-valued $\psi $ defines it and is given by \begin{equation*} \left( b,\psi \right) =\int b(x)\psi (x)dx. \end{equation*} If $b$ is a characteristic function it may be complex-valued, then the value of the functional $b$ applied to $\psi \in S$ where $S$ is the space of complex-valued functions, is \begin{equation*} \left( b,\psi \right) =\int b(x)\overline{\psi (x)}dx, \end{equation*} where overbar denotes complex conjugate. 
The integrals are taken over the whole space $R^{d}.$ The generalized functions in the space $S^{\ast }$ are continuously differentiable and the differentiation operator is continuous; Fourier transforms and their inverses are defined for all $b\in S^{\ast },$ the operator is a (continuous) isomorphism of the space $S^{\ast }.$ However, convolutions and products are not defined for all pairs of elements of $ S^{\ast },$ unlike, say, the space $L_{1};$ on the other hand, in $L_{1}$ differentiation is not defined and not every distribution has a density that is an element of $L_{1}.$ Assumption 1 places no restrictions on the distributions, since in $S^{\ast } $ any distribution function is differentiable and the differentiation operator is continuous. The advantage of not restricting distributions to be absolutely continuous is that mass points need not be excluded; distributions representing fractal measures such as the Cantor distribution are also allowed. This means that mixtures of discrete and continuous distributions e.g. such as those examined by An and Hu (2012) for measurement error in survey responses, some of which may be error-contaminated, but some may be truthful, leading to a mixture with a mass point distribution are included. Moreover, in $S^{\ast }$ the case of mass points in the distribution of the mismeasured variable is also easily handled; in the literature such mass points are documented for income or work hours distributions in the presence of rigidities such as unemployment compensation rules (e.g. Green and Riddell, 1997). Fractal distributions may arise in some situations, e.g. Karlin's (1958) example of the equilibrium price distribution in an oligopolistic game. For regression functions the assumption $g\in S^{\ast }$ implies that growth at infinity is allowed but is somewhat restricted. 
In particular for any ordinary point-wise defined function $b\in S^{\ast }$ the condition \begin{equation} \int ...\int \Pi _{i=1}^{d}\left( \left( 1+t_{i}^{2}\right) ^{-1}\right) ^{m_{i}}\left\vert b(t)\right\vert dt_{1}...dt_{d}<\infty , \label{condition} \end{equation} needs to be satisfied for some non-negative valued $m_{1},...,m_{d}.$ If a locally integrable function $g$ is such that its growth at infinity is majorized by a polynomial, then $b\equiv g$ satisfies this condition. While restrictive this still widens the applicability of many currently available approaches. For example in Berkson regression the common assumption is that the regression function be absolutely integrable (Meister, 2009); this excludes binary choice, linear and polynomial regression functions that belong to $S^{\ast }$ and satisfy Assumption 1. Also, it is advantageous to allow for functions that may not belong to any ordinary function classes, such as sums of $\delta -$functions (``sum of peaks'') or (mixture) cases with sparse parts of support, such as isolated points; such functions are in $ S^{\ast }.$ Distributions with mass points can arise when the response to a survey question may be only partially contaminated; regression ``sum of peaks'' functions arise e.g. in spectroscopy and astrophysics where isolated point supports are common.
1,908
26,749
en
train
0.94.2
\subsection{Measurement error and related models} Current reviews for measurement error models are in Carrol et al, (2006), Chen et al (2011), Meister (2009). Here and everywhere below the variables $x,z,x^{\ast },u,u_{x}$ are assumed to be in $R^{d};y,v$ are in $R^{1};$ all the integrals are over the corresponding space; density of $\nu $ for any $\nu $ is denoted by $f_{v};$ independence is denoted by $\bot $; expectation of $x$ conditional on $z$ is denoted by $E(x|z).$ \subsubsection{List of models and corresponding equations} The table below lists various models and corresponding convolution equations. Many of the equations are derived from density weighted conditional expectations of the observables. Recall that for two functions, $f$ and $g$ convolution $f\ast g$ is defined by \begin{equation*} (f\ast g)\left( x\right) =\int f(w)g(x-w)dw; \end{equation*} this expression is not always defined. A similar expression (with some abuse of notation since generalized functions are not defined pointwise) may hold for generalized functions in $S^{\ast };$ similarly, it is not always defined. With Assumption 1 for the models considered here we show that convolution equations given in the Tables below hold in $S^{\ast }.$ \begin{center} \textbf{Table 1.} Measurement error models: 1. Classical measurement error; 2. Berkson measurement error; 3. Classical measurement error with additional observation (with zero conditional mean error); 4., 4a. Classical error with additional observation (full independence). 
\begin{tabular}{|c|c|c|c|c|} \hline Model & $ \begin{array}{c} \text{Distributional} \\ \text{assumptions} \end{array} $ & $ \begin{array}{c} \text{Convolution } \\ \text{equations} \end{array} $ & $ \begin{array}{c} \text{Known} \\ \text{ functions} \end{array} $ & $ \begin{array}{c} \text{Unknown} \\ \text{ functions} \end{array} $ \\ \hline \multicolumn{1}{|l|}{$\ \ \ $1.} & \multicolumn{1}{|l|}{$\ \ \ \ \ \ \ \begin{array}{c} z=x^{\ast }+u \\ x^{\ast }\bot u \end{array} $} & \multicolumn{1}{|l|}{$\ \ \ \ \ \ \ f_{x^{\ast }}\ast f_{u}=f_{z}$} & \multicolumn{1}{|l|}{$\ \ \ \ \ \ \ f_{z},f_{u}$} & \multicolumn{1}{|l|}{$\ \ \ \ \ \ \ f_{x^{\ast }}$} \\ \hline 2. & $\ \begin{array}{c} z=x^{\ast }+u \\ z\bot u \end{array} $ & $f_{z}\ast f_{-u}=f_{x^{\ast }}$ & $f_{z},f_{u}$ & $f_{x^{\ast }}$ \\ \hline \multicolumn{1}{|l|}{$\ $\ 3.} & \multicolumn{1}{|l|}{$\ \ \begin{array}{c} z=x^{\ast }+u; \\ x=x^{\ast }+u_{x} \\ x^{\ast }\bot u; \\ E(u_{x}|x^{\ast },u)=0; \\ E\left\Vert z\right\Vert <\infty ;E\left\Vert u\right\Vert <\infty . \end{array} $} & \multicolumn{1}{|l|}{$ \begin{array}{c} f_{x^{\ast }}\ast f_{u}=f_{z}; \\ h_{k}\ast f_{u}=w_{k}, \\ \text{with }h_{k}(x)\equiv x_{k}f_{x^{\ast }}(x); \\ k=1,2...d \end{array} $} & \multicolumn{1}{|l|}{$ \begin{array}{c} f_{z},w_{k}, \\ k=1,2...d \end{array} $} & \multicolumn{1}{|l|}{$f_{x^{\ast }}$; $f_{u}$} \\ \hline 4. & $ \begin{array}{c} z=x^{\ast }+u; \\ x=x^{\ast }+u_{x};x^{\ast }\bot u; \\ x^{\ast }\bot u_{x};E(u_{x})=0; \\ u\bot u_{x}; \\ E\left\Vert z\right\Vert <\infty ;E\left\Vert u\right\Vert <\infty . \end{array} $ & $ \begin{array}{c} f_{x^{\ast }}\ast f_{u}=f_{z}; \\ h_{k}\ast f_{u}=w_{k}; \\ f_{x^{\ast }}\ast f_{u_{x}}=f_{x}; \\ \text{with }h_{k}(x)\equiv x_{k}f_{x^{\ast }}(x); \\ k=1,2...d \end{array} $ & $ \begin{array}{c} f_{z}\text{, }f_{x};w;w_{k} \\ k=1,2...d \end{array} $ & $f_{x^{\ast }};f_{u},$ $f_{u_{x}}$ \\ \hline 4a. 
& $ \begin{array}{c} \text{Same model as 4.,} \\ \text{alternative} \\ \text{equations:} \end{array} $ & $ \begin{array}{c} f_{x^{\ast }}\ast f_{u}=f_{z}; \\ f_{u_{x}}\ast f_{-u}=w; \\ h_{k}\ast f_{-u}=w_{k}, \\ \text{with }h_{k}(x)\equiv x_{k}f_{u_{x}}(x); \\ k=1,2...d \end{array} $ & --"-- & --"-- \\ \hline \end{tabular} \end{center} Notation: $k=1,2,...,d;$ in 3. and 4, $w_{k}=E(x_{k}f_{z}(z)|z);$ in 4a $ w=f_{z-x};w_{k}=E(x_{k}w(z-x)|\left( z-x\right) ).$ \textbf{Theorem 1.} \textit{Under Assumption 1 for each of the models 1-4 the corresponding convolution equations of Table 1 hold in the generalized functions space }$S^{\ast }$\textit{.} The proof is in the derivations of the following subsection. Assumption 1 requires considering all the functions defined by the model as elements of the space $S^{\ast },$ but if the functions (e.g. densities, the conditional moments) exist as regular functions, the convolutions are just the usual convolutions of functions, on the other hand, the assumption allows to consider convolutions for cases where distributions are not absolutely continuous. \subsubsection{\protect Measurement error models and derivation of the corresponding equations.} 1. The classical measurement error model. The case of the classical measurement error is well known in the literature. The concept of error independent of the variable of interest is applicable to many problems in seismology, image processing, where it may be assumed that the source of the error is unrelated to the signal. In e.g. Cunha et al. (2010) it is assumed that some constructed measurement of ability of a child derived from test scores fits into this framework. As is well-known in regression a measurement error in the regressor can result in a biased estimator (attenuation bias). Typically the convolution equation \begin{equation*} f_{x^{\ast }}\ast f_{u}=f_{z} \end{equation*} is written for density functions when the distribution function is absolutely continuous. 
The usual approach to possible non-existence of density avoids considering the convolution and focuses on the characteristic functions. Since density always exists as a generalized function and convolution for such generalized functions is always defined it is possible to write convolution equations in $S^{\ast }$ for any distributions in model 1. The error distribution (and thus generalized density $f_{u})$ is assumed known thus the solution can be obtained by "deconvolution" (Carrol et al (2006), Meister (2009), the review of Chen et al (2011) and papers by Fan (1991), Carrasco and Florens(2010) among others). 2. The Berkson error model.{} For Berkson error the convolution equation is also well-known. Berkson error of measurement arises when the measurement is somehow controlled and the error is caused by independent factors, e.g. amount of fertilizer applied is given but the absorption into soil is partially determined by factors independent of that, or students' grade distribution in a course is given in advance, or distribution of categories for evaluation of grant proposals is determined by the granting agency. The properties of Berkson error are very different from that of classical error of measurement, e.g. it does not lead to attenuation bias in regression; also in the convolution equation the unknown function is directly expressed via the known ones when the distribution of Berkson error is known. For discussion see Carrol et al (2006), Meister (2009), and Wang (2004). Models 3. and 4. The classical measurement error with another observation. In 3., 4. in the classical measurement error model the error distribution is not known but another observation for the mis-measured variable is available; this case has been treated in the literature and is reviewed in Carrol et al (2006), Chen et al \ (2011). In econometrics such models were examined by Li and Vuong (1998), Li (2002), Schennach (2004) and subsequently others (see e.g. 
the review by Chen et al, 2011). In case 3 the additional observation contains an error that is not necessarily independent, just has conditional mean zero. Note that here the multivariate case is treated where arbitrary dependence for the components of vectors is allowed. For example, it may be of interest to consider the vector of not necessarily independent latent abilities or skills as measured by different sections of an IQ test, or the GRE scores. Extra measurements provide additional equations. Consider for any $k=1,...d$ the function of observables $w_{k}$ defined by density weighted expectation $ E(x_{k}f_{z}(z)|z)$ as a generalized function; it is then determined by the values of the functional $\left( w_{k},\psi \right) $ for every $\psi \in S.$ Note that by assumption $E(x_{k}f_{z}(z)|z)=E(x_{k}^{\ast }f_{z}(z)|z);$ then for any $\psi \in S$ the value of the functional: \begin{eqnarray*} (E(x_{k}^{\ast }f_{z}(z)|z),\psi ) &=&\int [\int x_{k}^{\ast }f_{x^{\ast },z}(x^{\ast },z)dx^{\ast }]\psi (z)dz= \\ \int \int x_{k}^{\ast }f_{x^{\ast },z}(x^{\ast },z)\psi (z)dx^{\ast }dz &=&\int \int x_{k}^{\ast }\psi (x^{\ast }+u)f_{x^{\ast },u}(x^{\ast },u)dx^{\ast }du= \\ \int \int x_{k}^{\ast }f_{x^{\ast }}(x^{\ast })f_{u}(u)\psi (x^{\ast }+u)dx^{\ast }du &=&(h_{k}\ast f_{u},\psi ). \end{eqnarray*} The third expression is a double integral which always exists if $ E\left\Vert x^{\ast }\right\Vert <\infty $; this is a consequence of boundedness of the expectations of $z$ and $u.$ The fourth is a result of change of variables $\left( x^{\ast },z\right) $ into $\left( x^{\ast },u\right) ,$ the fifth uses independence of $x^{\ast }$and $u,$ and the sixth expression follows from the corresponding expression for the convolution of generalized functions (Schwartz, 1967, p.246). The conditions of model 3 are not sufficient to identify the distribution of $u_{x};$ this is treated as a nuisance part in model 3. 
The model in 4 with all the errors and mis-measured variable independent of each other was investigated by Kotlyarski (1967) who worked with the joint characteristic function. In 4 consider in addition to the equations written for model 3 another that uses the independence between $x^{\ast }$ and $ u_{x} $ and involves $f_{u_{x}}.$ In representation 4a the convolution equations involving the density $ f_{u_{x}}$ are obtained by applying the derivations that were used here for the model in 3.: \begin{equation*} \begin{array}{c} z=x^{\ast }+u; \\ x=x^{\ast }+u_{x}, \end{array} \end{equation*} to the model in 4 with $x-z$ playing the role of $z,$ $u_{x}$ playing the role of $x^{\ast },$ $-u$ playing the role of $u,$ and $x^{\ast }$ playing the role of $u_{x}.$ The additional convolution equations arising from the extra independence conditions provide extra equations and involve the unknown density $f_{u_{x}}.$ This representation leads to a generalization of Kotlyarski's identification result similar to that obtained by Evdokimov (2011) who used the joint characteristic function. The equations in 4a make it possible to identify $f_{u},f_{u_{x}}$ ahead of $f_{x^{\ast }};$ for identification this will require less restrictive conditions on the support of the characteristic function for $x^{\ast }.$
3,595
26,749
en
train
0.94.3
\subsubsection{Some extensions} \textbf{A. Common factor models.} Consider a model $\tilde{z}=AU,$ with $A$ a matrix of known constants and $ \tilde{z}$ a $m\times 1$ vector of observables, $\ U$ a vector of unobservable variables. Usually, $A$ is a block matrix and $AU$ can be represented via a combination of mutually independent vectors. Then without loss of generality consider the model \begin{equation} \tilde{z}=\tilde{A}x^{\ast }+\tilde{u}, \label{factormod} \end{equation} where $\tilde{A}$ is a $m\times d$ known matrix of constants, $\tilde{z}$ is a $m\times 1$ vector of observables, unobserved $x^{\ast }$ is $d\times 1$ and unobserved $\tilde{u}$ is $m\times 1.$ If the model $\left( \ref {factormod}\right) $ can be transformed to model 3 considered above, then $ x^{\ast }$ will be identified whenever identification holds for model 3. Once some components are identified identification of other factors could be considered sequentially. \textbf{Lemma 1. }\textit{If in }$\left( \ref{factormod}\right) $ \textit{ the vectors }$x^{\ast }$\textit{\ and }$\tilde{u}$\textit{\ are independent and all the components of the vector }$\tilde{u}$\textit{\ are mean independent of each other and are mean zero and the matrix }$A$ \textit{can be partitioned after possibly some permutation of rows as }$\left( \begin{array}{c} A_{1} \\ A_{2} \end{array} \right) $\textit{\ with }$rankA_{1}=rankA_{2}=d,$\textit{\ then the model }$ \left( \ref{factormod}\right) $\textit{\ implies model 3.} Proof. Define $z=T_{1}\tilde{z},$ where conformably to the partition of $A$ the partitioned $T_{1}=\left( \begin{array}{c} \tilde{T}_{1} \\ 0 \end{array} \right) ,$ with $\tilde{T}_{1}A_{1}x^{\ast }=x^{\ast }$ (such a $\tilde{T} _{1}$ always exists by the rank condition); then $z=x^{\ast }+u,$ where $ u=T_{1}\tilde{u}$ is independent of $x^{\ast }.$ Next define $T_{2}=\left( \begin{array}{c} 0 \\ \tilde{T}_{2} \end{array} \right) $ similarly with $\tilde{T}_{2}A_{2}x^{\ast }=x^{\ast }$. 
Then $x=T_{2}\tilde{z}$ is such that $x=x^{\ast }+u_{x},$ where $u_{x}=T_{2} \tilde{u}$ and does not include any components from $u.$ This implies $ E(u_{x}|x^{\ast },u)=0.$ Model 3 holds. $\blacksquare $ Here dependence in components of $x^{\ast }$ is arbitrary. A general structure with subvectors of $U$ independent of each other but with components which may be only mean independent (as $\tilde{u}$ here) or arbitrarily dependent (as in $x^{\ast })$ is examined by Ben-Moshe (2012). Models of linear systems with full independence were examined by e.g. Li and Vuong (1998). These models lead to systems of first-order differential equations for the characteristic functions. It may be that there are no independent components $x^{\ast }$ and $\tilde{u} $ for which the conditions of Lemma 1 are satisfied. Bonhomme and Robin (2010) proposed to consider products of the observables to increase the number of equations in the system and analyzed conditions for identification; Ben-Moshe (2012) provided necessary and sufficient conditions under which this strategy leads to identification when there may be some dependence. \textbf{B. Error correlations with more observables.} The extension to non-zero $E(u_{x}|z)$ in model 3 is trivial if this expectation is a known function. A more interesting case results if the errors $u_{x}$ and $u$ are related, e.g. \begin{equation*} u_{x}=\rho u+\eta ;\eta \bot z. \end{equation*} With an unknown parameter (or function of observables) $\rho $ if more observations are available more convolution equations can be written to identify all the unknown functions. Suppose that additionally an observation $y$ is available with \begin{eqnarray*} y &=&x^{\ast }+u_{y}; \\ u_{y} &=&\rho u_{x}+\eta _{1};\eta _{1}\bot \eta ,z. 
\end{eqnarray*} Without loss of generality consider the univariate case and define $ w_{x}=E(xf(z)|z);w_{y}=E(yf(z)|z).\,\ $Then the system of convolution equations expands to \begin{equation} \left\{ \begin{array}{ccc} f_{x^{\ast }}\ast f_{u} & & =w; \\ (1-\rho )h_{x^{\ast }}\ast f_{u} & +\rho zf(z) & =w_{x}; \\ (1-\rho ^{2})h_{x^{\ast }}\ast f_{u} & +\rho ^{2}zf(z) & =w_{y}. \end{array} \right. \label{ar(1)} \end{equation} The three equations have three unknown functions, $f_{x^{\ast }},f_{u}$ and $ \rho .$ Assuming that support of $\rho $ does not include the point 1, $\rho $ can be expressed as a solution to a linear algebraic equation derived from the two equations in $\left( \ref{ar(1)}\right) $ that include $\rho :$ \begin{equation*} \rho =(w_{x}-zf(z))^{-1}\left( w_{y}-w_{x}\right) . \end{equation*}
1,536
26,749
en
train
0.94.4
\subsection{Regression models with classical and Berkson errors and the convolution equations} \subsubsection{The list of models} The table below provides several regression models and the corresponding convolution equations involving density weighted conditional expectations. \begin{center} Table 2. Regression models: 5. Regression with classical measurement error and an additional observation; 6. Regression with Berkson error ($x,y,z$ are observable); 7. Regression with zero mean measurement error and Berkson instruments. \end{center} \begin{tabular}{|c|c|c|c|c|} \hline Model & $ \begin{array}{c} \text{Distributional} \\ \text{assumptions} \end{array} $ & $ \begin{array}{c} \text{Convolution } \\ \text{equations} \end{array} $ & $ \begin{array}{c} \text{Known} \\ \text{ functions} \end{array} $ & $ \begin{array}{c} \text{Unknown} \\ \text{ functions} \end{array} $ \\ \hline \multicolumn{1}{|l|}{$\ \ $5.} & \multicolumn{1}{|l|}{$\ \ \begin{array}{c} y=g(x^{\ast })+v \\ z=x^{\ast }+u; \\ x=x^{\ast }+u_{x} \\ x^{\ast }\bot u;E(u)=0; \\ E(u_{x}|x^{\ast },u)=0; \\ E(v|x^{\ast },u,u_{x})=0. \end{array} $} & \multicolumn{1}{|l|}{$ \begin{array}{c} f_{x^{\ast }}\ast f_{u}=f_{z}; \\ \left( gf_{x^{\ast }}\right) \ast f_{u}=w, \\ h_{k}\ast f_{u}=w_{k}; \\ \text{with }h_{k}(x)\equiv x_{k}g(x)f_{x^{\ast }}(x); \\ k=1,2...d \end{array} $} & \multicolumn{1}{|l|}{$f_{z};$ $w;w_{k}$} & \multicolumn{1}{|l|}{$ f_{x^{\ast }}$; $f_{u}$; $g.$} \\ \hline \multicolumn{1}{|l|}{$\ \ $6.} & \multicolumn{1}{|l|}{$ \begin{array}{c} y=g(x)+v \\ z=x+u;E(v|z)=0; \\ z\bot u;E(u)=0. \end{array} $} & \multicolumn{1}{|l|}{$\ \ \ \ \ \begin{array}{c} f_{x}=f_{-u}\ast f_{z}; \\ g\ast f_{-u}=w \end{array} $} & \multicolumn{1}{|l|}{$\ f_{z};f_{x},w$} & \multicolumn{1}{|l|}{$f_{u}$; $g.$} \\ \hline \multicolumn{1}{|l|}{$\ \ $7.} & \multicolumn{1}{|l|}{$\ \ \begin{array}{c} y=g(x^{\ast })+v; \\ x=x^{\ast }+u_{x}; \\ z=x^{\ast }+u;z\bot u; \\ E(v|z,u,u_{x})=0; \\ E(u_{x}|z,v)=0. 
\end{array} $} & \multicolumn{1}{|l|}{$ \begin{array}{c} g\ast f_{u}=w; \\ h_{k}\ast f_{u}=w_{k}, \\ \text{with }h_{k}(x)\equiv x_{k}g(x); \\ k=1,2...d \end{array} $} & \multicolumn{1}{|l|}{$w,w_{k}$} & \multicolumn{1}{|l|}{$f_{u}$; $g.$} \\ \hline \end{tabular} Notes. Notation: $k=1,2...d;$ in model 5.$ w=E(yf_{z}(z)|z);w_{k}=E(x_{k}f_{z}(z)|z);$ in model 6. $w=E(y|z);$ in model 7. $w=E(y|z);w_{k}=E(x_{k}y|z).$ \textbf{Theorem 2.} \textit{Under Assumption 1 for each of the models 5-7 the corresponding convolution equations hold.} The proof is in the derivations of the next subsection. \subsubsection{\protect Discussion of the regression models and derivation of the convolution equations.} 5. The nonparametric regression model with classical measurement error and an additional observation. This type of model was examined by Li (2002) and Li and Hsiao (2004); the convolution equations derived here provide a convenient representation. Often models of this type were considered in semiparametric settings. Butucea and Taupin (2008) (extending the earlier approach by Taupin, 2001) consider a regression function known up to a finite dimensional parameter with the mismeasured variable observed with independent error where the error distribution is known. Under the latter condition the model 5 here would reduce to the two first equations \begin{equation*} f_{x^{\ast }}\ast f_{u}=f_{z};\text{ }\left( gf_{x^{\ast }}\right) \ast f_{u}=w, \end{equation*} where $f_{u}$ is known and two unknown functions are $g$ (here nonparametric) and $f_{x^{\ast }}.$ The model 5 incorporates model 3 for the regressor and thus the convolution equations from that model apply. 
An additional convolution equation is derived here; it is obtained from considering the value of the density weighted conditional expectation in the dual space of generalized functions, $S^{\ast },$ applied to arbitrary $\psi \in S,$ \begin{equation*} (w,\psi )=(E(f(z)y|z),\psi )=(E(f(z)g(x^{\ast })|z),\psi ); \end{equation*} this equals \begin{eqnarray*} &&\int \int g(x^{\ast })f_{x^{\ast },z}(x^{\ast },z)\psi (z)dx^{\ast }dz \\ &=&\int \int g(x^{\ast })f_{x^{\ast },u}(x^{\ast },u)\psi (x^{\ast }+u)dx^{\ast }du \\ &=&\int \int g(x^{\ast })f_{x^{\ast }}(x^{\ast })f_{u}(u)\psi (x^{\ast }+u)dx^{\ast }du=((gf_{x^{\ast }})\ast f_{u},\psi ). \end{eqnarray*} Conditional moments for the regression function need not be integrable or bounded functions of $z$; we require them to be in the space of generalized functions $S^{\ast }.$ 6. Regression with Berkson error. This model may represent the situation when the regressor (observed) $x$ is correlated with the error $v,$ but $z$ is a (vector) possibly representing an instrument uncorrelated with the regression error. Then as is known in addition to the Berkson error convolution equation the equation \begin{equation*} w=E(y|z)=E(g(x)|z)=\int g(x)\frac{f_{x,z}(x,z)}{f_{z}(z)}dx=\int g(z-u)f_{u}(u)du=g\ast f_{u} \end{equation*} holds. This is stated in Meister (2008); however, the approach there is to consider $g$ to be absolutely integrable so that convolution can be defined in the $L_{1}$ space. Here by working in the space of generalized functions $ S^{\ast }$ a much wider nonparametric class of functions that includes regression functions with polynomial growth is allowed. 7. Nonparametric regression with error in the regressor, where Berkson type instruments are assumed available. This model was proposed by Newey (2001), examined in the univariate case by Schennach (2007) and Zinde-Walsh (2009), in the multivariate case in Zinde-Walsh (2012), where the convolution equations given here in Table 2 were derived. 
\subsection{\textbf{Convolution equations in models with conditional independence conditions.}} All the models 1-7 can be extended to include some additional variables where conditionally on those variables, the functions in the model (e.g. conditional distributions) are defined and all the model assumptions hold conditionally. Evdokimov (2011) derived the conditional version of the model 4 from a very general nonparametric panel data model. Model 8 below describes the panel data set-up and how it transforms to conditional model 4 and 4a and possibly model 3 with relaxed independence condition (if the focus is on identifying the regression function). Model 8. Panel data model with conditional independence. Consider a two-period panel data model with an unknown regression function $ m $ and an idiosyncratic (unobserved) $\alpha :$ \begin{eqnarray*} Y_{i1} &=&m(X_{i1},\alpha _{i})+U_{i1}; \\ Y_{i2} &=&m(X_{i2},\alpha _{i})+U_{i2}. \end{eqnarray*} To be able to work with various conditional characteristic functions corresponding assumptions ensuring existence of the conditional distributions need to be made and in what follows we assume that all the conditional density functions and moments exist as generalized functions in $ S^{\ast }$. In Evdokimov (2011) independence (conditional on the corresponding period $ X^{\prime }s)$ of the regression error from $\alpha ,$ and from the $ X^{\prime }s$ and error of the other period is assumed: \begin{equation*} f_{t}=f_{Uit}|_{X_{it},\alpha _{i},X_{i(-t)},U_{i(-t)}}(u_{t}|x,...)=f_{Uit}|_{X_{it}}(u_{t}|x),t=1,2 \end{equation*} with $f_{\cdot |\cdot }$ denoting corresponding conditional densities. 
Conditionally on $X_{i2}=X_{i1}=x$ the model takes the form 4 \begin{equation*} \begin{array}{c} z=x^{\ast }+u; \\ x=x^{\ast }+u_{x} \end{array} \end{equation*} with $z$ representing $Y_{1},x$ representing $Y_{2},$ $x^{\ast }$ standing in for $m(x,\alpha ),$ $u$ for $U_{1}$ and $u_{x}$ for $U_{2}.$ The convolution equations derived here for 4 or 4a now apply to conditional densities. The convolution equations in 4a are similar to Evdokimov; they allow for equations for $f_{u},$ $f_{u_{x}}$ that do not rely on $f_{x^{\ast }}.$ The advantage of those lies in the possibility of identifying the conditional error distributions without placing the usual non-zero restrictions on the characteristic function of $x^{\ast }$ (that represents the function $m$ for the panel model). The panel model can be considered with relaxed independence assumptions. Here in the two-period model we look at forms of dependence that assume zero conditional mean of the second period error, rather than full independence of the first period error: \begin{eqnarray*} f_{Ui1}|_{X_{i1},\alpha _{i},X_{i2},Ui2}(u_{t}|x,...) &=&f_{Ui1}|_{Xi1}(u_{t}|x); \\ E(U_{i2}|X_{i1},\alpha _{i},X_{i2},U_{i1}) &=&0; \\ f_{Ui2}|_{\alpha _{i},X_{i2}=X_{i1}=x}(u_{t}|x,...) &=&f_{Ui2}|_{Xi2}(u_{t}|x). \end{eqnarray*} Then the model maps into the model 3 with the functions in the convolution equations representing conditional densities and allows to identify distribution of $x^{\ast }$ (function $\ m$ in the model). But the conditional distribution of the second-period error in this set-up is not identified. Evdokimov introduced parametric AR(1) or MA(1) dependence in the errors $U$ and to accommodate that extended the model to three periods. Here this would lead in the AR case to the equations in $\left( \ref{ar(1)}\right) .$ Model 9. Errors in variables regression with classical measurement error conditionally on covariates. 
Consider the regression model \begin{equation*} y=g(x^{\ast },t)+v, \end{equation*} with a measurement of unobserved $x^{\ast }$ given by $\ \tilde{z}=x^{\ast }+ \tilde{u},$ with $x^{\ast }\bot \tilde{u}$ conditionally on $t$. Assume that $E(\tilde{u}|t)=0$ and that $E(v|x^{\ast },t)=0.$ Then redefining all the densities and conditional expectations to be conditional on $t$ we get the same system of convolution equations as in Table 2 for model 5 with the unknown functions now being conditional densities and the regression function, $g.$ Conditioning requires assumptions that provide for existence of conditional distribution functions in $S^{\ast }$.
3,474
26,749
en
train
0.94.5
\section{\textbf{Solutions for the models.}}
12
26,749
en
train
0.94.6
\subsection{Existence of solutions} To state results for nonparametric models it is important first to clearly indicate the classes of functions where the solution is sought. Assumption 1 requires that all the (generalized) functions considered are elements in the space of generalized functions $S^{\ast }.$ This implies that in the equations the operation of convolution applied to the two functions from $ S^{\ast }$ provides an element in the space $S^{\ast }.$ This subsection gives high level assumptions on the nonparametric classes of the unknown functions where the solutions can be sought: any functions from these classes that enter into the convolution provide a result in $S^{\ast }.$ No assumptions are needed for existence of convolution and full generality of identification conditions in models 1,2 where the model assumptions imply that the functions represent generalized densities. For the other models including regression models convolution is not always defined in $S^{\ast }.$ Zinde-Walsh (2012) defines the concept of convolution pairs of classes of functions in $S^{\ast }$ where convolution can be applied. To solve the convolution equations a Fourier transform is usually employed, so that e.g. one transforms generalized density functions into characteristic functions. Fourier transform is an isomorphism of the space $ S^{\ast }.$ The Fourier transform of a generalized function $a\in S^{\ast }$ , $Ft(a),$ is defined as follows. For any $\psi \in S,$ as usual $Ft(\psi )(s)=\int \psi (x)e^{isx}dx;$ then the functional $Ft(a)$ is defined by \begin{equation*} (Ft(a),\psi )\equiv (a,Ft(\psi )). \end{equation*} The advantage of applying Fourier transform is that integral convolution equations transform into algebraic equations when the "exchange formula" applies: \begin{equation} a\ast b=c\Longleftrightarrow Ft(a)\cdot Ft(b)=Ft(c). 
\label{exchange} \end{equation} In the space of generalized functions $S^{\ast },$ the Fourier transform and inverse Fourier transform always exist. As shown in Zinde-Walsh (2012) there is a dichotomy between convolution pairs of subspaces in $S^{\ast }$ and the corresponding product pairs of subspaces of their Fourier transforms. The classical pairs of spaces (Schwartz, 1966) are the convolution pair $ \left( S^{\ast },O_{C}^{\ast }\right) $ and the corresponding product pair $ \left( S^{\ast },O_{M}\right) ,$ where $O_{C}^{\ast }$ is the subspace of $ S^{\ast }$ that contains rapidly decreasing (faster than any polynomial) generalized functions and $\mathit{O}_{M}$ is the space of infinitely differentiable functions with every derivative growing no faster than a polynomial at infinity. These pairs are important in that no restriction is placed on one of the generalized functions that could be any element of space $S^{\ast }$; the other belongs to a space that needs to be correspondingly restricted. A disadvantage of the classical pairs is that the restriction is fairly severe, for example, the requirement that a characteristic function be in $O_{M}\,\ $implies existence of all moments for the random variable. Relaxing this restriction would require placing constraints on the other space in the pair; Zinde-Walsh (2012) introduces some pairs that incorporate such trade-offs. In some models the product of a function with a component of the vector of arguments is involved,such as $d(x)=x_{k}a(x),$ then for Fourier transforms $ Ft(d)\left( s\right) =-i\frac{\partial }{\partial s_{k}}Ft(a)(s);$ the multiplication by a variable is transformed into ($-i)$ times the corresponding partial derivative. Since the differentiation operators are continuous in $S^{\ast }$ this transformation does not present a problem. 
\textbf{Assumption 2.} \textit{The functions }$a\in A,b\in B,$\textit{\ are such that }$\left( A,B\right) $\textit{\ form a convolution pair in }$ S^{\ast }$\textit{.} Equivalently, $Ft(a),$ $Ft(b)$ are in the corresponding product pair of spaces. Assumption 2 is applied to model 1 for $a=f_{x^{\ast }},b=f_{u};$ to model 2 with $a=f_{z},b=f_{u};$ to model 3 with $a=f_{x^{\ast }},b=f_{u}$ and with $ a=h_{k},b=f_{u},$ for all $k=1,...,d;$ to model 4a for $a=f_{x^{\ast }},$ or $f_{u_{x}},$ or $h_{k}$ for all $k$ and $b=f_{u};$ to model 5 with $ a=f_{x^{\ast }},$ or $gf_{x^{\ast }},$ or $h_{k}f_{x^{\ast }}$ and $b=f_{u};$ to model 6 with $a=f_{z},$ or $g$ and $b=f_{u};$ to model 7 with $a=g$ or $ h_{k}$ and $b=f_{u}.$ Assumption 2 is a high-level assumption that is a sufficient condition for a solution to the models 1-4 and 6-7 to exist. Some additional conditions are needed for model 5 and are provided below. Assumption 2 is automatically satisfied for generalized density functions, so is not needed for models 1 and 2. Denote by $\bar{D}\subset S^{\ast }$ the subset of generalized derivatives of distribution functions (corresponding to Borel probability measures in $R^{d}$) then in models 1 and 2 $A=B=\bar{D};$ and for the characteristic functions there are correspondingly no restrictions; denote the set of all characteristic functions, $Ft\left( \bar{D}\right) \subset S^{\ast },$ by $\bar{C}.$ Below a (non-exhaustive) list of nonparametric classes of generalized functions that provide sufficient conditions for existence of solutions to the models here is given. The classes are such that they provide minimal or often no restrictions on one of the functions and restrict the class of the other in order that the assumptions be satisfied. In models 3 and 4 the functions $h_{k}$ are transformed into derivatives of continuous characteristic functions. 
An assumption that either the characteristic function of $x^{\ast }$ or the characteristic function of $u$ be continuously differentiable is sufficient, without any restrictions on the other to ensure that Assumption 2 holds. Define the subset of all continuously differentiable characteristic functions by $\bar{C}^{(1)}.$ In model 5 equations involve a product of the regression function $g$ with $ f_{x^{\ast }}.$ Products of generalized functions in $S^{\ast }$ do not always exist and so additional restrictions are needed in that model. If $g$ is an arbitrary element of $S^{\ast },$ then for the product to exist, $ f_{x^{\ast }}$ should be in $\mathit{O}_{M}$. On the other hand, if $ f_{x^{\ast }}$ is an arbitrary generalized density it is sufficient that $g$ and $h_{k}$ belong to the space of $d$ times continuously differentiable functions with derivatives that are majorized by polynomial functions for $ gf_{x^{\ast }},h_{k}f_{x^{\ast }}$ to be elements of $S^{\ast }.$ Indeed, the value of the functional $h_{k}f_{x^{\ast }}$ for an arbitrary $\psi \in S $ is defined by \begin{equation*} (h_{k}f_{x^{\ast }},\psi )=\left( -1\right) ^{d}\int F_{x^{\ast }}(x)\partial ^{(1,...,1)}(h_{k}(x)\psi (x))dx; \end{equation*} here $F$ is the distribution (ordinary bounded) function and this integral exists because $\psi $ and all its derivatives go to zero at infinity faster than any polynomial function. 
Denote by $\bar{S}^{B,1}$ the space of continuously differentiable functions $g\in S^{\ast }$ such that the functions $h_{k}(x)=x_{k}g(x)$ are also continuously differentiable with all derivatives majorized by polynomial functions$.$ Since the products are in $ S^{\ast }$ then the Fourier transforms of the products are defined in $ S^{\ast }.$ Further restrictions requiring the Fourier transforms of the products $gf_{x^{\ast }}$\ and $h_{k}f_{x^{\ast }}$ to be continuously differentiable functions in $S^{\ast }$ would remove any restrictions on $ f_{u}$ for the convolution to exist. Denote the space of all continuously differentiable functions in $S^{\ast }$ by $\bar{S}^{(1)}.$ If $g$ is an ordinary function that represents a regular element in $S^{\ast }$ the infinite differentiability condition on $f_{x^{\ast }}$ can be relaxed to simply requiring continuous first derivatives. In models 6 and 7 if the generalized density function for the error, $f_{u},$ decreases faster than any polynomial (all moments need to exist for that), so that $f_{u}\in \mathit{O}_{C}^{\ast },$ \ then $g$ could be any generalized function in $S^{\ast };$ this will of course hold if $f_{u}$ has bounded support. Generally, the more moments the error is assumed to have, the fewer restrictions on the regression function $g$ are needed to satisfy the convolution equations of the model and the exchange formula. The models 6, 7 satisfy the assumptions for any error $u$ when support of generalized function $g$ is compact (as for the "sum of peaks"), then $g\in E^{\ast }\subset S^{\ast },$ where $E^{\ast }$ is the space of generalized functions with compact support. 
More generally the functions $g$ and all the $h_{k}$ could belong to the space $\mathit{O}_{C}^{\ast }$ of generalized functions that decrease at infinity faster than any polynomial, and still no restrictions need to be placed on $u.$ Denote for any generalized density function $f_{\cdot }$ the corresponding characteristic function, $Ft(f_{\cdot }),$ by $\phi _{\cdot }.$ Denote Fourier transform of the (generalized) regression function $g,$ $Ft(g),$ by $ \gamma .$ The following table summarizes some fairly general sufficient conditions on the models that place restrictions on the functions themselves or on the characteristic functions of distributions in the models that will ensure that Assumption 2 is satisfied and a solution exists. The nature of these assumptions is to provide restrictions on some of the functions that allow the others to be completely unrestricted for the corresponding model. \textbf{Table 3.} Some nonparametric classes of generalized functions for which the convolution equations of the models are defined in $S^{\ast }$. 
\begin{tabular}{|c|c|c|} \hline Model & Sufficient & assumptions \\ \hline 1 & no restrictions: & $\phi _{x^{\ast }}\in \bar{C};\phi _{u}\in \bar{C}$ \\ \hline 2 & no restrictions: & $\phi _{x^{\ast }}\in \bar{C};\phi _{u}\in \bar{C}$ \\ \hline & Assumptions A & Assumptions B \\ \hline \multicolumn{1}{|l|}{$\ \ \ \ $3} & \multicolumn{1}{|l|}{any$\ \ \phi _{x^{\ast }}\in \bar{C};\phi _{u}\in \bar{C}^{(1)}$} & \multicolumn{1}{|l|}{ any $\phi _{u}\in \bar{C};\phi _{x^{\ast }}\in \bar{C}^{(1)}$} \\ \hline 4 & any $\phi _{u_{x}},\phi _{x^{\ast }}\in \bar{C};\phi _{u}\in \bar{C} ^{(1)}$ & any $\phi _{u},\phi _{x^{\ast }}\in \bar{C};\phi _{u_{x}}\in \bar{C }^{(1)}$ \\ \hline 4a & any $\phi _{u_{x}},\phi _{x^{\ast }}\in \bar{C};\phi _{u}\in \bar{C} ^{(1)}$ & any $\phi _{u},\phi _{u_{x}}\in \bar{C};\phi _{x^{\ast }}\in \bar{C }^{(1)}$ \\ \hline \multicolumn{1}{|l|}{$\ \ \ \ $5} & \multicolumn{1}{|l|}{any $g\in S^{\ast };f_{x^{\ast }}\in O_{M};f_{u}\in O_{C}^{\ast }$} & \multicolumn{1}{|l|}{$\ $ any $\ f_{x^{\ast }}\in \bar{D};\ g,h_{k}\in \bar{S}^{B,1};f_{u}\in O_{C}^{\ast }$} \\ \hline \multicolumn{1}{|l|}{$\ \ \ \ $6} & \multicolumn{1}{|l|}{any$\ g\in S^{\ast };f_{u}\in O_{C}^{\ast }$} & \multicolumn{1}{|l|}{$\ g\in O_{C}^{\ast };$ any $f_{u}:\phi _{u}\in \bar{C}$} \\ \hline 7 & any $g\in S^{\ast };f_{u}\in O_{C}^{\ast }$ & $g\in O_{C}^{\ast };$ any $ f_{u}:\phi _{u}\in \bar{C}$ \\ \hline \end{tabular} The next table states the equations and systems of equations for Fourier transforms that follow from the convolution equations. \textbf{Table 4.} The form of the equations for the Fourier transforms:
3,450
26,749
en
train
0.94.7
If $g$ is an ordinary function that represents a regular element in $S^{\ast }$ the infinite differentiability condition on $f_{x^{\ast }}$ can be relaxed to simply requiring continuous first derivatives. In models 6 and 7 if the generalized density function for the error, $f_{u},$ decreases faster than any polynomial (all moments need to exist for that), so that $f_{u}\in \mathit{O}_{C}^{\ast },$ \ then $g$ could be any generalized function in $S^{\ast };$ this will of course hold if $f_{u}$ has bounded support. Generally, the more moments the error is assumed to have, the fewer restrictions on the regression function $g$ are needed to satisfy the convolution equations of the model and the exchange formula. The models 6, 7 satisfy the assumptions for any error $u$ when support of generalized function $g$ is compact (as for the "sum of peaks"), then $g\in E^{\ast }\subset S^{\ast },$ where $E^{\ast }$ is the space of generalized functions with compact support. More generally the functions $g$ and all the $h_{k}$ could belong to the space $\mathit{O}_{C}^{\ast }$ of generalized functions that decrease at infinity faster than any polynomial, and still no restrictions need to be placed on $u.$ Denote for any generalized density function $f_{\cdot }$ the corresponding characteristic function, $Ft(f_{\cdot }),$ by $\phi _{\cdot }.$ Denote Fourier transform of the (generalized) regression function $g,$ $Ft(g),$ by $ \gamma .$ The following table summarizes some fairly general sufficient conditions on the models that place restrictions on the functions themselves or on the characteristic functions of distributions in the models that will ensure that Assumption 2 is satisfied and a solution exists. The nature of these assumptions is to provide restrictions on some of the functions that allow the others to be completely unrestricted for the corresponding model. 
\textbf{Table 3.} Some nonparametric classes of generalized functions for which the convolution equations of the models are defined in $S^{\ast }$. \begin{tabular}{|c|c|c|} \hline Model & Sufficient & assumptions \\ \hline 1 & no restrictions: & $\phi _{x^{\ast }}\in \bar{C};\phi _{u}\in \bar{C}$ \\ \hline 2 & no restrictions: & $\phi _{x^{\ast }}\in \bar{C};\phi _{u}\in \bar{C}$ \\ \hline & Assumptions A & Assumptions B \\ \hline \multicolumn{1}{|l|}{$\ \ \ \ $3} & \multicolumn{1}{|l|}{any$\ \ \phi _{x^{\ast }}\in \bar{C};\phi _{u}\in \bar{C}^{(1)}$} & \multicolumn{1}{|l|}{ any $\phi _{u}\in \bar{C};\phi _{x^{\ast }}\in \bar{C}^{(1)}$} \\ \hline 4 & any $\phi _{u_{x}},\phi _{x^{\ast }}\in \bar{C};\phi _{u}\in \bar{C} ^{(1)}$ & any $\phi _{u},\phi _{x^{\ast }}\in \bar{C};\phi _{u_{x}}\in \bar{C }^{(1)}$ \\ \hline 4a & any $\phi _{u_{x}},\phi _{x^{\ast }}\in \bar{C};\phi _{u}\in \bar{C} ^{(1)}$ & any $\phi _{u},\phi _{u_{x}}\in \bar{C};\phi _{x^{\ast }}\in \bar{C }^{(1)}$ \\ \hline \multicolumn{1}{|l|}{$\ \ \ \ $5} & \multicolumn{1}{|l|}{any $g\in S^{\ast };f_{x^{\ast }}\in O_{M};f_{u}\in O_{C}^{\ast }$} & \multicolumn{1}{|l|}{$\ $ any $\ f_{x^{\ast }}\in \bar{D};\ g,h_{k}\in \bar{S}^{B,1};f_{u}\in O_{C}^{\ast }$} \\ \hline \multicolumn{1}{|l|}{$\ \ \ \ $6} & \multicolumn{1}{|l|}{any$\ g\in S^{\ast };f_{u}\in O_{C}^{\ast }$} & \multicolumn{1}{|l|}{$\ g\in O_{C}^{\ast };$ any $f_{u}:\phi _{u}\in \bar{C}$} \\ \hline 7 & any $g\in S^{\ast };f_{u}\in O_{C}^{\ast }$ & $g\in O_{C}^{\ast };$ any $ f_{u}:\phi _{u}\in \bar{C}$ \\ \hline \end{tabular} The next table states the equations and systems of equations for Fourier transforms that follow from the convolution equations. 
\textbf{Table 4.} The form of the equations for the Fourier transforms: \begin{tabular}{|c|c|c|} \hline Model & Eq's for Fourier transforms & Unknown functions \\ \hline 1 & $\phi _{x^{\ast }}\phi _{u}=\phi _{z};$ & $\phi _{x^{\ast }}$ \\ \hline 2 & $\phi _{x^{\ast }}=\phi _{z}\phi _{-u};$ & $\phi _{x^{\ast }}$ \\ \hline 3 & $\left\{ \begin{array}{c} \phi _{x^{\ast }}\phi _{u}=\phi _{z}; \\ \left( \phi _{x^{\ast }}\right) _{k}^{\prime }\phi _{u}=\varepsilon _{k},k=1,...,d. \end{array} \right. $ & $\phi _{x^{\ast }},\phi _{u}$ \\ \hline 4 & $\left\{ \begin{array}{c} \phi _{x^{\ast }}\phi _{u}=\phi _{z}; \\ \left( \phi _{x^{\ast }}\right) _{k}^{\prime }\phi _{u}=\varepsilon _{k},k=1,...,d; \\ \phi _{x^{\ast }}\phi _{u_{x}}=\phi _{x}. \end{array} \right. $ & $\phi _{x^{\ast }},\phi _{u},\phi _{u_{x}}$ \\ \hline 4a & $\left\{ \begin{array}{c} \phi _{u_{x}}\phi _{u}=\phi _{z-x}; \\ \left( \phi _{u_{x}}\right) _{k}^{\prime }\phi _{u}=\varepsilon _{k},k=1,...,d. \\ \phi _{x^{\ast }}\phi _{u_{x}}=\phi _{x}. \end{array} \right. $ & --"-- \\ \hline 5 & $\left\{ \begin{array}{c} \phi _{x^{\ast }}\phi _{u}=\phi _{z}; \\ Ft\left( gf_{x^{\ast }}\right) \phi _{u}=\varepsilon \\ \left( Ft\left( gf_{x^{\ast }}\right) \right) _{k}^{\prime }\phi _{u}=\varepsilon _{k},k=1,...,d. \end{array} \right. $ & $\phi _{x^{\ast }},\phi _{u},g$ \\ \hline 6 & $\left\{ \begin{array}{c} \phi _{x}=\phi _{-u}\phi _{z}; \\ Ft(g)\phi _{-u}=\varepsilon . \end{array} \right. $ & $\phi _{u},g$ \\ \hline 7 & $\left\{ \begin{array}{c} Ft(g)\phi _{u}=\varepsilon ; \\ \left( Ft\left( g\right) \right) _{k}^{\prime }\phi _{u}=\varepsilon _{k},k=1,...,d. \end{array} \right. $ & $\phi _{u},g$ \\ \hline \end{tabular} Notes. Notation $\left( \cdot \right) _{k}^{\prime }$ denotes the k-th partial derivative of the function. The functions $\varepsilon $ are Fourier transforms of the corresponding $w,$ and $\varepsilon _{k}=-iFt(w_{k})$ defined for the models in Tables 1 and 2. Assumption 2 (that is fulfilled e.g. 
by generalized functions classes of Table 3) ensures existence of solutions to the convolution equations for models 1-7; this does not exclude multiple solutions and the next section provides a discussion of solutions for equations in Table 4.
2,086
26,749
en
train
0.94.8
\subsection{Classes of solutions; support and multiplicity of solutions} Typically, support assumptions are required to restrict multiplicity of solutions; here we examine the dependence of solutions on the support of the functions. The results here also give conditions under which some zeros, e.g. in the characteristic functions, are allowed. Thus in common with e.g. Carrasco and Florens (2010), Evdokimov and White (2011), distributions such as the uniform or triangular for which the characteristic function has isolated zeros are not excluded. The difference here is the extension of the consideration of the solutions to $S^{\ast }$ and to models such as the regression model where this approach to relaxing support assumptions was not previously considered. Recall that for a continuous function $\psi (x)$ on $R^{d}$ support is defined as the set $W=$supp($\psi ),$ such that \begin{equation*} \psi (x)=\left\{ \begin{array}{cc} a\neq 0 & \text{for }x\in W \\ 0 & \text{for }x\in R^{d}\backslash W. \end{array} \right. \end{equation*} Support of a continuous function is an open set. Generalized functions are functionals on the space $S$ and support of a generalized function $b\in S^{\ast }$ is defined as follows (Schwartz, 1967, p. 28). Denote by $\left( b,\psi \right) $ the value of the functional $b$ for $\psi \in S.$ Define a null set for $b\in S^{\ast }$ as the union of supports of all functions in $S$ for which the value of the functional is zero: $\Omega =\{\cup $supp$\left( \psi \right) ,$ $\psi \in S,$ such that $\left( b,\psi \right) =0\}.$ Then supp$\left( b\right) =R^{d}\backslash \Omega .$ Note that a generalized function has support in a closed set, for example, support of the $\delta $-function is just one point 0. 
Note that for model 2 Table 4 gives the solution for $\phi _{x^{\ast }}$ directly and the inverse Fourier transform can provide the (generalized) density function, $f_{x^{\ast }}.$ In Zinde-Walsh (2012) identification conditions in $S^{\ast }$ were given for models 1 and 7 under assumptions that include the ones in Table 3 but could also be more flexible. The equations in Table 4 for models 1,3, 4, 4a, 5, 6 and 7 are of two types, similar to those solved in Zinde-Walsh (2012). One is a convolution with one unknown function; the other is a system of equations with two unknown functions, each leading to the corresponding equations for their Fourier transforms. \subsubsection{Solutions to the equation $\protect\alpha \protect\beta = \protect\gamma .$} Consider the equation \begin{equation} \alpha \beta =\gamma , \label{product} \end{equation} with one unknown function $\alpha ;$ $\beta $ is a given continuous function. By Assumption 2 the non-parametric class for $\alpha $ is such that the equation holds in $S^{\ast }$ on $R^{d}$; it is also possible to consider a nonparametric class for $\alpha $ with restricted support, $\bar{W }.$ Of course without any restrictions $\bar{W}=R^{d}.$ Recall the differentiation operator, $\partial ^{m},$ for $m=(m_{1},...,m_{d})$ and denote by $supp(\beta ,\partial )$ the set $\cup _{\Sigma m_{i}=0}^{\infty }supp(\partial ^{m}\beta );$ where $supp(\partial ^{m}\beta )$ is an open set where a continuous derivative $\partial ^{m}\beta $ exists. 
Any point where $\beta $ is zero belongs to this set if some finite-order partial continuous derivative of $\beta $ is not zero at that point (and in some open neighborhood); for $\beta $ itself $supp(\beta )\equiv supp(\beta ,0).$ Define the functions \begin{equation} \alpha _{1}=\beta ^{-1}\gamma I\left( supp(\beta ,\partial )\right) ;\alpha _{2}(x)=\left\{ \begin{array}{cc} 1 & \text{for }x\in supp(\beta ,\partial ); \\ \tilde{\alpha} & \text{for }x\in \bar{W}\backslash (supp(\beta ,\partial )) \end{array} \right. \label{division} \end{equation} with any $\tilde{\alpha}$ such that $\alpha _{1}\alpha _{2}\in Ft\left( A\right) .$ Consider the case when $\alpha ,\beta $ and thus $\gamma $ are continuous. For any point $x_{0}$ if $\beta (x_{0})\neq 0,$ there is a neighborhood $ N(x_{0})$ where $\beta \neq 0,$ and division by $\beta $ is possible. If $ \beta (x_{0})$ has a zero, it could only be of finite order and in some neighborhood, $N(x_{0})\in supp(\partial ^{m}\beta )$ a representation \begin{equation} \beta =\eta (x)\Pi _{i=1}^{d}\left( x_{i}-x_{0i}\right) ^{m_{i}} \label{finitezero} \end{equation} holds for some continuous function $\eta $ in $S^{\ast },$ such that $\eta >c_{\eta }>0$ on $supp(\eta ).$Then $\eta ^{-1}\gamma $ in $N(x_{0})$ is a non-zero continuous function; division of such a function by $\Pi _{i=1}^{d}\left( x_{i}-x_{0i}\right) ^{m_{i}}$ in $S^{\ast }$ is defined (Schwartz, 1967, pp. 125-126), thus division by $\beta $ is defined in this neighborhood $N(x_{0})$. 
For the set $supp(\beta ,\partial )$ consider a covering of every point by such neighborhoods, the possibility of division in each neighborhood leads to the possibility of division globally on the whole $ supp(\beta ,\partial ).$ Then $\alpha _{1}$ as defined in $\left( \ref{division} \right) $ exists in $S^{\ast }.$ In the case where $\gamma $ is an arbitrary generalized function, if $\beta $ is infinitely differentiable then by (Schwartz, 1967, pp. 126-127) division by $\beta $ is defined on $supp(\beta ,\partial )$ and the solution is given by $\left( \ref{division}\right) .$ For the cases where $\gamma $ is not continuous and $\beta $ is not infinitely differentiable the solution is provided by \begin{equation*} \alpha _{1}=\beta ^{-1}\gamma I\left( supp(\beta ,0)\right) ;\alpha _{2}(x)=\left\{ \begin{array}{cc} 1 & \text{for }x\in supp(\beta ,0); \\ \tilde{\alpha} & \text{for }x\in \bar{W}\backslash (supp(\beta ,0)) \end{array} \right. \end{equation*} with any $\tilde{\alpha}$ such that $\alpha _{1}\alpha _{2}\in Ft\left( A\right) .$ Theorem 2 in Zinde-Walsh (2012) implies that the solution to $\left( \ref {product}\right) $ is $a=Ft^{-1}(\alpha _{1}\alpha _{2});$ the sufficient condition for the solution to be unique is $supp(\beta ,0)\supset \bar{W};$ if additionally either $\gamma $ is a continuous function or $\beta $ is an infinitely continuously differentiable function it is sufficient for uniqueness that $supp(\beta ,\partial )\supset \bar{W}.$ This provides solutions for models 1 and 6 where only equations of this type appear. \subsubsection{Solutions to the system of equations} For models 3,4,5 and 7 a system of equations of the form \begin{eqnarray} && \begin{array}{cc} \alpha \beta & =\gamma ; \\ \alpha \beta _{k}^{\prime } & =\gamma _{k}, \end{array} \label{twoeq} \\ k &=&1,...,d. \notag \end{eqnarray} (with $\beta $ continuously differentiable) arises. 
Theorem 3 in Zinde-Walsh (2012) provides the solution and uniqueness conditions for this system of equations. It is first established that a set of continuous functions $ \varkappa _{k},k=1,...,d,$ that solves the equation \begin{equation} \varkappa _{k}\gamma -\gamma _{k}=0 \label{difeq} \end{equation} in the space $S^{\ast }$ exists and is unique on $W=supp(\gamma )$ as long as $supp(\beta )\supset W.$ Then $\beta _{k}^{\prime }\beta ^{-1}=\varkappa _{k}$ and substitution into $\left( \ref{difeq}\right) $ leads to a system of first-order differential equations in $\beta .$ Case 1. Continuous functions; $W$ is an open set. For the models 3 and 4 the system $\left( \ref{twoeq}\right) $ involves continuous characteristic functions thus there $W$ is an open set. In some cases $W$ can be an open set under conditions of models 5 and 7, e.g. if the regression function is integrable in model 7. For this case represent the open set $W$ as a union of (maximal) connected components $\cup _{v}W_{v}.$ Then by the same arguments as in the proof of Theorem 3 in Zinde-Walsh (2012)\ the solution can be given uniquely on $W$ as long as at some point $ \zeta _{0v}\in (W_{v}\cap W)$ the value $\beta \left( \zeta _{0\nu }\right) $ is known for each of the connected components . Consider then $\beta _{1}(\zeta )=\Sigma _{\nu }[\beta \left( \zeta _{0\nu }\right) \exp \int_{\zeta _{0}}^{\zeta }\tsum\limits_{k=1}^{d}\varkappa _{k}(\xi )d\xi ]I(W_{\nu }),$ where integration is along any arc within the component that connects $\zeta $ to $\zeta _{0\nu }.$ Then $\alpha _{1}=\beta _{1}^{-1}\gamma ,$ and $\alpha _{2},\beta _{2}$ are defined as above by being $1$ on $\cup _{v}W_{v}$ and arbitrary outside of this set. When $\beta (0)=1$ as is the case for the characteristic function, the function is uniquely determined on the connected component that includes 0. 
Evdokimov and White (2012) provide a construction that permits in the univariate case to extend the solution $\beta \left( \zeta _{0\nu }\right) [\exp \int_{\zeta _{0}}^{\zeta }\tsum\limits_{k=1}^{d}\varkappa _{k}(\xi )d\xi ]I(W_{\nu })$ from a connected component of support where $\beta \left( \zeta _{0\nu }\right) $ is known (e.g. at 0 for a characteristic function) to a contiguous connected component when on the border between the two where $\beta =0,$ at least some finite order derivative of $\beta $ is not zero. In the multivariate case this approach can be extended to the same construction along a one-dimensional arc from one connected component to the other. Thus identification is possible on a connected component of $ supp(\beta ,\partial ).$ Case 2. $W$ is a closed set. Generally for models 5 and 7, $W$ is the support of a generalized function and is a closed set. It may intersect with several connected components of support of $\beta .$ Denote by $W_{v\text{ }}$ here the intersection of a connected component of support of $\beta $ and $W.$ Then similarly $\beta _{1}(\zeta )=\tsum\limits_{\nu }[\beta \left( \zeta _{0\nu }\right) \exp \int_{\zeta _{0}}^{\zeta }\tsum\limits_{k=1}^{d}\varkappa _{k}(\xi )d\xi ]I(W_{\nu }),$ where integration is along any arc within the component that connects $\zeta $ to $\zeta _{0\nu }.$ Then $\alpha _{1}=\beta _{1}^{-1}\varepsilon ,$ and $\alpha _{2},\beta _{2}$ are defined as above by being $1$ on $\cup _{v}W_{v}$ and arbitrary outside of this set. The issue of the value of $\beta $ at some point within each connected component arises. 
In the case of $\beta $ being a characteristic function if there is only one connected component, $W$ and $0\in W$ the solution is unique, since then $\beta (0)=1.$ Note that for model 5 the solution to equations of the type $\left( \ref {twoeq}\right) $ would only provide $Ft(gf_{x^{\ast }})$ and $\phi _{u};$ then from the first equation for this model in Table 4 $\phi _{x^{\ast }}$ can be obtained; it is unique if supp$\phi _{x^{\ast }}=$supp$\phi _{z}$. To solve for $g$ find $g=Ft^{-1}\left( Ft\left( gf_{x^{\ast }}\right) \right) \cdot \left( f_{x^{\ast }}\right) ^{-1}.$
3,417
26,749
en
train
0.94.9
\section{Identification, partial identification and well-posedness} \subsection{Identified solutions for the models 1-7} As follows from the discussion of the solutions uniqueness in models 1,2,3,4,4a,5,6 holds (in a few cases up to a value of a function at a point) if all the Fourier transforms are supported over the whole $R^{d};$ in many cases it is sufficient that $supp(\beta ,\partial )=R^{d}.$ The classes of functions could be defined with Fourier transforms supported on some known subset $\bar{W}$ of $R^{d},$ rather than on the whole space; if all the functions considered have $\bar{W}$ as their support, and the support consists of one connected component that includes 0 as an interior point then identification for the solutions holds. For the next table assume that $\bar{W}$ is a single connected component with $0$ as an interior point; again $\bar{W}$ could coincide with $supp(\beta ,\partial )$. For model 5 under Assumption B assume additionally that the value at zero: $ Ft(gf_{x^{\ast }})(0)$ is known; similarly for model 7 under assumption B additionally assume that $Ft(g)(0)$ is known. Table 5. The solutions for identified models on $\bar{W}.$ \begin{tabular}{|c|c|} \hline Model & $ \begin{array}{c} \text{Solution to } \\ \text{equations} \end{array} $ \\ \hline \multicolumn{1}{|l|}{$\ \ \ $1.} & \multicolumn{1}{|l|}{$\ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ f_{x^{\ast }}=Ft^{-1}\left( \phi _{u}^{-1}\phi _{z}\right) .$} \\ \hline 2. & $f_{x^{\ast }}=Ft^{-1}\left( \phi _{-u}\phi _{z}\right) .$ \\ \hline \multicolumn{1}{|l|}{$\ $\ 3.} & \multicolumn{1}{|l|}{$ \begin{array}{c} \text{Under Assumption A} \\ f_{x^{\ast }}=Ft^{-1}(\exp \int_{\zeta _{0}}^{\zeta }\tsum\limits_{k=1}^{d}\varkappa _{k}(\xi )d\xi ), \\ \text{where }\varkappa _{k}\text{ solves }\varkappa _{k}\phi _{z}-[\left( \phi _{z}\right) _{k}^{\prime }-\varepsilon _{k}]=0; \\ f_{u}=Ft^{-1}(\phi _{x^{\ast }}^{-1}\varepsilon ). 
\\ \text{Under Assumption B} \\ f_{u}=Ft^{-1}(\exp \int_{\zeta _{0}}^{\zeta }\tsum\limits_{k=1}^{d}\varkappa _{k}(\xi )d\xi ); \\ \varkappa _{k}\text{ solves }\varkappa _{k}\phi _{z}-\varepsilon _{k}=0; \\ f_{x^{\ast }}=Ft^{-1}(\phi _{u}^{-1}\varepsilon ). \end{array} $} \\ \hline 4 & $ \begin{array}{c} f_{x^{\ast }},f_{u}\text{ obtained similarly to those in 3.;} \\ \phi _{u_{x}}=\phi _{x^{\ast }}^{-1}\phi _{x}. \end{array} $ \\ \hline 4a. & $ \begin{array}{c} f_{u_{x}},f_{u}\text{ obtained similarly to }\phi _{x^{\ast }},\phi _{u} \text{ in 3.;} \\ \phi _{x^{\ast }}=\phi _{u_{x}}^{-1}\phi _{x}. \end{array} $ \\ \hline 5. & $ \begin{array}{c} \text{Three steps:} \\ \text{1. (a) Get }Ft(gf_{x^{\ast }}),\phi _{u}\text{ similarly to }\phi _{x^{\ast }},\phi _{u}\text{ in model 3} \\ \text{(under Assumption A use }Ft(gf_{x^{\ast }})(0))\text{;} \\ \text{2. Obtain }\phi _{x^{\ast }}=\phi _{u}^{-1}\phi _{z}; \\ \text{3. Get }g=\left[ Ft^{-1}\left( \phi _{x^{\ast }}\right) \right] ^{-1}Ft^{-1}(Ft(gf_{x^{\ast }})). \end{array} $ \\ \hline 6. & $\phi _{-u}=\phi _{z}^{-1}\phi _{x}$ and $g=Ft^{-1}(\phi _{x}^{-1}\phi _{z}\varepsilon ).$ \\ \hline 7. & $ \begin{array}{c} \phi _{x^{\ast }},Ft(g)\text{obtained similarly to }\phi _{x^{\ast }},\phi _{u}\text{in }3 \\ \text{(under Assumption A use }Ft(g)(0)). \end{array} $ \\ \hline \end{tabular} \subsection{Implications of partial identification.} Consider the case of Model 1. Essentially lack of identification, say in the case when the error distribution has characteristic function supported on a convex domain $W_{u}$ around zero results in the solution for $\phi _{x^{\ast }}=\phi _{1}\phi _{2},$ with $\phi _{1}$ non-zero and unique on $ W_{u},$ and thus captures the lower-frequency components of $x^{\ast },$ and with $\phi _{2}$ is a characteristic function of a distribution with arbitrary high frequency components. 
Transforming back to densities provides a corresponding model with independent components \begin{equation*} z=x_{1}^{\ast }+x_{2}^{\ast }+u, \end{equation*} where $x_{1}^{\ast }$ uniquely extracts the lower frequency part of observed $z.$ The more important the contribution of $x_{1}^{\ast }$ to $x^{\ast }$ the less important is lack of identification. If the feature of interest as discussed e.g. by Matzkin (2007) involves only low frequency components of $x^{\ast },$ it may still be fully identified even when the distribution for $x^{\ast }$ itself is not. An example of that is a deconvolution applied to an image of a car captured by a traffic camera; although even after deconvolution the image may still appear blurry the licence plate number may be clearly visible. In nonparametric regression the polynomial growth of the regression or the expectation of the response function may be identifiable even if the regression function is not fully identified. Features that are identified include any functional, $\Phi ,$ linear or non-linear on a class of functions of interest, such that in the frequency domain $\Phi $ is supported on $W_{u}.$ \subsection{Well-posedness in $S^{\ast }$} Conditions for well-posedness in $S^{\ast }$ for solutions of the equations entering in models 1-7 were established in Zinde-Walsh (2012). Well-posedness is needed to ensure that if a sequence of functions converges (in the topology of $S^{\ast })$ to the known functions of the equations characterizing the models 1-7 in tables 1 and 2, then the corresponding sequence of solutions will converge to the solution for the limit functions. 
A feature of well-posedness in $S^{\ast }$ is that the solutions are considered in a class of functions that is a bounded set in $S^{\ast }.$ The properties that differentiation is a continuous operation, and that the Fourier transform is an isomorphism of the topological space $S^{\ast },$ make conditions for convergence in this space much weaker than those in function spaces, say, $L_{1},$ $L_{2}.$ Thus for a density that is given by the generalized derivative of the distribution function well-posedness holds in spaces of generalized functions by the continuity of the differentiation operator. For the problems here however, well-posedness does not always obtain. The main sufficient condition is that the inverse of the characteristic function of the measurement error satisfy the condition $\left( \ref{condition} \right) $ with $b=\phi _{u}^{-1}$ on the corresponding support. This holds if either the support is bounded or if the distribution is not super-smooth. If $\phi _{u}$ has some zeros but satisfies the identification conditions so that it has local representation $\left( \ref{finitezero}\right) $ where $ \left( \ref{condition}\right) $ is satisfied for $b=\eta ^{-1}$ well-posedness will hold. An example in Zinde-Walsh (2012) demonstrates that well-posedness of deconvolution will not hold even in the weak topology of $S^{\ast }$ for super-smooth (e.g. Gaussian) distributions on unbounded support. On the other hand, well-posedness of deconvolution in $S^{\ast }$ obtains for ordinary smooth distributions and thus under less restrictive conditions than in function spaces, such as $L_{1}$ or $L_{2}$ usually considered. 
In the models 3-7 with several unknown functions, more conditions are required to ensure that all the operations by which the solutions are obtained are continuous in the topology of $S^{\ast }.$ It may not be sufficient to assume $\left( \ref{condition}\right) $ for the inverses of unknown functions where the solution requires division; for continuity of the solution the condition may need to apply uniformly. Define a class of ordinary functions on $R^{d},$ $\Phi (m,V)$ (with $m$ a vector of integers, $V$ a positive constant) where $b\in \Phi (m,V)$ if \begin{equation} \int \Pi \left( (1+t_{i}^{2})^{-1}\right) ^{m_{i}}\left\vert b(t)\right\vert dt<V<\infty .\text{ } \label{condb} \end{equation} Then in Zinde-Walsh (2012) well-posedness is proved for model 7 as long as in addition to Assumption A or B, for some $\Phi (m,V)$ both $\phi _{u}$ and $\phi _{u}^{-1}$ belong to the class $\Phi (m,V)$. This condition is fulfilled by non-supersmooth $\phi _{u};$ this could be an ordinary smooth distribution or a mixture with some mass point. A convenient way of imposing well-posedness is to restrict the support of functions considered to a bounded $\bar{W}.$ If the features of interest are associated with low-frequency components only, then if the functions are restricted to a bounded space the low-frequency part can be identified and is well-posed.
2,679
26,749
en
train
0.94.10
\section{Implications for estimation} \subsection{Plug-in non-parametric estimation} Solutions in Table 5 for the equations that express the unknown functions via known functions of observables give scope for plug-in estimation. As seen e.g. in the example of Model 4, 4 and 4a are different expressions that will provide different plug-in estimators for the same functions. The functions of the observables here are characteristic functions and Fourier transforms of density-weighted conditional expectations and in some cases their derivatives, that can be estimated by non-parametric methods. There are some direct estimators, e.g. for characteristic functions. In the space $S^{\ast }$ the Fourier transform and inverse Fourier transform are continuous operations thus using standard estimators of density weighted expectations and applying the Fourier transform would provide consistency in $S^{\ast }$; the details are provided in Zinde-Walsh (2012). Then the solutions can be expressed via those estimators by the operations from Table 5 and, as long as the problem is well-posed, the estimators will be consistent and the convergence will obtain at the appropriate rate. As in An and Hu (2012), the convergence rate may be even faster for well-posed problems in $S^{\ast }$ than the usual nonparametric rate in (ordinary) function spaces. For example, as demonstrated in Zinde-Walsh (2008) kernel estimators of density that may diverge if the distribution function is not absolutely continuous, are always (under the usual assumptions on kernel/bandwidth) consistent in the weak topology of the space of generalized functions, where the density problem is well-posed. Here, well-posedness holds for deconvolution as long as the error density is not super-smooth. 
\subsection{Regularization in plug-in estimation} When well-posedness cannot be ensured, plug-in estimation will not provide consistent results and some regularization is required; usually spectral cut-off is employed for the problems considered here. In the context of these non-parametric models regularization requires extra information: the knowledge of the rate of decay of the Fourier transform of some of the functions. For model 1 this is not a problem since $\phi _{u}$ is assumed known; the regularization uses the information about the decay of this characteristic function to construct a sequence of compactly supported solutions with support increasing at a corresponding rate. In $S^{\ast }$ no regularization is required for plug-in estimation unless the error distribution is super-smooth. Exponential growth in $\phi _{u}^{-1}$ provides a logarithmic rate of convergence in function classes for the estimator (Fan, 1991). Below we examine spectral cut-off regularization for the deconvolution in $S^{\ast }$ when the error density is super-smooth. With super-smooth error in $S^{\ast }$ define a class of generalized functions $\Phi (\Lambda ,m,V)$ for some non-negative-valued function $ \Lambda $; a generalized function $b\in \Phi (\Lambda ,m,V)$ if there exists a function $\bar{b}(\zeta )\in \Phi (m,V)$ such that also $\bar{b}(\zeta )^{-1}\in \Phi (m,V)$ and $b=\bar{b}(\zeta )\exp \left( -\Lambda (\zeta )\right) .$ Note that a linear combination of functions in $\Phi (\Lambda ,m,V)$ belongs to the same class. 
Define convergence: a sequence of $ b_{n}\in \Phi (\Lambda ,m,V)$ converges to zero if the corresponding sequence $\bar{b}_{n}$ converges to zero in $S^{\ast }.$ Convergence in probability for a sequence of random functions, $\varepsilon _{n},$ in $S^{\ast }$ is defined as follows: $(\varepsilon _{n}-\varepsilon )\rightarrow _{p}0$ in $S^{\ast }$ if for any set $\psi _{1},...,\psi _{v}\in S$ the random vector of the values of the functionals converges: $ \left( (\varepsilon _{n}-\varepsilon ,\psi _{1}),...,(\varepsilon _{n}-\varepsilon ,\psi _{v})\right) \rightarrow _{p}0.$ \textbf{Lemma 2.} \textit{If in model 1 }$\phi _{u}=b\in \Phi (\Lambda ,m,V), $\textit{\ where }$\Lambda $\textit{\ is a polynomial function of order no more than }$k,$\textit{\ and }$\varepsilon _{n}$\textit{\ is a sequence of estimators of }$\varepsilon $\textit{\ that are consistent in }$S^{\ast }:r_{n}(\varepsilon _{n}-\varepsilon )\rightarrow _{p}0$\textit{\ in }$ S^{\ast }$\textit{\ at some rate }$r_{n}\rightarrow \infty ,$\textit{\ then for any sequence of constants }$\bar{B}_{n}:$\textit{\ }$0<\bar{B} _{n}<\left( \ln r_{n}\right) ^{\frac{1}{k}}$\textit{\ and the corresponding set }$B_{n}=\left\{ \zeta :\left\Vert \zeta \right\Vert <\bar{B}_{n}\right\} $\textit{\ the sequence of regularized estimators }$\phi _{u}^{-1}(\varepsilon _{n}-\varepsilon )I(B_{n})$\textit{\ converges to zero in probability in }$S^{\ast }.$\textit{\ } Proof. For $n$ the value of the random functional \begin{equation*} (\phi _{u}^{-1}(\varepsilon _{n}-\varepsilon )I(B_{n}),\psi )=\int \bar{b} ^{-1}(\zeta )r_{n}(\varepsilon _{n}-\varepsilon )r_{n}^{-1}I(B_{n})\exp \left( \Lambda (\zeta )\right) \psi (\zeta )d\zeta . 
\end{equation*} Multiplication by $\bar{b}^{-1}\in \Phi (m,V),$ that corresponds to $\phi _{u}=b$ does not affect convergence thus $\bar{b}^{-1}(\zeta )r_{n}(\varepsilon _{n}-\varepsilon )$ converges to zero in probability in $ S^{\ast }.$ To show that $(\phi _{u}^{-1}(\varepsilon _{n}-\varepsilon )I(B_{n}),\psi )$ converges to zero it is sufficient to show that the function $r_{n}^{-1}I(B_{n})\exp \left( \Lambda (\zeta )\right) \psi (\zeta ) $ is bounded$.$ It is then sufficient to find $B_{n}$ such that $ r_{n}^{-1}I(B_{n})\exp \left( \Lambda (\zeta )\right) $ is bounded (by possibly a polynomial), thus it is sufficient that $\underset{B_{n}}{\sup } \left\vert \exp \left( \Lambda (\zeta )\right) r_{n}^{-1}\right\vert $ be bounded. This will hold if $\exp \left( \bar{B}_{n}^{k}\right) <r_{n},$ $ \bar{B}_{n}^{k}<\ln r_{n}.\blacksquare $ Of course an even slower growth for spectral cut-off would result from $ \Lambda $ that grows faster than a polynomial. The consequence of the slow growth of the support is usually a correspondingly slow rate of convergence for $\phi _{u}^{-1}\varepsilon _{n}I(B_{n}).$ Additional conditions (as in function spaces) are needed for the regularized estimators to converge to the true $\gamma $. It may be advantageous to focus on lower frequency components and ignore the contribution from high frequencies when the features of interest depend on the contribution at low frequency. \section{Concluding remarks} Working in spaces of generalized functions extends the results on nonparametric identification and well-posedness for a wide class of models. Here identification in deconvolution is extended to generalized densities in the class of all distributions from the usually considered classes of integrable density functions. 
In regression with Berkson error nonparametric identification in $S^{\ast }$\ holds for functions of polynomial growth, extending the usual results obtained in $L_{1};$ a similar extension applies to regression with measurement error and Berkson type measurement; this makes it possible to consider binary choice and polynomial regression models. Also, identification in models with sum-of-peaks regression function that cannot be represented in function spaces is included. Well-posedness results in $ S^{\ast }$ also extend the results in the literature provided in function spaces; well-posedness of deconvolution holds as long as the characteristic function of the error distribution does not go to zero at infinity too fast (as e.g. super-smooth) and a similar condition provides well-posedness in the other models considered here. Further investigation of the properties of estimators in spaces of generalized functions requires deriving the generalized limit process for the function being estimated and investigating when it can be described as a generalized Gaussian process. A generalized Gaussian limit process holds for the kernel estimator of the generalized density function (Zinde-Walsh, 2008). Determining the properties of inference based on the limit process for generalized random functions requires both further theoretical development and simulation evidence. \end{document}
2,212
26,749
en
train
0.95.0
\begin{document} \def\R{{\mathbb{R}}} \newcommand{ \Om }{ \Omega} \newcommand{ \pOm}{\partial \Omega} \newcommand{ \RO}{\mathbb R^n\setminus \Omega} \title{Regularity of extremal solutions of nonlocal elliptic systems} \author{Mostafa Fazly} \address{Department of Mathematics, The University of Texas at San Antonio, San Antonio, TX 78249, USA} \email{[email protected]} \maketitle \begin{abstract} We examine regularity of the extremal solution of the nonlinear nonlocal eigenvalue problem \begin{eqnarray*} \left\{ \begin{array}{lcl} \mathcal L u &=& \lambda F(u,v) \qquad \text{in} \ \ \Omega, \\ \mathcal L v &=& \gamma G(u,v) \qquad \text{in} \ \ \Omega, \\ u,v &=&0 \qquad \qquad \text{on} \ \ \RO , \end{array}\right. \end{eqnarray*} with an integro-differential operator, including the fractional Laplacian, of the form \begin{equation*}\label{} \mathcal L(u (x))= \lim_{\epsilon\to 0} \int_{\mathbb R^n\setminus B_\epsilon(x) } [u(x) - u(z)] J(z-x) dz , \end{equation*} when $J$ is a nonnegative measurable even jump kernel. In particular, we consider jump kernels of the form of $J(y)=\frac{a(y/|y|)}{|y|^{n+2s}}$ where $s\in (0,1)$ and $a$ is any nonnegative even measurable function in $L^1(\mathbb {S}^{n-1})$ that satisfies ellipticity assumptions. We first establish stability inequalities for minimal solutions of the above system for a general nonlinearity and a general kernel. Then, we prove regularity of the extremal solution in dimensions $n < 10s$ and $ n<2s+\frac{4s}{p\mp 1}[p+\sqrt{p(p\mp1)}]$ for the Gelfand and Lane-Emden systems when $p>1$ (with positive and negative exponents), respectively. When $s\to 1$, these dimensions are optimal. However, for the case of $s\in(0,1)$ getting the optimal dimension remains an open problem. Moreover, for general nonlinearities, we consider gradient systems and we establish regularity of the extremal solution in dimensions $n<4s$. 
As far as we know, this is the first regularity result on the extremal solution of nonlocal system of equations. \end{abstract} \noindent {\it \footnotesize 2010 Mathematics Subject Classification}. {\scriptsize 35R09, 35R11, 35B45, 35B65, 35J50}\\ {\it \footnotesize Key words: Nonlocal elliptic systems, regularity of extremal solutions, stable solutions, nonlinear eigenvalue problems}. {\scriptsize }
814
31,238
en
train
0.95.1
\section{Introduction and main results}\label{secin} Let $\Omega\subset\mathbb R^n$ be a bounded smooth domain. Consider the nonlinear nonlocal eigenvalue problem \begin{eqnarray*} (P)_{\lambda,\gamma} \qquad \left\{ \begin{array}{lcl} \mathcal L u &=& \lambda F(u,v) \qquad \text{in} \ \ \Omega, \\ \mathcal L v &=& \gamma G(u,v) \qquad \text{in} \ \ \Omega, \\ u,v &=&0 \qquad \qquad \text{on} \ \ \RO , \end{array}\right. \end{eqnarray*} where $ \lambda, \gamma$ are positive parameters, $F,G$ are smooth functions and the operator $\mathcal L$ is an integral operator of convolution type \begin{equation}\label{Lui} \mathcal L(u (x))= \lim_{\epsilon\to 0} \int_{\mathbb R^n\setminus B_\epsilon(x) } [u(x) - u(z)] J(z-x) dz . \end{equation} Here, $J$ is a nonnegative measurable even jump kernel such that \begin{equation} \int_{\mathbb R^n} \min\{|y|^2,1\} J(y) dy<\infty. \end{equation} The above nonlocal operator with a measurable kernel \begin{equation}\label{Jump} J(x,z)= \frac{c(x,z)}{|x-z|^{n+2s}}, \end{equation} when the function $c(x,z)$ is bounded between two positive constants, $0<c_1 \le c_2$, is studied extensively in the literature from both theory of partial differential equations and theory of probability points of view, see the book of Bass \cite{bass} and references therein. Integro-differential equations and systems, of the above form, arise naturally in the study of stochastic processes with jumps, and more precisely in L\'{e}vy processes. A L\'{e}vy process is a stochastic process with independent and stationary increments. A special class of such processes is the so called stable processes. These are the processes that satisfy self-similarity properties, and they are also the ones appearing in the Generalized Central Limit Theorem. We refer interested readers to the book of Bertoin \cite{ber} for more information. 
The infinitesimal generator of any isotropically symmetric stable L\'{e}vy process in $\mathbb R^n$ is \begin{equation}\label{} \mathcal Lu(x)=\int_{\mathbb S^{n-1}} \int_{-\infty}^\infty [u(x+r\theta)+u(x-r\theta) -2 u(x)]\frac{dr}{r^{1+2s}} d\mu(\theta), \end{equation} where $\mu$ is any nonnegative and finite measure on the unit sphere $\mathbb S^{n-1}$ called the spectral measure and $s\in (0, 1)$. When the spectral measure is absolutely continuous, $d\mu(\theta) = a(\theta)d\theta$, the above operators can be rewritten in the form of \begin{equation}\label{La} \mathcal L(u (x))= \lim_{\epsilon\to 0} \int_{\mathbb R^n\setminus B_\epsilon(x) } [ u(x+y)+u(x-y)-2u(x)] \frac{a(y/|y|)}{|y|^{n+2s}} dy , \end{equation} where $s\in (0,1)$ and $a$ is any nonnegative even function in $L^1(\mathbb S^{n-1})$. Note that the fractional Laplacian operator $\mathcal L={(-\Delta)}^{s}$ with $0<s<1$ that is \begin{equation}\label{Luj} \mathcal L u(x) = \lim_{\epsilon\to 0} \int_{\mathbb R^n\setminus B_\epsilon(x) } [u(x) - u(z)] \frac{c_{n,s}}{|x-z|^{n+2s}} dz, \end{equation} for a positive constant $c_{n,s}$ is the simplest stable L\'{e}vy process for $d\mu(\theta) = c_{n,s} d\theta$. Note that the above operator can be written in the form of (\ref{Lui}) due to the fact that $a$ is even. The regularity of solutions for the equation $\mathcal Lu=f$ has been studied thoroughly in the literature by many experts and in this regard we refer interested readers to \cite{bass, cs, fro1, rs1, si} and references therein. The most common assumption on the jump kernel in this context is $0<c_1\le a(\theta) \le c_2$ in $\mathbb S^{n-1}$ and occasionally $a(\theta)\ge c_1>0$ in a subset of $\mathbb S^{n-1}$ with positive measure. 
In this article, we consider the ellipticity assumption on the operator $\mathcal L$ of the form \begin{equation}\label{c1c2} 0<c_1 \le \inf_{\nu\in \mathbb S^{n-1}} \int_{\mathbb S^{n-1}} |\nu\cdot\theta|^{2s} a(\theta)d\theta \ \ \text{and} \ \ 0 \le a(\theta) < c_2 \ \ \text{for all}\ \ \theta\in \mathbb S^{n-1}, \end{equation} where $c_1$ and $c_2$ are constants. Note that regularity results (interior and boundary) under such an assumption on general operator $\mathcal L$ is studied in the literature, and in this regard we refer interested readers to \cite{rs2, rs1} and references therein. For particular nonlinearities of $F$ and $G$, we consider the following Gelfand system \begin{eqnarray*} (G)_{\lambda,\gamma}\qquad \left\{ \begin{array}{lcl} \mathcal L u &=& \lambda e^v \qquad \text{in} \ \ \Omega, \\ \mathcal L v &=& \gamma e^u \qquad \text{in} \ \ \Omega, \\ u, v &=&0 \qquad \text{on} \ \ \RO , \end{array}\right. \end{eqnarray*} and the Lane-Emden system, when $p>1$ \begin{eqnarray*} (E)_{\lambda,\gamma}\qquad \left\{ \begin{array}{lcl} \mathcal L u &=& \lambda (1+v)^p \qquad \text{in} \ \ \Omega, \\ \mathcal L v &=& \gamma (1+u)^p \qquad \text{in} \ \ \Omega, \\ u, v &=&0 \qquad \qquad \text{on} \ \ \RO, \end{array}\right. \end{eqnarray*} and the Lane-Emden system with singular nonlinearity, for $p>1$ and when $0<u,v<1$ \begin{eqnarray*} (M)_{\lambda,\gamma}\qquad \left\{ \begin{array}{lcl} \mathcal L u &=& \frac{\lambda}{(1-v)^p} \qquad \text{in} \ \ \Omega, \\ \mathcal L v &=& \frac{\gamma}{(1-u)^p} \qquad \text{in} \ \ \Omega, \\ u, v &=&0 \qquad \qquad \text{on} \ \ \RO. \end{array}\right. \end{eqnarray*} Note that for the case of $p=2$ the above singular nonlinearity and system is known as the MicroElectroMechanical Systems (MEMS), see \cite{egg2,lw} and references therein for the mathematical analysis of such equations. 
In addition, we study the following gradient system with more general nonlinearities \begin{eqnarray*} (H)_{\lambda,\gamma}\qquad \left\{ \begin{array}{lcl} \mathcal L u &=& \lambda f'(u) g(v) \qquad \text{in} \ \ \Omega, \\ \mathcal L v &=& \gamma f(u) g'(v) \qquad \text{in} \ \ \Omega, \\ u, v &=&0 \qquad \qquad \text{on} \ \ \RO. \end{array}\right. \end{eqnarray*} The nonlinearities $f$ and $g$ will satisfy various properties but will always at least satisfy \begin{equation} \label{R} f \text{ is smooth, increasing and convex with } f(0)=1 \text{ and } f \text{ superlinear at infinity}. \end{equation} A bounded weak solution pair $(u,v)$ is called a classical solution when both components $u,v$ are regular in the interior of $\Omega$ and $(P)_{\lambda,\gamma}$ holds. Given a nonlinearity $ f$ which satisfies (\ref{R}), the following nonlinear eigenvalue problem \begin{eqnarray*} \hbox{$(Q)_{\lambda}$}\hskip 50pt \left\{ \begin{array}{lcl} -\Delta u &=& \lambda f(u)\qquad \text{in} \ \ \Omega, \\ u &=&0 \qquad \qquad \text{on} \ \ \partial \Omega, \end{array}\right. \end{eqnarray*} is now well-understood. Brezis and V\'{a}zquez in \cite{BV} raised the question of determining the boundedness of $u^*$ for general nonlinearities $f$ satisfying (\ref{R}). See, for instance, \cite{BV,Cabre,CC, CR, cro, rs, cdds, Nedev,bcmr,v2,ns} for both local and nonlocal cases. It is known that there exists a critical parameter $ \lambda^* \in (0,\infty)$, called the extremal parameter, such that for all $ 0<\lambda < \lambda^*$ there exists a smooth, minimal solution $u_\lambda$ of $(Q)_\lambda$. Here the minimal solution means in the pointwise sense. In addition for each $ x \in \Omega$ the map $ \lambda \mapsto u_\lambda(x)$ is increasing in $ (0,\lambda^*)$. This allows one to define the pointwise limit $ u^*(x):= \lim_{\lambda \nearrow \lambda^*} u_\lambda(x)$ which can be shown to be a weak solution, in a suitably defined sense, of $(Q)_{\lambda^*}$. 
For this reason $ u^*$ is called the extremal solution. It is also known that for $ \lambda >\lambda^*$, there are no weak solutions of $(Q)_\lambda$. The regularity of the extremal solution has been of great interest in the literature. There have been several attempts to tackle the problem and here we list a few. For a general nonlinearity $f$ satisfying (\ref{R}), Nedev in \cite{Nedev} proved that $u^*$ is bounded when $ n \le 3$. This was extended to dimension four when $ \Omega$ is a convex domain by Cabr\'{e} in \cite{Cabre}. The convexity of the domain was relaxed by Villegas in \cite{v2}. Most recently, Cabr\'{e} et al. in \cite{cfsr} claimed the regularity result when $n\le 9$. For the particular nonlinearity $f(u)=e^u$, known as the Gelfand equation, the regularity is shown $u^*\in L^{\infty}(\Omega)$ for dimensions $n<10$ by Crandall and Rabinowitz in \cite{CR}, see also \cite{far2}. If $ \Omega$ is a radial domain in $ {\mathbb{R}}^n$ with $ n <10$ the regularity is shown in \cite{CC} when $f$ is a general nonlinearity satisfying conditions (\ref{R}) but without the convexity assumption. In view of the above result for the exponential nonlinearity, this is optimal. Note that for the case of $\Omega=B_1$, the classification of all radial solutions to this problem was originally done by Liouville in \cite{liou} for $n=2$ and then in higher dimensions in \cite{ns, jl,MP1} and references therein. For power nonlinearity $f(u)=(1+u)^p$ and for singular nonlinearity $f(u)=(1-u)^{-p}$ when $0<u<1$ for $p>1$, known as the Lane-Emden equation and MEMS equation respectively, the regularity of extremal solutions is established for the Joseph-Lundgren exponent, see \cite{jl}, in the literature. We refer interested readers to \cite{gg,egg1,egg2,far1,CR} and references therein for regularity results and Liouville theorems. 
The regularity of extremal solutions for nonlocal eigenvalue problem, \begin{eqnarray*} \hbox{$(S)_{\lambda}$}\hskip 50pt \left\{ \begin{array}{lcl} {(-\Delta)}^s u &=& \lambda f(u)\qquad \text{in} \ \ \Omega, \\
3,294
31,238
en
train
0.95.2
\mathcal L u &=& \frac{\lambda}{(1-v)^p} \qquad \text{in} \ \ \Omega, \\ \mathcal L v &=& \frac{\gamma}{(1-u)^p} \qquad \text{in} \ \ \Omega, \\ u, v &=&0 \qquad \qquad \text{on} \ \ \RO. \end{array}\right. \end{eqnarray*} Note that for the case of $p=2$ the above singular nonlinearity and system is known as the MicroElectroMechanical Systems (MEMS), see \cite{egg2,lw} and references therein for the mathematical analysis of such equations. In addition, we study the following gradient system with more general nonlinearities \begin{eqnarray*} (H)_{\lambda,\gamma}\qquad \left\{ \begin{array}{lcl} \mathcal L u &=& \lambda f'(u) g(v) \qquad \text{in} \ \ \Omega, \\ \mathcal L v &=& \gamma f(u) g'(v) \qquad \text{in} \ \ \Omega, \\ u, v &=&0 \qquad \qquad \text{on} \ \ \RO. \end{array}\right. \end{eqnarray*} The nonlinearities $f$ and $g$ will satisfy various properties but will always at least satisfy \begin{equation} \label{R} f \text{ is smooth, increasing and convex with } f(0)=1 \text{ and } f \text{ superlinear at infinity}. \end{equation} A bounded weak solution pair $(u,v)$ is called a classical solution when both components $u,v$ are regular in the interior of $\Omega$ and $(P)_{\lambda,\gamma}$ holds. Given a nonlinearity $ f$ which satisfies (\ref{R}), the following nonlinear eigenvalue problem \begin{eqnarray*} \hbox{$(Q)_{\lambda}$}\hskip 50pt \left\{ \begin{array}{lcl} -\Delta u &=& \lambda f(u)\qquad \text{in} \ \ \Omega, \\ u &=&0 \qquad \qquad \text{on} \ \ \partial \Omega, \end{array}\right. \end{eqnarray*} is now well-understood. Brezis and V\'{a}zquez in \cite{BV} raised the question of determining the boundedness of $u^*$ for general nonlinearities $f$ satisfying (\ref{R}). See, for instance, \cite{BV,Cabre,CC, CR, cro, rs, cdds, Nedev,bcmr,v2,ns} for both local and nonlocal cases. 
It is known that there exists a critical parameter $ \lambda^* \in (0,\infty)$, called the extremal parameter, such that for all $ 0<\lambda < \lambda^*$ there exists a smooth, minimal solution $u_\lambda$ of $(Q)_\lambda$. Here the minimal solution means in the pointwise sense. In addition for each $ x \in \Omega$ the map $ \lambda \mapsto u_\lambda(x)$ is increasing in $ (0,\lambda^*)$. This allows one to define the pointwise limit $ u^*(x):= \lim_{\lambda \nearrow \lambda^*} u_\lambda(x)$ which can be shown to be a weak solution, in a suitably defined sense, of $(Q)_{\lambda^*}$. For this reason $ u^*$ is called the extremal solution. It is also known that for $ \lambda >\lambda^*$, there are no weak solutions of $(Q)_\lambda$. The regularity of the extremal solution has been of great interest in the literature. There have been several attempts to tackle the problem and here we list a few. For a general nonlinearity $f$ satisfying (\ref{R}), Nedev in \cite{Nedev} proved that $u^*$ is bounded when $ n \le 3$. This was extended to dimension four when $ \Omega$ is a convex domain by Cabr\'{e} in \cite{Cabre}. The convexity of the domain was relaxed by Villegas in \cite{v2}. Most recently, Cabr\'{e} et al. in \cite{cfsr} claimed the regularity result when $n\le 9$. For the particular nonlinearity $f(u)=e^u$, known as the Gelfand equation, the regularity is shown $u^*\in L^{\infty}(\Omega)$ for dimensions $n<10$ by Crandall and Rabinowitz in \cite{CR}, see also \cite{far2}. If $ \Omega$ is a radial domain in $ {\mathbb{R}}^n$ with $ n <10$ the regularity is shown in \cite{CC} when $f$ is a general nonlinearity satisfying conditions (\ref{R}) but without the convexity assumption. In view of the above result for the exponential nonlinearity, this is optimal. 
Note that for the case of $\Omega=B_1$, the classification of all radial solutions to this problem was originally done by Liouville in \cite{liou} for $n=2$ and then in higher dimensions in \cite{ns, jl,MP1} and references therein. For power nonlinearity $f(u)=(1+u)^p$ and for singular nonlinearity $f(u)=(1-u)^{-p}$ when $0<u<1$ for $p>1$, known as the Lane-Emden equation and MEMS equation respectively, the regularity of extremal solutions is established for the Joseph-Lundgren exponent, see \cite{jl}, in the literature. We refer interested readers to \cite{gg,egg1,egg2,far1,CR} and references therein for regularity results and Liouville theorems. The regularity of extremal solutions for nonlocal eigenvalue problem, \begin{eqnarray*} \hbox{$(S)_{\lambda}$}\hskip 50pt \left\{ \begin{array}{lcl} {(-\Delta)}^s u &=& \lambda f(u)\qquad \text{in} \ \ \Omega, \\ u &=&0 \qquad \text{on} \ \ \RO, \end{array}\right. \end{eqnarray*} is studied in the literature, see \cite{rs,r1, sp, cdds}, when $0<s<1$. However, there are various questions remaining as open problems. Ros-Oton and Serra in \cite{rs} showed that for a general nonlinearity $f$, $u^*$ is bounded when $n<4s$. In addition, if the following limit exists \begin{equation}\label{conf} \lim_{t\to\infty} \frac{f(t)f''(t)}{\left|f'(t)\right|^2} <\infty , \end{equation} then $u^*$ is bounded when $n<10s$. Note that specific nonlinearities $f(u)=e^u$, $f(u)=(1+u)^p$ and $f(u)=(1-u)^{-p}$ for $p>1$ satisfy the above condition (\ref{conf}). When $s\to 1$, the dimension $n<10$ is optimal. However, for the fractional Laplacian $n<10s$ is not optimal, see Remark \ref{rem1}. In the current article, we prove counterparts of these regularity results for system of nonlocal equations. Later in \cite{r1}, Ros-Oton considered the fractional Gelfand problem, $f(u)=e^u$, on a domain $\Omega$ that is convex in the $x_i$-direction and symmetric with respect to $\{x_i=0\}$, for $1\le i\le n$. 
As an example, the unit ball satisfies these conditions. And he proved that $u^*$ is bounded for either $n\le 2s$ or $n>2s$ and \begin{equation} \frac{ \Gamma(\frac{n}{2}) \Gamma(1+s)}{\Gamma(\frac{n-2s}{2})} > \frac{ \Gamma^2(\frac{n+2s}{4})}{\Gamma^2(\frac{n-2s}{4})} . \end{equation} This, in particular, implies that $u^*$ is bounded in dimensions $ n\le 7$ for all $s\in (0,1)$. The above inequality is expected to provide the optimal dimension, see Remark \ref{rem1}. Relaxing the convexity and symmetry conditions on the domain remains an open problem. Capella et al. in \cite{cdds} studied the extremal solution of a problem related to $(S)_{\lambda}$ in the unit ball $B_1$ with a spectral fractional Laplacian operator that is defined using Dirichlet eigenvalues and eigenfunctions of the Laplacian operator in $B_1$. They showed that $u^*\in L^\infty(B_1)$ when $2\le n< 2\left[s+2+\sqrt{2(s+1)}\right]$. More recently, Sanz-Perela in \cite{sp} proved regularity of the extremal solution of $(S)_{\lambda}$ with the fractional Laplacian operator in the unit ball with the same condition on $n$ and $s$. This implies that $u^*$ is bounded in dimensions $2\le n\le 6$ for all $s\in (0,1)$. Note also that it is well-known that there is a correspondence between the regularity of stable solutions on bounded domains and the Liouville theorems for stable solutions on the entire space, via rescaling and a blow-up procedure. For the classification of solutions of above nonlocal equations on the entire space we refer interested readers to \cite{clo,ddw,fw,li}, and for the local equations to \cite{egg2,far1,far2} and references therein. For the case of systems, as discussed in \cite{cf,Mont,faz}, set $ \mathcal{Q}:=\{ (\lambda,\gamma): \lambda, \gamma >0 \}$ and define \begin{equation} \mathcal{U}:= \left\{ (\lambda,\gamma) \in \mathcal{Q}: \mbox{ there exists a smooth solution $(u,v)$ of $(P)_{\lambda,\gamma}$} \right\}. \end{equation} We assume that $F(0,0),G(0,0)>0$. 
A simple argument shows that if $F$ is superlinear at infinity for $ u$, uniformly in $v$, then the set of $ \lambda$ in $\mathcal{U}$ is bounded. Similarly we assume that $ G$ is superlinear at infinity for $ v$, uniformly in $u$, and hence we get $ \mathcal{U}$ is bounded. We also assume that $F,G$ are increasing in each variable. This allows the use of a sub/supersolution approach and one easily sees that if $ (\lambda,\gamma) \in \mathcal{U}$ then $ (0,\lambda] \times (0,\gamma] \subset \mathcal{U}$. One also sees that $ \mathcal{U}$ is nonempty. We now define $ \Upsilon:= \partial \mathcal{U} \cap \mathcal{Q}$, which plays the role of the extremal parameter $ \lambda^*$. Various properties of $ \Upsilon$ are known, see \cite{Mont}. Given $ (\lambda^*,\gamma^*) \in \Upsilon$ set $ \sigma:= \frac{\gamma^*}{\lambda^*} \in (0,\infty)$ and define \begin{equation} \Gamma_\sigma:=\{ (\lambda, \lambda \sigma): \frac{\lambda^*}{2} < \lambda < \lambda^*\}. \end{equation} We let $ (u_\lambda,v_\lambda)$ denote the minimal solution of $(P)_{\lambda, \sigma \lambda}$ for $ \frac{\lambda^*}{2} < \lambda < \lambda^*$. One easily sees that for each $ x \in \Omega$, $u_\lambda(x), v_\lambda(x)$ are increasing in $ \lambda$ and hence we define \begin{equation} u^*(x):= \lim_{\lambda \nearrow \lambda^*} u_\lambda(x) \ \ \text{and} \ \ v^*(x):= \lim_{\lambda \nearrow \lambda^*} v_\lambda(x), \end{equation} and we call $(u^*,v^*)$ the extremal solution associated with $ (\lambda^*,\gamma^*) \in \Upsilon$. Under some very minor growth assumptions on $F$ and $G$ one can show that $(u^*,v^*)$ is a weak solution of $(P)_{\lambda^*,\gamma^*}$. For the rest of this article we refer to $(u^*,v^*)$ as $(u,v)$. For the case of the local Laplacian operator, Cowan and the author in \cite{cf} proved that the extremal solution of $(H)_{\lambda,\gamma}$ when $\Omega$ is a convex domain is regular provided $1\le n \le 3$ for general nonlinearities $f,g\in C^1(\mathbb R)$ satisfying (\ref{R}).
This can be seen as a counterpart of Nedev's result for elliptic gradient systems. For radial solutions, it is also shown in \cite{cf} that stable solutions are regular in dimensions $1\le n <10$ for general nonlinearities. This is a counterpart of the regularity result of Cabr\'{e}-Capella \cite{CC} and Villegas \cite{v2} for elliptic gradient systems. For the local Gelfand system, regularity of the extremal solutions is given by Cowan in \cite{cow} and by Dupaigne et al. in \cite{dfs} when $n<10$. Here are our main results. The following theorem deals with regularity of the extremal solution of nonlocal Gelfand, Lane-Emden and MEMS systems. \begin{thm}\label{thmg} Suppose that $ \Omega$ is a bounded smooth domain in $ {\mathbb{R}}^n$. Let $(\lambda^*,\gamma^*) \in \Upsilon$ and let $\mathcal L$ be given by (\ref{La}) where the ellipticity condition (\ref{c1c2}) holds and $0<s<1$. Then, the associated extremal solution of $ (G)_{\lambda^*,\gamma^*}$, $ (E)_{\lambda^*,\gamma^*}$ and $ (M)_{\lambda^*,\gamma^*}$ is bounded when \begin{eqnarray}\label{dimg} &&n < 10s, \\&&\label{dime} n<2s+\frac{4s}{p-1}[p+\sqrt{p(p-1)}], \\&&\label{dimm} n<2s+\frac{4s}{p+1}[p+\sqrt{p(p+1)}], \end{eqnarray} respectively. \end{thm} The following theorem is a counterpart of the Nedev regularity result for the nonlocal system $(H)_{\lambda,\gamma}$. \begin{thm} \label{nedev} Suppose that $ \Omega$ is a bounded smooth convex domain in $ {\mathbb{R}}^n$. Assume that $f$ and $g$ satisfy condition (\ref{R}) with $f'(0), g'(0)>0$, that $ f'(\cdot),g'(\cdot)$ are convex, and that \begin{equation} \label{deltaeps} \liminf_{t \rightarrow \infty} \frac{\left[f''(t)\right]^2}{f'''(t)f'(t)} >0 \ \ \text{and} \ \ \liminf_{t \rightarrow \infty} \frac{\left[g''(t)\right]^2}{g'''(t)g'(t)} >0, \end{equation} holds. Let $(\lambda^*,\gamma^*) \in \Upsilon$ and let $\mathcal L={(-\Delta)}^s$ for $0<s<1$. Then, the associated extremal solution $(u,v)$ of $ (H)_{\lambda^*,\gamma^*}$ is bounded when $n < 4s$.
\end{thm} In order to prove the above results, we first establish integral estimates for minimal solutions of systems with a general nonlocal operator $\mathcal L$ given by (\ref{Lui}) when $J$ is a nonnegative measurable even jump kernel. Then, we apply nonlocal Sobolev embedding arguments to conclude boundedness of the extremal solutions. Note that when $\lambda=\gamma$ the systems $(G)_{\lambda,\gamma}$, $(E)_{\lambda,\gamma}$ and $(M)_{\lambda,\gamma}$ turn into scalar equations.
4,016
31,238
en
train
0.95.3
Here is how this article is structured. In Section \ref{secpre}, we provide regularity theory for nonlocal operators. In Section \ref{secstab}, we establish various stability inequalities for minimal solutions of systems with a general nonlocal operator of the form (\ref{Lui}) with a nonnegative measurable even jump kernel. In Section \ref{secint}, we provide some technical integral estimates for stable solutions of systems introduced in the above. In Section \ref{secreg}, we apply the integral estimates to establish regularity of extremal solutions for nonlocal Gelfand, Lane-Emden and MEMS systems with exponential and power nonlinearities. In addition, we provide regularity of the extremal solution for the gradient system $(H)_{\lambda,\gamma}$ with general nonlinearities and also for particular power nonlinearities. \section{Preliminaries}\label{secpre} In this section, we provide regularity results not only to the fractional Laplacian, but also to more general integro-differential equations. We omit the proofs in this section and refer interested readers to corresponding references. Let us start with the following classical regularity result concerning embeddings for the Riesz potential, see the book of Stein \cite{st}. \begin{thm} Suppose that $0<s<1$, $n>2s$ and $f$ and $u$ satisfy \begin{equation*} u = {(-\Delta)}^{-s} f \qquad \text{in} \ \ \mathbb R^n, \end{equation*} in the sense that $u$ is the Riesz potential of order $2s$ of $f$. Let $u,f\in L^p(\mathbb R^n)$ when $1\le p<\infty$. \begin{enumerate} \item[(i)] For $p=1$, there exists a positive constant $C$ such that \begin{equation*} ||u||_{L^q(\mathbb R^n)} \le C ||f||_{L^1(\mathbb R^n)} \ \ \text{for} \ \ q=\frac{n}{n-2s}. \end{equation*} \item[(ii)] For $1 < p<\frac{n}{2s}$, there exists a positive constant $C$ such that \begin{equation*} ||u||_{L^q(\mathbb R^n)} \le C ||f||_{L^p(\mathbb R^n)} \ \ \text{for} \ \ q=\frac{np}{n-2ps}. 
\end{equation*} \item[(iii)] For $\frac{n}{2s}<p<\infty$, there exists a positive constant $C$ such that \begin{equation*} [u]_{C^\beta(\mathbb R^n)} \le C ||f||_{L^p(\mathbb R^n)} \ \ \text{for} \ \ \beta=2s-\frac{n}{p} , \end{equation*} where $[\cdot]_{C^\beta(\mathbb R^n)}$ denotes the $C^\beta$ seminorm. \end{enumerate} Here the constant $C$ depends only on $n$, $s$ and $p$. \end{thm} The above theorem is applied by Ros-Oton and Serra in \cite{rs} to establish the following regularity theory for the fractional Laplacian. See also \cite{rs1} for the boundary regularity results. \begin{prop} Suppose that $0<s<1$, $n>2s$ and $f\in C(\bar \Omega)$ where $\Omega\subset\mathbb R^n$ is a bounded $C^{1,1}$ domain. Let $u$ be the solution of \begin{eqnarray*} \left\{ \begin{array}{lcl} {(-\Delta)}^s u &=& f \qquad \text{in} \ \ \Omega, \\ u&=&0 \qquad \text{in} \ \ \RO. \end{array}\right. \end{eqnarray*} \begin{enumerate} \item[(i)] For $1\le r<\frac{n}{n-2s}$, there exists a positive constant $C$ such that \begin{equation*} ||u||_{L^r(\Omega)} \le C ||f||_{L^1(\Omega)} \ \ \text{for} \ \ r<\frac{n}{n-2s}. \end{equation*} \item[(ii)] For $1 < p<\frac{n}{2s}$, there exists a positive constant $C$ such that \begin{equation*} ||u||_{L^q(\Omega)} \le C ||f||_{L^p(\Omega)} \ \ \text{for} \ \ q=\frac{np}{n-2ps}. \end{equation*} \item[(iii)] For $\frac{n}{2s}<p<\infty$, there exists a positive constant $C$ such that \begin{equation*} ||u||_{C^\beta(\Omega)} \le C ||f||_{L^p(\Omega)} \ \ \text{for} \ \ \beta=\min\left\{s,2s-\frac{n}{p}\right\}. \end{equation*} \end{enumerate} Here the constant $C$ depends only on $n$, $s$, $p$, $r$ and $\Omega$. \end{prop} For the case of $n\le 2s$, the fact that $0<s<1$ implies that $n=1$ and $s\ge \frac{1}{2}$. Note that in this case the Green function $G(x,y)$ is explicitly known. Therefore, $G(\cdot,y)\in L^\infty(\Omega)$ for $s>\frac{1}{2}$ and in $ L^p(\Omega)$ for all $p<\infty$ when $s=\frac{1}{2}$.
We summarize this as $||u||_{L^\infty(\Omega)}\le C ||f||_{L^1(\Omega)}$ when $n<2s$. In addition, for the case of $n=2s$, we conclude that $||u||_{L^p(\Omega)}\le C ||f||_{L^1(\Omega)}$ for all $p<\infty$ and $||u||_{L^\infty(\Omega)}\le C ||f||_{L^p(\Omega)}$ for $p>1$. In what follows we provide a counterpart of the above regularity result for general integro-differential operators given by (\ref{La}). These operators are infinitesimal generators of stable and symmetric L\'{e}vy processes and they are uniquely determined by a finite measure on the unit sphere $\mathbb S^{n-1}$, often referred to as the spectral measure of the process. When this measure is absolutely continuous, symmetric stable processes have generators of the form (\ref{La}) where $0<s<1$ and $a$ is any nonnegative function in $L^1(\mathbb S^{n-1})$ satisfying $a(\theta)=a(-\theta)$ for $\theta\in \mathbb S^{n-1}$. The regularity theory for general operators of the form (\ref{La}) has been recently developed by Fern\'{a}ndez-Real and Ros-Oton in \cite{fro1}. In order to prove this result, the authors apply results of \cite{gh} to study the fundamental solution associated to the operator $\mathcal L$ in view of that of the fractional Laplacian. \begin{prop}\label{propregL} Let $\Omega\subset \mathbb R^{n}$ be any bounded domain, $0<s<1$ and $f\in L^2(\Omega)$. Let $u$ be any weak solution of \begin{eqnarray*} \left\{ \begin{array}{lcl} \mathcal L u &=& f \qquad \text{in} \ \ \Omega, \\ u&=&0 \qquad \text{in} \ \ \RO , \end{array}\right. \end{eqnarray*} where the operator $\mathcal L$ is given by (\ref{La}) and the ellipticity condition (\ref{c1c2}) holds. Assume that $f\in L^r(\Omega)$ for some $r$. \begin{enumerate} \item[(i)] For $1< r<\frac{n}{2s}$, there exists a positive constant $C$ such that \begin{equation*} ||u||_{L^q(\Omega)} \le C ||f||_{L^r(\Omega)} \ \ \text{for} \ \ q=\frac{nr}{n-2rs}.
\end{equation*} \item[(ii)] For $r=\frac{n}{2s}$, there exists a positive constant $C$ such that \begin{equation*} ||u||_{L^q(\Omega)} \le C ||f||_{L^r(\Omega)} \ \ \text{for} \ \ q<\infty. \end{equation*} \item[(iii)] For $\frac{n}{2s}<r<\infty$, there exists a positive constant $C$ such that \begin{equation*} ||u||_{L^\infty(\Omega)} \le C ||f||_{L^r(\Omega)}. \end{equation*} \end{enumerate} Here the constant $C$ depends only on $n$, $s$, $r$, $\Omega$ and the ellipticity constants. \end{prop} We end this section by noting that $(u,v)$ is a weak solution of $(P)_{\lambda,\gamma}$ for $u,v\in L^1(\Omega)$ if $ F(u,v)\delta^s \in L^1(\Omega)$ and $G(u,v) \delta^s \in L^1(\Omega)$ where $\delta(x)=\text{dist}(x,\partial\Omega)$ and $$ \int_{\Omega} u \mathcal L \zeta = \int_{\Omega} \lambda F(u,v) \zeta \ \ \text{and} \ \ \int_{\Omega} v \mathcal L \eta = \int_{\Omega} \gamma G(u,v) \eta , $$ for all test functions $\zeta,\eta$ such that $\zeta,\eta$ and $\mathcal L \zeta,\mathcal L \eta$ are bounded in $\Omega$ and $\zeta,\eta\equiv 0$ on $\partial\Omega$. Any bounded weak solution is a classical solution, in the sense that it is regular in the interior of $\Omega$, continuous up to the boundary, and $(P)_{\lambda,\gamma}$ holds pointwise. Note that for the case of local operators, that is $s = 1$, the above notion of weak solution is consistent with the one introduced by Brezis et al. in \cite{bcmr,BV}.
2,596
31,238
en
train
0.95.4
\section{Stability inequalities}\label{secstab} In this section, we provide stability inequalities for minimal solutions of system $(P)_{\lambda,\gamma}$ for various nonlinearities $F$ and $G$. We start with the following technical lemma in regards to nonlocal operator $\mathcal L$ with even symmetric kernel $J$. \begin{lemma}\label{fgprop} Assume that an operator $\mathcal L$ is given by (\ref{Luj}) with a measurable symmetric kernel $J(x,z)=J(x-z)$ that is even. Then, \begin{eqnarray*} &&\mathcal L(f(x)g(x)) = f(x)\mathcal L(g(x))+g(x)\mathcal L(f(x)) - \int_{\mathbb R^n} \left[f(x)-f(z) \right] \left[g(x)-g(z) \right] J(x-z) dz,\\ &&\int_{\mathbb R^n} g(x)\mathcal L(f(x)) dx = \frac{1}{2} \int_{\mathbb R^n} \int_{\mathbb R^n} \left[f(x)-f(z) \right] \left[g(x)-g(z) \right] J(x-z) dx dz, \end{eqnarray*} where $f,g\in C^1(\mathbb R^n)$ and the integrals are finite. \end{lemma} \begin{proof} The proof is elementary and we omit it here. \end{proof} We now establish a stability inequality for minimal solutions of system $(P)_{\lambda,\gamma}$. Note that for the case of local operators this inequality is established by the author and Cowan in \cite{cf} and in \cite{faz}. \begin{prop}\label{stablein} Let $(u_\lambda,v_\lambda)$ be a minimal solution of system $(P)_{\lambda,\gamma}$ such that $u_\lambda,v_\lambda$ are increasing in $\lambda$. Assume that $J$ is a measurable even kernel and $F_vG_u\ge 0$. Then, \begin{eqnarray} \label{stability} && \int_{\Omega} F_u \zeta^2 +G_v \eta^2 + 2\sqrt{F_vG_u} \zeta\eta dx \\& \le& \nonumber \frac{1}{2} \int_{{\mathbb{R}}^n} \int_{{\mathbb{R}}^n}\left( \frac{1}{\lambda} [\zeta(x)- \zeta(z)]^2 +\frac{1}{\gamma} [\eta(x)- \eta(z)]^2\right) J(x-z) dz dx , \end{eqnarray} for test functions $\zeta,\eta$ so that $\zeta,\eta=0$ in $\RO$. 
\end{prop} \begin{proof} Since $u_\lambda,v_\lambda$ are increasing in $\lambda$, differentiating $(P)_{\lambda,\gamma}$ with respect to $\lambda$ we get \begin{eqnarray*} \mathcal L (\partial_\lambda u_\lambda) &=& F + \lambda F_u \partial_\lambda u_\lambda + \lambda F_v \partial_\lambda v_\lambda , \\ \mathcal L (\partial_\lambda v_\lambda) &=&\sigma G + \gamma G_u \partial_\lambda u_\lambda + \gamma G_v \partial_\lambda v_\lambda, \end{eqnarray*} where $u_\lambda,v_\lambda>0$. Multiply these equations by $\frac{1}{\lambda}\frac{\zeta^2}{\partial_\lambda u_\lambda}$ and $\frac{1}{\gamma}\frac{\eta^2}{\partial_\lambda v_\lambda}$, respectively, and add to get \begin{eqnarray*} \label{L1} \frac{1}{\lambda} \mathcal L (\partial_\lambda u_\lambda) \frac{\zeta^2}{\partial_\lambda u_\lambda} + \frac{1}{\gamma}\mathcal L (\partial_\lambda v_\lambda) \frac{\eta^2}{\partial_\lambda v_\lambda} &=& \frac{1}{\lambda} F\frac{\zeta^2}{\partial_\lambda u_\lambda} + F_u \zeta^2 + F_v \partial_\lambda v_\lambda \frac{\zeta^2}{\partial_\lambda u_\lambda} \\&& + \frac{\sigma}{\gamma} G\frac{\eta^2}{\partial_\lambda v_\lambda} + G_v \eta^2 + G_u \partial_\lambda u_\lambda \frac{\eta^2}{\partial_\lambda v_\lambda} . \end{eqnarray*} Note that the following lower-bound holds for the right-hand side of the above equality \begin{eqnarray*} RHS& \ge & F_u \zeta^2 + G_v \eta^2 + F_v \partial_\lambda v_\lambda \frac{\zeta^2}{\partial_\lambda u_\lambda} + G_u \partial_\lambda u_\lambda \frac{\eta^2}{\partial_\lambda v_\lambda} \ge F_u \zeta^2 + G_v \eta^2 + 2 \sqrt{ F_vG_u} \zeta\eta. \end{eqnarray*} Integrating the above we end up with \begin{equation} \label{LL1} \int_{\Omega} F_u \zeta^2 + G_v \eta^2 + 2 \sqrt{ F_vG_u} \zeta\eta dx \le \int_{\mathbb R^n} \frac{1}{\lambda} \mathcal L (\partial_\lambda u_\lambda) \frac{\zeta^2}{\partial_\lambda u_\lambda} + \frac{1}{\gamma} \mathcal L (\partial_\lambda v_\lambda) \frac{\eta^2}{\partial_\lambda v_\lambda} dx .
\end{equation} Applying Lemma \ref{fgprop}, we have \begin{eqnarray*}\label{Lphi} &&\int_{{\mathbb{R}}^n} \mathcal L (\partial_\lambda u_\lambda(x)) \frac{\zeta^2(x)}{\partial_\lambda u_\lambda(x)} dx \\&=& \frac{1}{2} \int_{{\mathbb{R}}^n} \int_{{\mathbb{R}}^n} [\partial_\lambda u_\lambda(x) - \partial_\lambda u_\lambda(z)] \left[ \frac{\zeta^2(x)}{\partial_\lambda u_\lambda(x)}- \frac{\zeta^2(z)}{\partial_\lambda u_\lambda(z)} \right] J(x-z) dx dz. \end{eqnarray*} Note that for $a,b,c,d\in\mathbb R$ when $ab<0$ we have \begin{equation*} (a+b)\left[ \frac{c^2}{a} + \frac{d^2}{b} \right] \le (c-d)^2 . \end{equation*} Since each $\partial_\lambda u_\lambda$ does not change sign, we have $\partial_\lambda u_\lambda(x)\partial_\lambda u_\lambda(z)>0$. Setting $a=\partial_\lambda u_\lambda(x)$, $b=-\partial_\lambda u_\lambda(z)$, $c=\zeta(x)$ and $d=\zeta(z)$ in the above inequality and from the fact that $ab=- \partial_\lambda u_\lambda(x) \partial_\lambda u_\lambda(z)<0$, we conclude \begin{equation*} [\partial_\lambda u_\lambda(x) - \partial_\lambda u_\lambda(z)] \left[ \frac{\zeta^2(x)}{\partial_\lambda u_\lambda(x)}- \frac{\zeta^2(z)}{\partial_\lambda u_\lambda(z)} \right] \le [\zeta(x)- \zeta(z)]^2 . \end{equation*} Therefore, \begin{equation*} \int_{{\mathbb{R}}^n}\mathcal L (\partial_\lambda u_\lambda(x)) \frac{\zeta^2(x)}{\partial_\lambda u_\lambda(x)} dx\le \frac{1}{2} \int_{{\mathbb{R}}^n} \int_{{\mathbb{R}}^n} [\zeta(x)- \zeta(z)]^2 J(z-x) dz dx. \end{equation*} This together with (\ref{LL1}) completes the proof. \end{proof} Following the ideas provided above, we provide stability inequalities for minimal solutions of Gelfand, Lane-Emden and MEMS systems with exponential and power-type nonlinearities. \begin{cor}\label{stableinG} Let $(u,v)$ be the extremal solution of systems $(G)_{\lambda,\gamma}$, $(E)_{\lambda,\gamma}$ and $(M)_{\lambda,\gamma}$.
Then, \begin{eqnarray}\label{stabilityG} \sqrt{\lambda\gamma} \int_{\Omega} e^{\frac{u+v}{2}}\zeta^2 dx &\le& \frac{1}{2}\int_{{\mathbb{R}}^n} \int_{{\mathbb{R}}^n} {|\zeta(x)- \zeta(z)|^2} J(x-z) dz dx , \\ \label{stabilityE} p\sqrt{\lambda\gamma} \int_{\Omega} (1+u)^{\frac{p-1}{2}} (1+v)^{\frac{p-1}{2}} \zeta^2 dx &\le& \frac{1}{2}\int_{{\mathbb{R}}^n} \int_{{\mathbb{R}}^n} {|\zeta(x)- \zeta(z)|^2} J(x-z) dz dx , \\ \label{stabilityM} p\sqrt{\lambda\gamma} \int_{\Omega} (1-u)^{-\frac{p+1}{2}} (1-v)^{-\frac{p+1}{2}} \zeta^2 dx &\le& \frac{1}{2}\int_{{\mathbb{R}}^n} \int_{{\mathbb{R}}^n} {|\zeta(x)- \zeta(z)|^2} J(x-z) dz dx , \end{eqnarray} for test functions $\zeta$ so that $\zeta=0$ in $\RO$. \end{cor} \begin{cor}\label{stablein1} Let $(u,v)$ be the extremal solution of system $(H)_{\lambda,\gamma}$ when $f'g'\ge 0$. Then, \begin{eqnarray}\label{stabilityH} && \int_{\Omega} f''g \zeta^2 +fg'' \eta^2 + 2f'g' \zeta\eta dx \\& \le&\nonumber \frac{1}{2} \int_{{\mathbb{R}}^n} \int_{{\mathbb{R}}^n}\left( \frac{1}{\lambda} |\zeta(x)- \zeta(z)|^2 + \frac{1}{\gamma} |\eta(x)- \eta(z)|^2 \right) J(x-z) dz dx , \end{eqnarray} for test functions $\zeta,\eta$ so that $\zeta,\eta=0$ in $\RO$. \end{cor}
2,729
31,238
en