% import packages
\documentclass[8pt,table=xcolor,usenames,dvipsnames]{beamer}
\usepackage[utf8]{inputenc}
\usepackage{colortbl}
\usepackage{ragged2e}
\usepackage{booktabs}
\usepackage{threeparttable}
\usepackage{pifont}
\usepackage{graphicx}
\usepackage{hhline}
\usepackage{amsmath}
\usepackage{amssymb}
\usepackage{amsthm}
\usepackage{tikz}
\usepackage{bm}
\usepackage{etoolbox}
\usepackage[export]{adjustbox}
\usepackage[justification=centering]{caption}
\usepackage[backend=bibtex,style=authoryear,maxcitenames=2,natbib=true,maxbibnames=99]{biblatex}
% patch circles for framebreaks
% source: https://tex.stackexchange.com/a/132312
\makeatletter
\patchcmd{\slideentry}{\ifnum#2>0}{\ifnum2>0}{}{}
\patchcmd{\slideentry}{\c@subsectionslide}{\c@framenumber}{}{}
\patchcmd{\beamer@writeslidentry}{\c@subsectionslide}{\c@framenumber}{}{}
\makeatother
% set beamer parameters
\usetheme{Frankfurt}
\usecolortheme{default}
\setbeamerfont{footnote}{size=\Tiny}
\setbeamertemplate{page number in head/foot}{}
\setbeamertemplate{bibliography item}{}
\setbeamertemplate{caption}[numbered]
\setbeamercovered{transparent}
\setbeamerfont{institute}{size=\small}
\addtobeamertemplate{navigation symbols}{}{%
\usebeamerfont{footline}%
\usebeamercolor[fg]{footline}%
\hspace{2em}%
\raisebox{1.7pt}[0pt][0pt]{\insertframenumber/\inserttotalframenumber}
}
\setbeamertemplate{enumerate items}[square]
\setbeamertemplate{section in toc}[square]
\setbeamertemplate{theorems}[numbered]
% special command to uncover graphics
% source: https://tex.stackexchange.com/a/415335
\newcommand<>{\uncovergraphics}[2][{}]{
% Taken from: <https://tex.stackexchange.com/a/354033/95423>
\begin{tikzpicture}
\node[anchor=south west,inner sep=0] (B) at (4,0)
{\includegraphics[#1]{#2}}; \alt#3{}{%
\fill [draw=none, fill=white, fill opacity=0.7] (B.north west) -- (B.north
east) -- (B.south east) -- (B.south west) -- (B.north west) -- cycle; }
\end{tikzpicture}
}
% set caption parameters
\DeclareCaptionFormat{myformat}{\fontsize{6}{6}\selectfont#1#2#3}
\captionsetup{format=myformat}
\captionsetup[figure]{labelfont={bf},name={Figure}}
\captionsetup[table]{labelfont={bf},name={Table}}
% set bibliography parameters
\renewcommand\refname{Bibliography}
\addbibresource{../bibtex.bib}
\setlength\bibitemsep{1.5\itemsep}
\let\oldcitep=\citep
\renewcommand\citep[1]{{\textcolor{blue}{\oldcitep{#1}}}}
\let\oldcitet=\citet
\renewcommand\citet[1]{{\textcolor{blue}{\oldcitet{#1}}}}
% miscellaneous settings
\settowidth{\leftmargini}{\usebeamertemplate{itemize item}}
\addtolength{\leftmargini}{\labelsep}
\renewcommand{\arraystretch}{1.3}
\graphicspath{{../visuals/}}
\newcolumntype{L}[1]{>{\RaggedRight\hspace{0pt}}p{#1}}
% set admin details
\title{SoPa++: Leveraging explainability from hybridized RNN, CNN and weighted
finite-state neural architectures}
\subtitle{M.Sc. Thesis Defense}
\author{Atreya Shankar (799227), \texttt{[email protected]} \\ Cognitive Systems:
Language, Learning, and Reasoning (M.Sc.) \\ 1\textsuperscript{st} Supervisor: Dr. Sharid
Loáiciga, University of Potsdam \\ 2\textsuperscript{nd}
Supervisor: Mathias Müller, M.A., University of Zurich}
\institute{Foundations of Computational Linguistics \\ Department of Linguistics \\ University of Potsdam, SoSe 2021}
\date{July 8, 2021}
% start presentation
\begin{document}
\begin{frame}
\maketitle
\end{frame}
\begin{frame}
\frametitle{Overview}
\tableofcontents
\end{frame}
\section{Introduction}
\begin{frame}
\frametitle{Motivation}
\begin{columns}[T]
\begin{column}{.40\textwidth}
\begin{itemize}
\setlength\itemsep{1.5em}
\uncover<1>{
\item Increasingly complex deep learning models achieving
SOTA performance on ML and NLP tasks (Figure \ref{fig:nlp_progress})
\item Emerging concerns ranging from adversarial samples to unforeseen
inductive biases \citep{danilevsky2020survey,arrieta2020explainable}}
\uncover<2>{
\item \citet{schwartz2018sopa} propose an
explainable hybridized neural architecture called \textbf{So}ft
\textbf{Pa}tterns (SoPa; Figure \ref{fig:sopa_crop})
\item SoPa limited to \textbf{localized} and \textbf{indirect} explainability despite
being suited for globalized and direct \textbf{explanations by
simplification}
}
\end{itemize}
\end{column}
\hfill
\begin{column}{.60\textwidth}
\centering
\uncovergraphics<1>[width=6cm, valign=t]{pdfs/borrowed/nlp_sota_model_size_progress.pdf}
\uncover<1>{\captionof{figure}{Parameter counts of recently released pre-trained language
models; figure taken from \citet{sanh2019distilbert}}}
\label{fig:nlp_progress}
\vspace{10pt}
\uncover<2>{\fbox{\uncovergraphics<2>[width=6cm, valign=t]{pdfs/borrowed/sopa_crop.pdf}}}
\uncover<2>{\captionof{figure}{Excerpt from \citet{schwartz2018sopa}}}
\label{fig:sopa_crop}
\end{column}
\end{columns}
\end{frame}
\begin{frame}
\frametitle{Objective and research questions}
\uncover<1->{ Objective: \setlength{\leftmargini}{0.5cm}
\begin{itemize}
\item Address limitations of SoPa by proposing \textbf{SoPa++}, which
could allow for effective explanations by simplification
\end{itemize}
}
\vspace{10pt}
\uncover<2->{ Process:
\begin{itemize}
\item We study the performance and explainability of SoPa++ on the
Facebook Multilingual Task Oriented Dialog (\textbf{FMTOD}) data set from
\citet{schuster-etal-2019-cross-lingual}; focusing on the English-language
intent classification task
\end{itemize}
}
\vspace{10pt}
\uncover<3->{ Research questions:
\begin{enumerate}
\setlength\itemsep{1em}
\item Does SoPa++ provide \textbf{competitive} performance?
\item To what extent does SoPa++ contribute to \textbf{effective}
explanations by simplification?
\item What \textbf{interesting and relevant} explanations can SoPa++
provide?
\end{enumerate}
}
\end{frame}
% macro for showing TOC on each new section
\AtBeginSection[]
{
\begin{frame}
\frametitle{Overview}
\tableofcontents[currentsection]
\end{frame}
}
\section{Background concepts}
\begin{frame}
\frametitle{Explainability}
\begin{columns}[T]
\begin{column}{.40\textwidth}
\begin{itemize}
\setlength\itemsep{1.5em} \uncover<1>{
\item \citet{arrieta2020explainable} conduct literature review from
$\sim$400 XAI publications
\item Transparency is a passive feature $\Rightarrow$ transparent
and black-box models
\item Explainability is an active feature that involves target
audiences (Figure \ref{fig:xai_target_audience})} \uncover<2>{
\item Explainability techniques include local explanations, feature
relevance and \textbf{explanations by simplification}
\item Explainability techniques provide meaningful insights into
decision boundaries (Figure \ref{fig:lime_husky})}
\end{itemize}
\end{column}
\hfill
\begin{column}{.60\textwidth}
\centering \uncovergraphics<1>[width=6.2cm,trim={0.3cm 0.3cm 0.5cm
0.3cm},clip,valign=t]{pdfs/borrowed/xai_target_audience.pdf}
\uncover<1>{\captionof{figure}{Examples of various target audiences in
XAI; figure taken from \citet{arrieta2020explainable}}}
\label{fig:xai_target_audience}
\vspace{5pt} \uncovergraphics<2>[width=6cm,trim={0.1cm 0.1cm 0.1cm
0.1cm},clip,valign=t]{pdfs/borrowed/lime_husky.pdf}
\uncover<2>{\captionof{figure}{Local explanation for ``Wolf''
classification decision; figure taken from \citet{lime}}}
\label{fig:lime_husky}
\end{column}
\end{columns}
\end{frame}
\begin{frame}
\frametitle{SoPa: Computational graph}
\centering \captionsetup{width=9cm} \uncovergraphics<1->[width=8cm,
valign=t]{pdfs/generated/generic_nfa_linear_chain/main.pdf}
\uncover<1->{\captionof{figure}{Weighted finite-state automaton (WFA) slice: FA with self-loop
(blue), $\epsilon$ (red) and main-path (black) transitions; figure adapted
from \citet{schwartz2018sopa}}}
\label{fig:fa}
\vspace{10pt} \uncovergraphics<2>[width=8cm,
valign=t]{pdfs/borrowed/sopa_computational_graph.pdf}
\uncover<2>{\captionof{figure}{SoPa's partial computational graph; figure
taken from \citet{schwartz2018sopa}}}
\label{fig:sopa}
\end{frame}
\begin{frame}
\frametitle{SoPa: Explainability techniques}
\begin{columns}[T]
\begin{column}{.40\textwidth}
\begin{itemize}
\setlength\itemsep{1.7em}
\uncover<1->{
\item Two explainability techniques; namely \textbf{local
explanations} and \textbf{feature relevance}
\item Local explanations find highest scoring phrases (Figure
\ref{fig:sopa_local_explanations})}
\uncover<2>{
\item Feature relevance perturbs inputs to determine the highest
impact phrases (Figure \ref{fig:sopa_feature_relevance})
\item Both techniques are \textbf{localized} and
\textbf{indirect}
\item WFAs have a rich theoretical background which can be exploited
for direct and globalized explanations
}
\end{itemize}
\end{column}
\hfill
\begin{column}{.60\textwidth}
\centering
\uncovergraphics<1->[width=6cm,valign=t]{pdfs/borrowed/sopa_local_explanations.pdf}
\uncover<1->{\captionof{figure}{Ranked local explanations from SoPa;
table taken from \citet{schwartz2018sopa}}}
\label{fig:sopa_local_explanations}
\vspace{5pt}
\uncovergraphics<2>[width=6cm,trim={0cm 2.3cm 0cm
0cm},clip,valign=t]{pdfs/borrowed/sopa_feature_relevance.pdf}
\uncover<2>{\captionof{figure}{Feature relevance outputs from SoPa;
table taken from \citet{schwartz2018sopa}}}
\label{fig:sopa_feature_relevance}
\end{column}
\end{columns}
\end{frame}
\section{Data and methodologies}
\begin{frame}
\frametitle{FMTOD: Summary statistics}
\begin{table}
\small
\centering
\begin{threeparttable}
\begin{tabular}{llll}
\toprule
Class and description & \uncover<3>{Frequency} & \uncover<2->{Utterance length$^{\dagger}$ & Example$^{\ddagger}$}\\
\midrule
0: \texttt{alarm/cancel\_alarm} & \uncover<3>{\cellcolor{red!5}1791} & \uncover<2->{5.6 $\pm$ 1.9 & cancel weekly alarm} \\
1: \texttt{alarm/modify\_alarm} & \uncover<3>{\cellcolor{red!2}566} & \uncover<2->{7.1 $\pm$ 2.5 & change alarm time} \\
2: \texttt{alarm/set\_alarm} & \uncover<3>{\cellcolor{red!17}5416} & \uncover<2->{7.5 $\pm$ 2.5 & please set the new alarm} \\
3: \texttt{alarm/show\_alarms} & \uncover<3>{\cellcolor{red!3}914} & \uncover<2->{6.9 $\pm$ 2.2 & check my alarms.} \\
4: \texttt{alarm/snooze\_alarm} & \uncover<3>{\cellcolor{red!1}366} & \uncover<2->{6.1 $\pm$ 2.1 & pause alarm please} \\
5: \texttt{alarm/time\_left\_on\_alarm} & \uncover<3>{\cellcolor{red!1}344} & \uncover<2->{8.6 $\pm$ 2.1 & minutes left on my alarm} \\
6: \texttt{reminder/cancel\_reminder} & \uncover<3>{\cellcolor{red!3}1060} & \uncover<2->{6.6 $\pm$ 2.2 & clear all reminders.} \\
7: \texttt{reminder/set\_reminder} & \uncover<3>{\cellcolor{red!17}5549} & \uncover<2->{8.9 $\pm$ 2.5 & birthday reminders} \\
8: \texttt{reminder/show\_reminders} & \uncover<3>{\cellcolor{red!2}773} & \uncover<2->{6.8 $\pm$ 2.2 & list all reminders} \\
9: \texttt{weather/check\_sunrise} & \uncover<3>{\cellcolor{red!1}101} & \uncover<2->{6.7 $\pm$ 1.7 & when is sunrise} \\
10: \texttt{weather/check\_sunset} & \uncover<3>{\cellcolor{red!1}136} & \uncover<2->{6.7 $\pm$ 1.7 & when is dusk} \\
11: \texttt{weather/find} & \uncover<3>{\cellcolor{red!45}14338} & \uncover<2->{7.8 $\pm$ 2.3 & jacket needed?} \\
\hline \hline \\[-10pt]
$\Sigma/\mu$ & \uncover<3>{31354} & \uncover<2->{7.7 $\pm$ 2.5 & \textemdash} \\
\bottomrule
\end{tabular}
\begin{tablenotes}[flushleft]
\footnotesize
\uncover<2->{\item $^{\dagger}$Summary statistics follow the mean $\pm$
standard-deviation format
\item $^{\ddagger}$Short and simple examples were chosen for brevity and
formatting purposes}
\end{tablenotes}
\end{threeparttable}
\caption{Summary statistics and examples for the preprocessed
FMTOD data set}
\label{tab:fmtod_examples}
\end{table}
\end{frame}
\begin{frame}
\frametitle{SoPa++: WFA-$\omega$ and TauSTE}
\centering
\captionsetup{width=8cm}
\uncovergraphics<1>[width=7cm,valign=t]{pdfs/generated/w_nfa_linear_chain/main.pdf}
\uncover<1>{\captionof{figure}{WFA-$\omega$ slice: FA with
$\omega$ (blue) and main-path (black) transitions}}
\label{fig:omega_fa}
\vspace{10pt}
\begin{columns}[T]
\begin{column}{.37\textwidth}
\uncover<2>{\begin{equation*}
\footnotesize
\label{eq:tau_ste_forward}
\text{TauSTE}(x)=
\begin{cases}
1 & x \in (\tau, +\infty) \\
0 & x \in (-\infty, \tau]
\end{cases}
\end{equation*}
\begin{equation*}
\footnotesize
\label{eq:tau_ste_backward}
\text{TauSTE}'(x)=
\begin{cases}
1 & x \in (1, +\infty) \\
x & x \in [-1, 1] \\
-1 & x \in (-\infty, -1) \\
\end{cases}
\end{equation*}
\begin{itemize}
\small
\item $\text{TauSTE}'(x)$ denotes the backward pass and \textbf{not} the gradient
in this context
\item Flavors of STEs are being extensively researched, such as in \citet{yin2019understanding}
\end{itemize}
}
\end{column}
\begin{column}{.63\textwidth}
\centering
\uncovergraphics<2>[width=6.7cm,valign=t]{pdfs/generated/tau_ste_applied/main.pdf}
\uncover<2>{\captionof{figure}{TauSTE's forward and backward passes}}
\label{fig:tau_ste}
\end{column}
\end{columns}
\end{frame}
\begin{frame}
\frametitle{SoPa++: Computational graph}
\begin{figure}
\centering
\begin{tikzpicture}
\node[anchor=south west,inner sep=0] at (0,0) {\includegraphics[width=8.5cm]{pdfs/generated/spp_computational_graph/main.pdf}};
\path<1>[fill=white, fill opacity=0.7] (0,3.75) rectangle (7,7.3);
\path<2>[fill=white, fill opacity=0.7] (3.125,4.1) rectangle (7,7.3);
\path<3>[fill=white, fill opacity=0.7] (4,4.1) rectangle (7,7.3);
\path<4>[fill=white, fill opacity=0.7] (0.1,0.1) rectangle (0.1,0.1);
\path<5>[fill=white, fill opacity=0.7] (0,4.3) rectangle (3.125,7.3);
\path<5>[fill=white, fill opacity=0.7] (3.85,4.3) rectangle (7,7.3);
\end{tikzpicture}
\caption{SoPa++ computational graph; flow of graph is
from bottom to top and left to right}
\label{fig:spp_computational_graph}
\end{figure}
\end{frame}
\begin{frame}
\frametitle{SoPa++: Regular Expression (RE) proxy}
\begin{figure}
\centering
\begin{tikzpicture}
\node[anchor=south west,inner sep=0] at (0,0) {\includegraphics[width=8cm]{pdfs/generated/regex_computational_graph/main.pdf}};
\path<1>[fill=white, fill opacity=0.7] (0,2.7) rectangle (8,7.2);
\path<2>[fill=white, fill opacity=0.7] (0,3.525) rectangle (8,7.2);
\path<3>[fill=white, fill opacity=0.7] (2,4.3) rectangle (8,7.2);
\path<4>[fill=white, fill opacity=0.7] (0.1,0.1) rectangle (0.1,0.1);
\end{tikzpicture}
\caption{RE proxy computational graph; flow of graph is
from bottom to top and left to right}
\label{fig:regex_computational_graph}
\end{figure}
\end{frame}
\begin{frame}
\frametitle{SoPa vs. SoPa++}
\begin{table}[t!]
\centering \def\arraystretch{1.5}
\begin{tabular}{L{0.275\linewidth} L{0.3\linewidth} L{0.3\linewidth}}
\toprule
Characteristic & SoPa & SoPa++ \\
\midrule
Text casing & True-cased & Lower-cased \\
Token embeddings & GloVe 840B 300-dimensions & GloVe 6B 300-dimensions \\
\uncover<2->{\textbf{WFAs} & WFAs with $\epsilon$, self-loop and main-path transitions & WFA-$\omega$'s with $\omega$ and main-path transitions} \\
\uncover<3->{\textbf{Hidden layers} & Multi-layer perceptron after max-pooling & Layer normalization, TauSTE and linear transformation after max-pooling} \\
\uncover<4> {\textbf{Explainability \qquad technique(s)} & Local explanations, feature relevance & Explanations by simplification} \\
\bottomrule
\end{tabular}
\caption{Summarized differences for SoPa vs. SoPa++}
\label{tab:sopa_spp_comparison}
\end{table}
\end{frame}
\begin{frame}
\frametitle{Research Question 1: Competitive performance}
\uncover<2>{\begin{table}[t!]
\centering
\begin{tabular}{lll}
\toprule
Model size & Patterns hyperparameter $P$ & Parameter count \\
\midrule
Small & \texttt{6-10\_5-10\_4-10\_3-10} & 1,260,292 \\
Medium & \texttt{6-25\_5-25\_4-25\_3-25} & 1,351,612 \\
Large & \texttt{6-50\_5-50\_4-50\_3-50} & 1,503,812 \\
\bottomrule
\end{tabular}
\caption{Three different SoPa++ model sizes used during training}
\label{tab:model_types}
\end{table}}
\begin{itemize}
\setlength\itemsep{1em}
\uncover<1>{\item RQ 1: Does SoPa++ provide \textbf{competitive} performance?
\item Competitive accuracy range: \bm{$96.6-99.5\%$}
\citep{schuster-etal-2019-cross-lingual,zhang2019joint,zhang-etal-2020-intent}}
\uncover<2>{
\item Upsampling minority classes to mitigate data imbalance
\item Grid-search with three model sizes, varying $\tau$-thresholds: $\{0.00, 0.25, 0.50,
0.75, 1.00\}$ and 10 random seed iterations
\item $3 \times 5 \times 10 = 150$ model runs
\item Evaluation and comparison on the test set}
\end{itemize}
\end{frame}
\begin{frame}
\frametitle{Research Question 2: Effective explanations by simplification}
\begin{itemize}
\setlength\itemsep{1em}
\uncover<1>{\item RQ 2: To what extent does SoPa++ contribute to \textbf{effective}
explanations by simplification?
\item Effective explanations by simplification require a \textbf{simpler model},
\textbf{similar performance} and \textbf{maximum resemblance} \citep{arrieta2020explainable}
\item Similar performance $\Rightarrow$ compare test set evaluations
\item Maximum resemblance $\Rightarrow$ minimum distances over test set}
\uncover<2>{\item Softmax distance norm:
\begin{equation*}
\delta_{\sigma}(\bm{y}) = \left\Vert \bm{\sigma_{\mathcal{S}}} - \bm{\sigma_{\mathcal{R}}} \right\Vert_{2} = \sqrt{\sum^n_{i=1} (\sigma_{\mathcal{S}_i} - \sigma_{\mathcal{R}_i})^2}
\end{equation*}
\item Binary-misalignment rate:
\begin{equation*}
\delta_b(\bm{y}) = \dfrac{\left\Vert \bm{b_{\mathcal{S}}} - \bm{b_{\mathcal{R}}} \right\Vert_{1}}{\dim(\bm{b_{\mathcal{S}}} - \bm{b_{\mathcal{R}}})} = \dfrac{\sum^n_{i=1} |b_{\mathcal{S}_i} - b_{\mathcal{R}_i}|}{\dim(\bm{b_{\mathcal{S}}} - \bm{b_{\mathcal{R}}})}
\end{equation*}}
\end{itemize}
\end{frame}
\begin{frame}
\frametitle{Research Question 3: Interesting and relevant explanations}
\begin{itemize}
\setlength\itemsep{2em}
\uncover<1>{\item RQ 3: What \textbf{interesting and relevant} explanations can SoPa++
provide?
\item Open-ended question, can answer in different ways}
\uncover<2>{\item Capitalize on the new linear layer $\Rightarrow$ allows for direct
analysis of relative linear weights
\item Sample REs from RE lookup layer corresponding to salient TauSTE
neurons
\item Analyze REs for interesting linguistic features and inductive biases}
\end{itemize}
\end{frame}
\section{Results}
\begin{frame}
\frametitle{Research Question 1: Competitive performance}
\centering
\uncovergraphics<1->[width=9cm,valign=t]{pdfs/generated/train_spp_grid_1624885671.pdf}
\captionof{figure}{Validation accuracies of SoPa++ models against training updates}
\label{fig:results_training}
\uncover<2>{\begin{table}
\centering \def\arraystretch{1.3}
\tiny
\begin{tabular}{lllllll}
\toprule
&& \multicolumn{5}{c}{Accuracy in $\%$ with mean $\pm$ standard-deviation} \\[2pt]
\cline{3-7} \\[-7pt]
Size & Parameters & $\tau$=0.00 & $\tau$=0.25 & $\tau$=0.50 & $\tau$=0.75 & $\tau$=1.00 \\
\midrule
Small & 1,260,292 & \bm{$97.6 \pm 0.2$} & 97.6 $\pm$ 0.2 & 97.3 $\pm$ 0.2 & 97.0 $\pm$ 0.3 & 96.9 $\pm$ 0.3 \\
Medium & 1,351,612 & \bm{$98.3 \pm 0.2$} & 98.1 $\pm$ 0.1 & 98.0 $\pm$ 0.2 & 97.9 $\pm$ 0.1 & 97.7 $\pm$ 0.1 \\
Large & 1,503,812 & \bm{$98.3 \pm 0.2$} & 98.3 $\pm$ 0.2 & 98.2 $\pm$ 0.2 & 98.1 $\pm$ 0.2 & 98.0 $\pm$ 0.2 \\
\bottomrule
\end{tabular}
\caption{Test accuracies of SoPa++ models}
\label{tab:results_evaluation}
\end{table}}
\end{frame}
\begin{frame}
\frametitle{Research Question 2: Effective explanations by simplification}
\begin{figure}
\centering
\begin{tikzpicture}
\node[anchor=south west,inner sep=0] at (0,0)
{\includegraphics[width=10.5cm]{pdfs/generated/evaluate_spp_grid_1618059389.pdf}
};
\path<1>[fill=white, fill opacity=0.7] (0.4,1.325) rectangle (10.5,3.9);
\path<2>[fill=white, fill opacity=0.7] (0.4,3.9) rectangle (10.5,7);
\end{tikzpicture}
\caption{Visualization of model-pair accuracies and distance metrics}
\label{fig:explain_evaluate}
\end{figure}
\end{frame}
\begin{frame}
\frametitle{Research Question 3: Interesting and relevant explanations}
\begin{figure}
\centering
\begin{tikzpicture}
\node[anchor=south west,inner sep=0] at (0,0) {\includegraphics[width=10cm]{pdfs/generated/neurons_1618067685.pdf}};
\path<1>[fill=white, fill opacity=0.7] (0,0.7) rectangle (10,5.65);
\draw<3>[red] (7,3.2) rectangle (8,4.45);
\end{tikzpicture}
\vspace{5pt}
\caption{Relative linear layer weights applied to TauSTE neurons for the best
performing small RE proxy model with a test accuracy of 97.4$\%$}
\label{fig:neuron_weights}
\end{figure}
\end{frame}
\begin{frame}
\frametitle{Research Question 3: Interesting and relevant explanations}
\begin{figure}
\centering \includegraphics[trim={1.1cm 20.5cm 1.1cm
1.1cm},clip,height=4.9cm,valign=t]{pdfs/generated/neurons_regex_1618067722/activating_regex_sample_17.pdf}
\hfill \includegraphics[trim={1.1cm 1.1cm 1.1cm
24.5cm},clip,height=4.9cm,valign=t]{pdfs/generated/neurons_regex_1618067722/activating_regex_sample_17.pdf}
\vspace{5pt}
\caption{Ten sampled regular expressions from the RE lookup layer
corresponding to TauSTE neuron 17 for the best performing small RE proxy
model}
\label{fig:regex_example_neuron_weather}
\end{figure}
\end{frame}
\section{Discussion}
\begin{frame}
\frametitle{Research Question 1: Competitive performance}
\uncover<1->{Overview: \setlength{\leftmargini}{0.5cm}
\begin{itemize}
\setlength\itemsep{1em}
\item RQ 1: Does SoPa++ provide \textbf{competitive} performance?
\item Competitive accuracy range: $96.6-99.5\%$
\citep{schuster-etal-2019-cross-lingual,zhang2019joint,zhang-etal-2020-intent}
\item Observed best accuracy range: \bm{$97.6-98.3\%$}
\item SoPa++ offers \textbf{competitive} performance on FMTOD's English language
intent detection task
\end{itemize}}
\vspace{15pt}
\uncover<2>{Discussion: \setlength{\leftmargini}{0.5cm}
\begin{itemize}
\setlength\itemsep{1em}
\item Other studies worked with true-cased text
\item Observed performance is in the middle of competitive range
\item Worth noting the sizes of competitive BERT-derived models with
external data
\end{itemize}}
\end{frame}
\begin{frame}
\frametitle{Research Question 2: Effective explanations by simplification}
\uncover<1->{Overview: \setlength{\leftmargini}{0.5cm}
\begin{itemize}
\setlength\itemsep{1em}
\item RQ 2: To what extent does SoPa++ contribute to \textbf{effective}
explanations by simplification?
\item Effective explanations by simplification require a simpler model,
similar performance and maximum resemblance
\item \textbf{Effective} to the extent of: lowest accuracy differences ranging
from \bm{$0.1-0.7\%$} and softmax distance norms ranging from \bm{$4.3-10.0\%$}
\item Most effective for medium-large sized models with $\tau \in [0.50, 1.00]$
\end{itemize}}
\vspace{15pt}
\uncover<2>{Discussion: \setlength{\leftmargini}{0.5cm}
\begin{itemize}
\setlength\itemsep{1em}
\item No benchmark for effective explanations by simplification
\item RE proxy may not always be transparent given the size of the RE
lookup layer
\item Target audience was omitted in this analysis
\end{itemize}}
\end{frame}
\begin{frame}
\frametitle{Research Question 3: Interesting and relevant explanations}
\uncover<1->{Overview: \setlength{\leftmargini}{0.5cm}
\begin{itemize}
\setlength\itemsep{1em}
\item RQ 3: What \textbf{interesting and relevant} explanations can SoPa++
provide?
\item Similar lexical properties in branches
\item USA-centric inductive biases
\item Pronoun-level inductive biases
\end{itemize}}
\vspace{10pt}
\uncover<2->{Discussion:}
\begin{figure}
\centering
\uncovergraphics<2>[trim={1.1cm 28.5cm 1.1cm
9cm},clip,width=5cm]{pdfs/generated/neurons_regex_1618067722/activating_regex_sample_17.pdf}
\raisebox{0.37cm}{\uncovergraphics<3>[trim={1.1cm 9cm 1.1cm
32cm},clip,width=5cm]{pdfs/generated/neurons_regex_1618067722/activating_regex_sample_17.pdf}}
\vspace{5pt}
\uncovergraphics<4>[trim={1.1cm 1.1cm 1.1cm
40cm},clip,width=5cm]{pdfs/generated/neurons_regex_1618067722/activating_regex_sample_17.pdf}
\uncover<2->{\caption{Sampled regular expressions from the RE lookup layer
corresponding to TauSTE neuron 17 for the best performing small RE proxy
model}}
\label{fig:regex_more_examples}
\end{figure}
\end{frame}
\section{Conclusions}
\begin{frame}
\frametitle{Conclusions}
Objective: \setlength{\leftmargini}{0.5cm}
\begin{itemize}
\item Address limitations of SoPa by proposing \textbf{SoPa++}, which could
allow for effective explanations by simplification
\uncover<2->{\large\color{ForestGreen}\checkmark}
\end{itemize}
\vspace{15pt}
Research questions:
\begin{enumerate}
\setlength\itemsep{1.5em}
\item Does SoPa++ provide \textbf{competitive} performance? \uncover<3->{
\begin{itemize}
\item Best accuracy range: \bm{$97.6-98.3\%$}
\large\color{ForestGreen}\checkmark
\end{itemize}}
\item To what extent does SoPa++ contribute to \textbf{effective}
explanations by simplification?
\begin{itemize}
\setlength\itemsep{0.5em} \uncover<4->{
\item Lowest accuracy differences ranging from \bm{$0.1-0.7\%$} and
softmax distance norms ranging from \bm{$4.3-10.0\%$}
{\large\color{ForestGreen}\checkmark}} \uncover<5->{
\item Target audience analysis omitted {\large\color{orange}\ding{55}}}
\end{itemize}
\item What \textbf{interesting and relevant} explanations can SoPa++
provide?
\begin{itemize}
\setlength\itemsep{0.5em} \uncover<6->{\item Regular expression samples
from salient TauSTE neurons analyzed
{\large\color{ForestGreen}\checkmark}
\item Linguistic features and inductive biases
{\large\color{ForestGreen}\checkmark}} \uncover<7->{\item Small sample
size {\large\color{orange}\ding{55}}}
\end{itemize}
\end{enumerate}
\end{frame}
\section{Further work}
\begin{frame}
\frametitle{Further work}
\uncover<1>{ Explainability: \setlength{\leftmargini}{0.5cm}
\begin{itemize}
\setlength\itemsep{0.5em}
\item Are SoPa++'s explanations \textbf{useful} for its target audience?
\end{itemize}
}
\vspace{10pt}
\uncover<2>{ Bias correction:
\begin{itemize}
\setlength\itemsep{0.5em}
\item Manual bias corrections through large-scale analysis of RE lookup layer
\item Mitigate \textbf{ethical} issues of using black-box models?
\end{itemize}
}
\vspace{10pt}
\uncover<3>{ Generalization:
\begin{itemize}
\setlength\itemsep{0.5em}
\item Possible to generalize branches with broad categories like
locations and numbers
\item For example, replace numeric tokens with
\texttt{\textbackslash-?[\textbackslash d]+\textbackslash
.?[\textbackslash d]*}
\item \textbf{Robustness} on unseen data?
\end{itemize}
}
\vspace{10pt}
\uncover<4>{ Efficiency:
\begin{itemize}
\setlength\itemsep{0.5em}
\item \textbf{Parallelize} RE lookup layer
\item Utilize GPU-based regular expression matching algorithms
\citep{wang2011gregex,zu2012gpu,yu2013gpu}
\end{itemize}
}
\end{frame}
\begin{frame}{}
\frametitle{\null}
\centering
\Huge
\textit{Thank you for your time and attention} \ensuremath\heartsuit
\end{frame}
\section*{Bibliography}
\begin{frame}[allowframebreaks]
\frametitle{Bibliography}
\printbibliography[title = {Bibliography}, heading=none]
\end{frame}
\section*{Appendix}
\begin{frame}
\frametitle{Weighted Finite-State Automaton (WFA)}
\begin{definition}[Semiring; \citealt{kuich1986linear}]
\label{def:semiring} A semiring is a set $\mathbb{K}$ along with two
binary associative operations $\oplus$ (addition) and $\otimes$
(multiplication) and two identity elements: $\bar{0}$ for addition and
$\bar{1}$ for multiplication. Semirings require that addition is
commutative, multiplication distributes over addition, and that
multiplication by $\bar{0}$ annihilates, i.e., $\bar{0} \otimes a = a
\otimes \bar{0} = \bar{0}$.
\begin{itemize}
\item Semirings follow the following generic notation: $\langle
\mathbb{K}, \oplus, \otimes, \bar{0}, \bar{1} \rangle$.
\item \textbf{Max-sum} semiring: $\langle \mathbb{R} \cup \{-\infty\},
\text{max}, +, -\infty, 0 \rangle$
\item \textbf{Max-product} semiring: $\langle \mathbb{R}_{>0} \cup
\{-\infty\}, \text{max}, \times, -\infty, 1 \rangle$
\end{itemize}
\end{definition}
\begin{definition}[Weighted finite-state automaton;
\citealt{peng2018rational}]
\label{def:wfa} A weighted finite-state automaton over a semiring
$\mathbb{K}$ is a 5-tuple $\mathcal{A} = \langle \Sigma, \mathcal{Q},
\bm{\Gamma}, \bm{\lambda}, \bm{\rho} \rangle$, with:
\begin{itemize} \itemsep0em
\item[--] a finite input alphabet $\Sigma$;
\item[--] a finite state set $\mathcal{Q}$;
\item[--] transition matrix $\bm{\Gamma}: \mathcal{Q} \times
\mathcal{Q} \times (\Sigma \cup \{\epsilon\}) \rightarrow \mathbb{K}$;
\item[--] initial vector $\bm{\lambda}: \mathcal{Q} \rightarrow
\mathbb{K}$;
\item[--] and final vector $\bm{\rho}: \mathcal{Q} \rightarrow
\mathbb{K}$.
\end{itemize}
\end{definition}
\end{frame}
\begin{frame}
\frametitle{Explainability evaluation guidelines}
How do we estimate the quality of explanations? \setlength{\leftmargini}{0.5cm}
\begin{itemize}
\item Difficult to evaluate due to subjectivity
\item Involves cognitive sciences, sociology and human psychology
\item Or, at the simplest, a survey of the target audience
\end{itemize}
\vspace{15pt}
\citet{arrieta2020explainable} and \citet{MILLER20191} provide three
guidelines for this:
\begin{enumerate}
\setlength\itemsep{1em}
\item Contrastive
\begin{itemize}
\item Why is decision X $>$ decision Y?
\end{itemize}
\item Causal
\begin{itemize}
\item What caused the model to choose decision X?
\item Discrete causes over probabilities
\end{itemize}
\item Selective
\begin{itemize}
\item Rank possible explanations
\item Provide the most salient explanation
\end{itemize}
\end{enumerate}
\end{frame}
\end{document}
% LocalWords: explainability Atreya Shankar Sharid Loáiciga nd Müller SoSe NLP WFA
% LocalWords: Semiring semiring Semirings SoPa's WFAs preprocessed TauSTE STEs
% LocalWords: TauSTE's embeddings GloVe WFA's perceptron hyperparameter RQ REs
% LocalWords: Upsampling Softmax accuracies FMTOD's softmax centric SOTA NLP
% LocalWords: Parallelize XAI FMTOD TOC
\documentclass[a4paper,10pt]{article}
\usepackage{graphicx}
\usepackage{amsfonts}
\usepackage{amssymb}
\usepackage{amsmath}
\usepackage{latexsym}
\usepackage{enumerate}
\usepackage{mdwlist}
% Set some parameters to make the document look better.
\setlength{\parskip}{0.25pc}
%\setlength{\parindent}{0pt}
%opening
%\title{An Exploration of HQSOMs}
\title{An Exploration of Hierarchical Quilted Self-Organizing Maps}
\author{Theodore Hilk, Joseph Lynch}
\begin{document}
\maketitle
\begin{abstract}
\noindent We explore the paper ``Biomimetic sensory abstraction using hierarchical quilted
self-organizing maps,'' by J. W. Miller and P. H. Lommel of the Charles Stark Draper Laboratory. We
place the paper in historical context, provide a high-level summary, and briefly review related
literature. We give both a qualitative overview and a specific mathematical formulation of the
hierarchical quilted self-organizing map (HQSOM) model, and provide a thorough, reproducible record
of our design and implementation process for a Python-based HQSOM framework. We present our results
from conducting each of the experiments described in Miller and Lommel's paper, which involved the
classification of various shapes invariant to shift and scale transformations in 3x3- and 7x7-pixel
fields. We obtain results consistent with those of the authors, albeit noting considerable
dependence on parameter values and the ambiguity of a certain key formula in the paper.
Subsequently, we develop and test several improvements to the HQSOM algorithm itself, some of
which outperform the original in classification tasks, especially when noise is introduced.
We then implement preprocessing adaptations to allow us to apply both the original and improved
HQSOM algorithms to audio, and we succeed in developing a genre classifier with excellent
out-of-sample performance. We conclude by describing further possibilities for extension and
discussing the broader significance of this line of research.
\end{abstract}
\section{Introduction and Motivation}
Biologically-motivated algorithms have a long history in artificial intelligence and machine
learning. Indeed, AI as a field was born out of attempts to simulate human intelligence using
artificial systems\cite{AIHistory1}, and one of the earliest techniques, the perceptron, was
explicitly based on a simplified model of a biological neuron.\cite{Rosenblatt}
Although symbolic approaches enjoyed much more prominence following the publication of papers
recognizing the limitations of early neural networks in the early 1960s\cite{DreyfusMind}, their own
difficulties in dealing with problems of perception, learning, and pattern recognition led to a
resurgence of interest in sub-symbolic systems from the 1980s onward.\cite{AIHistory1, AIHistory2,
MLHistory1} It was during this period that the field of machine learning gained substantial
prominence, focusing on pattern recognition, classification, and adaptive control, and placing a
heavy emphasis on quantitative and often statistical methods as opposed to symbolic and logical
ones.\cite{MLHistory1}
Beginning largely in the early 1990s, collaboration in turn between machine learning researchers and
neuroscientists produced models of neurological and cognitive systems demonstrating both strong
concordance with empirical observations and excellent performance in the tasks addressed by those
same systems.\cite{Poggio} Investigation in the area from that time onward has focused on the theory
and simulation of increasingly substantial and complex aspects of human neuroscience, with a
particular emphasis on neocortical functionality, the problem of vision, and the use of
hierarchical, invariant representations for sensory stimuli.\cite{Poggio, HQSOM} The significance of
such modeling efforts can hardly be overstated: they have led to new findings in neuroscience,
improvements in brain-computer interface technology, and perhaps most profoundly, a new paradigm in
artificial intelligence research.\cite{HQSOM, OnIntelligence}
Currently, the most prominent of these models include the Neocognitron (Fukushima), HMAX
(Riesenhuber, Poggio, Serre), the Neural Abstraction Pyramid (Behnke), Hierarchical-Temporal Memory
(HTM) (George, Hawkins), Adaptive Resonance Theory (Carpenter, Grossberg), and VisNet (Stringer,
Rolls).\cite{HQSOM} With the exception of HTM, each of these approaches either relies on substantial
hard-coded \emph{a priori} knowledge specific to vision, neglects the role of temporal associations
in learning, or requires separate training and execution phases in use.\cite{HQSOM, HTMAlgo} HTM in
turn involves complex and inelegant models of ``neurons'', though they bear little resemblance to
those modeled by computational biologists, and at its core is nothing more than a recurrent
spatio-temporal clustering algorithm incorporating interlayer feedback to provide auto-associative
prediction capabilities.\cite{HTMAlgo}
Our goals in undertaking this project were to identify and generalize the essential aspects of
existing hierarchical cortical models, and to extend and improve upon techniques from the
literature. Therefore, we chose the simplest and most general such model we could find that was not
subject to the above-enumerated limitations. This was the Hierarchical Quilted Self-Organizing Map
(HQSOM) model developed by Miller and Lommel, which we have succeeded in analyzing, reproducing,
testing, improving, and extending.
Specifically, we have implemented the HQSOM model in numerical Python, conducted the same visual
shape recognition experiments as the paper's authors over a wide range of parameter values, and
succeeded in reproducing their results in spite of ambiguity surrounding a particular key formula in
the paper. Further, we have characterized the significance and implications of the HQSOM's
parameters, improved the noise performance of the algorithm by including a mean-squared-error-based
activation function, and enhanced its separability characteristics by using a peripherally
inhibitory "Mexican hat" neighborhood function instead of the standard gaussian. We have also
improved its convergence rate without
compromising representation quality, through the incorporation of an adaptive learning rate that is
applied to the best-matching input
prototype when a given input differs greatly from all current prototypical
representations. We have also improved the efficiency of the training process by
creating a "reset" function, which both eliminates the need for blank input sequences between
successive training sequences, and ensures that no residual effects can carry over between these
training sequences. Finally, we have extended our implementation to represent and classify
audio, and successfully built a system to learn and classify music based on genre which performs
very well out-of-sample. We believe that even greater potential exists for the improvement and
generalization of this approach, and we highlight some of our views on the matter in our conclusion
below.
\section{Paper and Summary}
We chose to replicate the paper ``Biomimetic sensory abstraction using hierarchical quilted
self-organizing maps," by Jeffrey W. Miller and Peter H. Lommel of the Draper Laboratory. The paper
develops a novel classification algorithm called a hierarchical quilted self-organizing map (HQSOM),
then applies it to two simple visual processing tasks. The motivation for this approach lies in the
desire to model the operation of the human visual cortex in a way that improves upon the realism of
earlier techniques by: relying more on learning and less on hard-coded \emph{a priori} knowledge,
using temporal associations and not just spatial ones to construct representations, and learning
on-line instead of requiring a separate training period.
The paper discusses several benefits of modeling the brain, noting that more accurate models have
allowed for the creation of improved AI techniques, neurophysiological discoveries, and even better
brain-computer interfaces. The authors choose to focus on vision in particular because it has such
rich applications in software and is handled so easily by animals.
They then discuss various aspects of brain structure and function, noting that the isocortex,
comprising about 85\% of the brain in humans, is responsible for most sensory processing, motor control,
language, logic, mathematics, and spatial visualization. In spite of this, it appears to have a
uniform, hierarchical structure throughout its various functional regions, and a substantial portion
of the contemporary cognitive neuroscience community posits that the elements of this structure
perform essentially the same information processing operations throughout the cortex.
Examining the ventral stream of the visual cortex in particular, the authors note that its regions
are wired in a hierarchy. At the base, V1 neurons respond to small lines of various orientations in
their respective and narrow portions of the visual field, while neurons in the higher V2 region
respond to somewhat less localized simple shapes, those of the even higher V4 region respond to more
complex shapes across larger areas, and those in the IT region respond invariantly to complex
objects throughout the entire field of view. The authors then mention a few similar cortical models,
noting the above limitations in realism. Such alternative models are described in more detail in our
literature review below.
Miller and Lommel define the goals for their own model in terms of biological functional
equivalence, generalizability across multiple sensory domains, and simplicity. They specifically
note that computational efficiency was only a secondary goal. In pursuing functional equivalence,
the authors take the explicit position that there exists some basic ``building block'' or unit of the
cortex beyond which no further details must be modeled, so long as the abstract functionality of the
unit is properly preserved.
They describe this functionality in terms of the unsupervised learning of abstract patterns in the
input data, and of the recurrent learning of patterns in the sequences of input patterns at lower
levels of the hierarchy. They propose that this reduction of sensory input to abstract concepts may
be conceived in terms of the related processes of spatial and temporal clustering, or ``pooling''.
Spatial pooling learns patterns of input data that are spatially co-incident at a given level of the
hierarchy, while temporal pooling learns patterns that tend to occur near one another in time.
The authors note that numerous algorithms exist for spatial clustering, including K-means, SOMs,
expectation maximization, and winner-take-all neural networks. This is discussed further below in terms
of our current and future extensions to the HQSOM model. Likewise, temporal clustering and sequence
processing can be performed with anything from basic statistical techniques like ARMA, ARIMA, NARMA,
GARCH, N-GARCH, etc. to recurrent neural networks, temporal Kohonen maps (TKMs), or the simpler and
faster-converging recurrent self-organizing maps (RSOMs).
In keeping with this approach, the HQSOM algorithm creates invariant, low-dimensional
representations of high-dimensional data by passing them through successive stages of spatial and
temporal pooling. Spatial pooling is accomplished by means of self-organizing maps (SOMs), while
temporal pooling is handled by a type of modified SOM called a recurrent self-organizing map (RSOM).
The authors elected to use the SOM model because it was familiar to them, and because it is
relatively simple and well-researched. Likewise, they chose RSOMs for temporal pooling due to their
demonstrated performance in the area and their elegant similarity to the ordinary SOM.
A SOM, or Kohonen network, is a type of unsupervised nonlinear dimensionality reduction algorithm,
often used for data visualization. It can also be viewed as a form of vector quantization. SOMs
operate by building a table of ``weight vectors'' in the input space, then modifying the weight
vectors closest to each successive input vector to make them closer to that input vector, such that
the set of weight vectors overall converges to yield a summary representation of the clusters in the
input data in a topology-preserving fashion. An RSOM is a SOM that takes an exponential moving
average over the differences between input vectors and weight vectors when computing these
distances, which allows measures of similarity to extend across a succession of inputs and hence
into the temporal domain. Both SOMs and RSOMs are described in more mathematical detail in the
section entitled \em System Design and Variables \em below.
After describing the motivation and technical details of HQSOMs, Miller and Lommel's paper describes
the recursive combination of HQSOM base units, which are comprised of a SOM whose regularized
activation vector over its weight vectors is provided as input to an RSOM (see below for specific
mathematical details). The SOM performs spatial clustering over its input, yielding a set of
representative spatial feature vectors in the input space, and the RSOM proceeds to perform temporal
clustering over sequences of these spatial patterns, such that the RSOM's weight vectors each
correspond to some particular spatiotemporal cluster in the input data. Thus, the HQSOM base unit as
a whole produces a dimensionally-reduced representation of the spatiotemporal contents of its
input. It produces spatiotemporal abstractions. Composing HQSOMs in a hierarchical arrangement, such
that the activation vector over the weight vectors of the RSOM in a given HQSOM base unit is
provided as the input to the SOM of another HQSOM base unit, allows for the representation of
progressively more invariant spatiotemporal patterns in the input data.
The authors then describe two experiments involving the learning and classification of simple visual
patterns, which we describe in detail and reproduce below.
\section{Hypothesis, Plan, and Risks}
We wish to begin by re-implementing the HQSOM algorithm described in the paper, using numerical
Python. The primary risk in doing so is that the algorithm is rather complex, and certain aspects
are not thoroughly described. We also expect that it will take a long time to code, and that the
high dimensionality and fairly opaque nature of the HQSOM's representations may make it difficult to
debug our implementation. For example, if a network is failing to produce the desired result,
looking at the RSOM unit map is not particularly helpful.
We will then attempt to reproduce the experimental results presented by Miller and Lommel,
which showed that HQSOMs could form shift- and scale-invariant representations of various shapes
within
3x3- and 7x7-pixel fields of vision. We expect that the testing framework and test data may take a
considerable amount of time to create. We also anticipate that the experimental results may be
difficult to reproduce due to parameter sensitivity, and that computational efficiency may pose
hurdles given the number of training cycles used in the paper. As above, we again note that it can
be difficult to debug these types of networks due to the internal structure being fairly
mathematical in nature. Finally, the potential fragility of
these types of systems is well known in the academic community, so it is very possible that we will
not be able to get any viable results whatsoever due to bad parameters and long test runs.
Once we are able to replicate these results to a
reasonable degree of precision, we will extend the HQSOM framework into the audio domain and attempt
to
classify music by genre. The main risks emerge from the potential complexity of these extensions,
the nuances involved in producing high-quality spectrograms, and the anticipated large size and
computational burden of networks capable of classifying something as abstract as genre.
\section{System Design and Variables}
HQSOM networks are comprised of hierarchically stacked building blocks known as HQSOM base units.
An HQSOM base unit in turn consists of a stacked SOM-RSOM pair, such that input to the base
unit as a whole is provided as the input to the SOM, then the regularized activation vector over the
map space of the SOM is provided
as the input to the RSOM, and finally the activation vector over the map space of the RSOM is
provided as the input to
any HQSOM base units stacked above this one. If this is the top unit in the network, then this
final RSOM activation vector yields an invariant summary of the input data as a whole and may be
used for classification.
Composing these SOM-RSOM pairs into multi-layered networks yields Hierarchical Quilted
Self-Organizing Maps (HQSOMs), which can
identify both spatial and temporal clustering in data over multiple levels of abstraction. The
general use case for HQSOMs is to identify spatiotemporal clusters in input data, such that a
supervised learning technique can be applied to actually make the classification, since the clusters
themselves rely on some associated semantic meaning in order to be regarded as labels. However, for
the sake of this paper, we will take such semantic meaning for granted and thus consider
clustering equivalent to classification. The following
discussion of SOMs, RSOMs and HQSOMs is based on the corresponding sections of the Miller and Lommel
paper, and hence the vast majority of this method explanation can be found in that
paper as well \cite{HQSOM}.
\subsection{SOM}
The basic SOM computational block can either be trained on data or asked to classify data. The SOM
is made up of an $m \times n$ matrix that maps inputs of dimension $m$ to outputs of dimension $n$. For
example, a SOM designed to take in 3-dimensional vectors and output a 5-dimensional vector looks like:
\begin{center}
$
\begin{pmatrix}
.3 & .7 & .1 & .14 & .01\\
.3 & .1 & .01 & .16 & .9\\
.3 & .03 & .8 & .7 & .01
\end{pmatrix}
$
\end{center}
Each column is a map unit, and whichever map unit $\bold{w_b}$ is closest to the input $\bold{x}$
is considered the best-matching unit (BMU). The measure of closeness is usually simply Euclidean
distance, so:
\begin{equation}
\bold{w_b} = \operatorname*{argmin}_{\bold{w_i}} ||\bold{x} - \bold{w_i}||
\end{equation}
During the training stage, input vectors are presented one at a time, and an
update rule is applied over the entire map space that shifts the map units nearest to the
input towards it:
\begin{equation} \label{eq:UPDATE}
\bold{w_{i}}(t+1) = \bold{w_i}(t) + \gamma h_{ib}(t)(\bold{x}(t)-\bold{w_i}(t))
\end{equation}
where $\gamma$ is the learning rate, $h_{ib}$ is the neighborhood function, and $\bold{w_i}$ is the
map unit being modified. The neighborhood function is defined as a function that is close to zero
for units far away from the BMU. Traditionally a Gaussian is used:
\begin{equation} \label{eq:GAUSSIAN}
h_{ib}(t) = \exp\left(\frac{-||I_i-I_b||^2}{\mu(t)\sigma^2}\right)
\end{equation}
where $I_i$ indicates the index of the $i$th unit, $\mu(t)$ is a decreasing function of mean
squared error and $\sigma$ is the learning radius. A SOM therefore has two parameters that need to
be tuned: the learning rate $\gamma$ and the learning radius $\sigma$. For example, two sample unit
maps after an update with $\bold{x}=(.1,.1,.1), \gamma=.2$ and different values of $\sigma$ would look like:
\begin{figure}[h]
\begin{center}
$
\begin{pmatrix}
0.26 & 0.7 & 0.1 & 0.14 & 0.01 \\
0.26 & 0.1 & 0.01 & 0.16 & 0.9 \\
0.26 & 0.03 & 0.8 & 0.7 & 0.01
\end{pmatrix}$
\caption{$\sigma$ = 1}
\centering
$\begin{pmatrix}
0.26 & 0.607 & 0.1 & 0.139 & 0.01 \\
0.26 & 0.1 & 0.017 & 0.159 & 0.897 \\
0.26 & 0.041 & 0.748 & 0.687 & 0.01
\end{pmatrix}
$
\caption{$\sigma$ = 100}
\end{center}
\end{figure}
Clearly, the update with the larger $\sigma$ affected more of the map space. It is also important to
note that units were pulled towards the input, but with a less dramatic effect as the map index
moved further away from that of the BMU.
During activation, the SOM can return two types of activation vectors:
\begin{enumerate}
\item Discrete: A vector of the correct dimension with the BMU index set to 1 and all others set
to 0.
\item Continuous: A vector $A(t)$ defined as the normalized form of a vector constructed as follows:
\begin{equation}
A_i = \frac{1}{||\bold{x}(t) - \bold{w_i}||^2}
\end{equation}
\end{enumerate}
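For concreteness, a single SOM training step as described above can be sketched in numerical
Python roughly as follows. This is our own minimal illustration rather than the reference code; it
assumes a 1-dimensional map index topology, a fixed $\mu(t)$, and the column-per-map-unit layout of
the example matrix above, and the function name \texttt{som\_update} is ours:
\begin{verbatim}
import numpy as np

def som_update(W, x, gamma, sigma, mu=1.0):
    """One SOM training step; W is (m, n): m = input dim, n = map units (columns)."""
    dists = np.linalg.norm(W - x[:, None], axis=0)   # distance from x to each map unit
    b = np.argmin(dists)                             # best-matching unit (BMU) index
    idx = np.arange(W.shape[1])
    h = np.exp(-(idx - b) ** 2 / (mu * sigma ** 2))  # Gaussian neighborhood function
    W += gamma * h[None, :] * (x[:, None] - W)       # SOM update rule from above
    return b
\end{verbatim}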
\subsection{RSOM}
The Recurrent SOM is an extension of the basic SOM that adds an exponential moving average of
differences between observed inputs and units in the map with time-decay parameter $\alpha$ . At
each update the differences are updated and instead of looking for the BMU in map space, the BMU
index is chosen by finding the minimum magnitude recursive difference. Furthermore, instead of
applying $\bold{x}(t)$ directly to the map, the recursive difference for a particular unit is
applied in each unit's update rule and $\bold{x}(t)$ is used to update the recursive difference
matrix:
\begin{equation}
\bold{y_i}(t+1) = (1-\alpha)\bold{y_i}(t)+\alpha(\bold{x}(t)-\bold{w_i}(t))
\end{equation}
The update rule becomes:
\begin{equation} \label{eq:RUPDATE}
\bold{w_{i}}(t+1) = \bold{w_i}(t) + \gamma h_{ib_r}(t)\bold{y_i}(t)
\end{equation}
where the neighborhood function is computed using the recursive BMU as the BMU index instead of the
map space BMU. The proper tuning of $\alpha$ depends on how responsive one wishes the RSOM to
be: lower values of $\alpha$ cause the moving average of inputs to dominate (long-term memory),
whereas higher values of $\alpha$ (close to 1) mean that recent inputs dominate (short-term memory).
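Analogously, one RSOM step can be sketched as follows (again our own hedged sketch under the same
layout assumptions as the SOM sketch above, not the reference implementation):
\begin{verbatim}
import numpy as np

def rsom_update(W, Y, x, gamma, sigma, alpha, mu=1.0):
    """One RSOM step; Y is (m, n) and holds the recursive (moving-average) differences."""
    Y += alpha * ((x[:, None] - W) - Y)        # y(t+1) = (1 - alpha) y(t) + alpha (x - w)
    b = np.argmin(np.linalg.norm(Y, axis=0))   # recursive BMU: smallest recursive difference
    idx = np.arange(W.shape[1])
    h = np.exp(-(idx - b) ** 2 / (mu * sigma ** 2))
    W += gamma * h[None, :] * Y                # update rule applied to recursive differences
    return b
\end{verbatim}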
\subsection{SOM-RSOM Pair}
The final computational structure is the SOM-RSOM pair. SOMs are effective at
identifying spatial clustering but perform no temporal clustering, while RSOMs capture temporal
clustering at the cost of degraded spatial clustering; the SOM-RSOM pair is therefore
intended to combine the strengths of both. First the input data is fed into the SOM to get a spatial
representation, and then this representation is fed into the RSOM to do temporal clustering.
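Put together, one base-unit step looks roughly like the following sketch, which reflects our
reading of the pair rather than a definitive implementation: it reuses the \texttt{som\_update} and
\texttt{rsom\_update} sketches above, uses the continuous SOM activation from the SOM section as
the interface between the two maps, and the parameter dictionary keys are our own:
\begin{verbatim}
import numpy as np

def base_unit_step(som_W, rsom_W, rsom_Y, x, p):
    """One SOM-RSOM pair update: spatial pooling, then temporal pooling."""
    som_update(som_W, x, p["gamma_som"], p["sigma_som"])
    # continuous SOM activation: normalized inverse squared distances
    a = 1.0 / (np.linalg.norm(som_W - x[:, None], axis=0) ** 2 + 1e-12)
    a /= np.linalg.norm(a)
    # the RSOM clusters sequences of these activations; its BMU acts as the
    # unit's spatiotemporal cluster label
    return rsom_update(rsom_W, rsom_Y, a,
                       p["gamma_rsom"], p["sigma_rsom"], p["alpha"])
\end{verbatim}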
\section{Implementation Process and Results}
Two implementations of SOMs and RSOMs were written, one being as close as possible
to the reference implementation for reproducibility, and the other having a number of slight
changes to improve convergence. As is to be expected, the reference paper leaves out a
number of implementation details, so this is our best approximation.
\subsection{Reference Implementation of SOM and RSOM Units}
Self-Organizing Maps were implemented via a Python class with three main methods: a constructor,
an update method that takes a numpy array representing the input vector as well as the learning
parameters ($\gamma$, $\sigma$, etc.) to use for that particular update and modifies the internal state
of the SOM accordingly, and a method to request the activation vector for a given input vector.
Internally the SOM map was stored
as an $m \times n$ numpy array where $n$ is the input vector size and $m$ is the size of the map space.
During an update call, the Best Matching Map Unit (BMU) for any given input vector was determined
using a linear search for the minimum Euclidean distance and then all map units near to the BMU as
well as the BMU were shifted towards the input according to a Gaussian neighborhood function. The
standard SOM update rule was used as per equation ($\ref{eq:UPDATE}$).
A linear search was preferred due to the high dimensionality of the space, and a Gaussian
neighborhood function was chosen for reproducibility. The activation method returned either a
discrete or a continuous representation of the map's activation to a given input. The discrete
representation is defined as above, and the normalized representation is given in equation
($\ref{eq:EUCMETH}$) where $a_i$ represents the $i$th position in the activation vector,
$\bold{w_b}$ is the BMU, $\bold{x}$ is the input vector, and $\bold{w_i}$ represents the $i$th map
unit.
\begin{equation} \label{eq:EUCMETH}
a_i = \frac{1}{||\bold{w_i}-\bold{x}||}, \bold{a} = \frac{\bold{a}}{||\bold{a}||}
\end{equation}
\\
Recurrent Self-Organizing Maps were simply a subclass of the SOM that uses the modified RSOM update
rule and stores the recursive difference matrix as a numpy array. The time-decay parameter
($\alpha$) was passed in at every update call.
\subsection{Basic Design of SOM-RSOM Pair and Other Hierarchical Structures}
Since both SOMs and RSOMs are implemented as Python objects, the SOM-RSOM pair simply consisted of
a SOM object and an RSOM object with update and activation methods that take in an input vector
$\bold{x}$, feeds it into the SOM to get a transformed activation vector $\bold{y}$ and finally
takes that $\bold{y}$ and feeds it into the RSOM to get the final output which is the BMU of the
RSOM. The only difference between the SOM-RSOM update and activation methods is that the update
method calls update internally (thus changing the state of the network), whereas the activation
method merely passes along activation
vectors. In the code this SOM-RSOM pair was referred to as an HQSOM because a single SOM-RSOM pair
does indeed form the simplest HQSOM.
Hierarchies were built at first by hard-wiring these HQSOM base units together. However, in order
to facilitate testing of the audio extension, a framework was designed that allowed for an
arbitrary tree-structured HQSOM accepting input with a 1-dimensional topology (e.g. a line of pixels or
spectral power densities, as opposed to a 2d image). The first level of the tree reads data from
the input and passes its activation vector to the next layer, which passes its activation vector to
the next layer, and so on. The output of the top-level node is the representation of the input that is
(hopefully) invariant under certain conditions.
\subsection{Replication of First Experiment}
The first experiment presented in the paper was a simple example of 3x3 images with 3-pixel
horizontal and vertical lines that have been shifted to all possible positions. This data
set is small enough to be enumerated, and simple enough in concept to use a single SOM-RSOM pair as
the HQSOM network. We implemented the network as a single HQSOM that had an input size of 9,
internal SOM map size of 18, and internal RSOM map size of 3. We mapped the 3x3 image grids
to a linear vector of size 9 by iterating through the image left to right and top to bottom.
The mapping used is shown in Figure $\ref{fig:3TestMapping}$.
\begin{figure}[ht]
\begin{center}
$\begin{pmatrix}
1 & 2 & 3 \\
4 & 5 & 6 \\
7 & 8 & 9
\end{pmatrix}
\rightarrow
\begin{pmatrix}
1 & 2 & 3 & 4 & 5 & 6 & 7 & 8 & 9
\end{pmatrix}
$
\end{center}
\caption{Experiment 1 Image to Vector Mapping}
\label{fig:3TestMapping}
\end{figure}
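In numpy terms this mapping is simply a row-major flatten, for example:
\begin{verbatim}
import numpy as np

image = np.arange(1, 10).reshape(3, 3)   # the 3x3 grid shown above
vector = image.reshape(-1)               # row-major flatten: 1 2 3 ... 9
\end{verbatim}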
The implementation was shown to be correct by two different tests: performance on noise-free
data, and aggregate performance over many noisy data sets. During training, the HQSOM is exposed to
three blank images, then three line images, then three more blank images, and so on, with the
line-image triples alternating between the three horizontal and the three vertical lines. An example
training sequence (without noise) is shown in Figure $\ref{fig:3TestData}$.
\begin{figure}[ht]
\begin{center}
\includegraphics[scale=.3]{./exp1_dataset.png}
% exp1_dataset.png: 200x200 pixel, 72dpi, 7.05x7.05 cm, bb=0 0 200 200
\end{center}
\caption{Experiment 1 Training Sequence}
\label{fig:3TestData}
\end{figure}
Applying the sequence shown in Figure $\ref{fig:3TestData}$ hundreds of times with parameters
$\gamma_{som} = \gamma_{rsom} = .1, \sigma_{som}=16, \sigma_{rsom}=90,$ and $ \alpha = .1$
trained the HQSOM and clustered the weight vectors in the map units of the SOM and RSOM. Since the
blank images are solely meant to reset the RSOM EMA difference matrix, a method was added to RSOMs
and HQSOMs that clears the difference matrix (with a random index set to a
value of .01 so that the BMU varies randomly after each reset), and the training steps with blank
images were replaced with a call to this method. After roughly 2500 training samples had been shown to
the network, the HQSOM was asked to classify (return the BMU of the top-level RSOM) each piece of
data again. As expected, all horizontal lines were classified the same, all vertical lines were
classified the same, and all blank images were classified the same, regardless of position. The
network had successfully formed an invariant representation of vertical and horizontal lines in a
3x3 field of view.
\\
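A rough sketch of this training procedure, assuming the HQSOM class sketched earlier, is given
below; the helper functions and the exact schedule are illustrative rather than a transcript of our
test code.
\begin{verbatim}
import numpy as np

def make_line_images():
    # Three horizontal and three vertical 3-pixel lines in a 3x3 field of view
    horizontals, verticals = [], []
    for i in range(3):
        h = np.zeros((3, 3)); h[i, :] = 1.0
        v = np.zeros((3, 3)); v[:, i] = 1.0
        horizontals.append(h.reshape(-1))
        verticals.append(v.reshape(-1))
    return horizontals, verticals

def reset_difference(hqsom, rng=np.random.default_rng()):
    # Clear the RSOM difference matrix, then set one random entry to .01 so
    # that the BMU varies after each reset (replaces the blank-image steps)
    hqsom.rsom.diff[:] = 0.0
    hqsom.rsom.diff[rng.integers(len(hqsom.rsom.diff)), 0] = 0.01

hqsom = HQSOM(input_size=9, som_size=18, rsom_size=3)
horizontals, verticals = make_line_images()
for _ in range(420):                        # roughly 2500 training samples
    for family in (horizontals, verticals):
        for img in family:
            hqsom.update(img, gamma_som=0.1, sigma_som=16,
                         gamma_rsom=0.1, sigma_rsom=90, alpha=0.1)
        reset_difference(hqsom)
\end{verbatim}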
To test the noise tolerance of the network, Gaussian noise with standard deviation .1, .2, .25, and
.3 was applied to the data; the network was trained on the noisy data as before, and the HQSOM was
again asked to classify samples of noisy vertical and horizontal lines (with different noise from
the training data). The results of 100 trials at each noise level are summarized in the following
table:
\begin{center}
\begin{tabular}{ | c | c | }
\hline
Noise Std. Deviation & Number Correctly Clustered\\ \hline
.1 & 99/100 \\ \hline
.2 & 31/100 \\ \hline
.25 & 5/100 \\ \hline
.3 & 4/100\\
\hline
\end{tabular}
\label{table:3TestResults}
\end{center}
Clustering ``correctly'' simply means that all vertical lines had the same BMU at the output, all
horizontal lines had the same BMU at the output but different from the vertical lines, and all
blank images had the same BMU at the output but different from either the vertical or horizontal
lines. For example: $ \begin{pmatrix} 1 & 0 & 0 & 0 & 2 & 2 & 2 \end{pmatrix}$ is a ``correct''
clustering if we apply one blank image, three vertical lines, and three horizontal lines but $
\begin{pmatrix} 2 & 0 & 0 & 0 & 0 & 0 & 0 \end{pmatrix}$ is not. It is worth noting that when fewer
than 2000 training steps were taken, the map would often converge to a ``Something vs.\ Nothing''
map, in the sense that the HQSOM would be very good at clustering lines together and blank images
together, but would not differentiate between the two types of lines. This makes sense because
$\alpha$ is small, so the final representation can take time to form.
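For illustration, the noise injection and the correctness check can be sketched as follows, assuming
the HQSOM sketch from earlier; the helper names are ours.
\begin{verbatim}
import numpy as np

def add_noise(images, std, rng=np.random.default_rng()):
    # Gaussian pixel noise with the given standard deviation
    return [img + rng.normal(0.0, std, size=img.shape) for img in images]

def correctly_clustered(hqsom, blanks, verticals, horizontals):
    # "Correct": each group maps to a single BMU and the three BMUs all differ
    groups = (blanks, verticals, horizontals)
    labels = [{hqsom.activation(img) for img in group} for group in groups]
    return all(len(s) == 1 for s in labels) and len(set().union(*labels)) == 3
\end{verbatim}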
\subsection{Replication of Second Experiment}
The second experiment presented in the paper aimed to create shift- and scale-invariant
representations of squares, diamonds, and X shapes in a 7x7 grid. To replicate this experiment, a
two-tiered network was created in which 9 low-level HQSOMs each inspect a 3x3
swatch of the 7x7 grid, with 1-pixel overlaps on each side. These base units feed into a top-level
HQSOM with a 9-dimensional input vector (composed of the BMUs of each of the bottom-level HQSOMs),
which in turn
outputs a BMU index representing the cluster to which an input belongs. The goal was to find a
shift-invariant representation of these shapes by exposing the network to each family of shapes,
scaled and then shifted around in a spiral fashion, followed by blanks for 100 steps,
then the next shape, more blanks, and so on. The input sequence is shown in Figure
$\ref{fig:7TestData}$.
\begin{figure}[ht]
\begin{center}
\includegraphics[scale=.3]{./exp2_dataset.png}
% exp2_dataset.png: 813x175 pixel, 72dpi, 28.68x6.17 cm, bb=0 0 813 175
\end{center}
\caption{Experiment 2 Input Data}
\label{fig:7TestData}
\end{figure}
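As a sketch, the decomposition of a 7x7 image (a numpy array) into its nine overlapping 3x3 swatches
might look like this; the function name is illustrative.
\begin{verbatim}
def swatches(image):
    # Nine overlapping 3x3 swatches of a 7x7 image; starting a new swatch
    # every 2 pixels produces the 1-pixel overlaps described above.
    return [image[r:r + 3, c:c + 3].reshape(-1)
            for r in (0, 2, 4) for c in (0, 2, 4)]
\end{verbatim}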
The paper claims upwards of 95\% clustering accuracy, but we were unable even to achieve a four-way
classifier using the parameters specified in the paper, listed in Figure $\ref{fig:PAPERSPECS}$.
\begin{figure}[ht]
\begin{center}
\begin{tabular}{ | l | c | c | c | c | }
\hline
& $\gamma$ & $\alpha$ & $\sigma$ & Map Size\\ \hline
Layer 1 SOMs & .1 & 1 & 4 & 65\\ \hline
Layer 1 RSOMs & .01 & .1 & 10 & 17\\ \hline
Layer 2 SOMs & .1 & 1 & 2 & 513\\ \hline
Layer 2 RSOMs & .001 & .01 & 50 & 17 \\
\hline
\end{tabular}
\caption{Experiment 2 Parameters}
\label{fig:PAPERSPECS}
\end{center}
\end{figure}
The best
run of our HQSOM yielded the following final distributions, where each entry is the
number of test images classified as that BMU divided by the total number of test images for the
given data set.
\begin{center}
\small
\begin{verbatim}
################################################################################
Data Set: BLANK
Most Frequently Classified As (MODE): 4
Full Distribution over Final RSOM Map Space:
[ 0. 0. 0. 0. 1. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0.]
################################################################################
Data Set: SQUARE
Most Frequently Classified As (MODE): 3
Full Distribution over Final RSOM Map Space:
[ 0. 0.08571429 0.17142857 0.45714286 0.28571429 0. 0.
0. 0. 0. 0. 0. 0. 0.
0. 0. 0. ]
################################################################################
Data Set: DIAMOND
Most Frequently Classified As (MODE): 2
Full Distribution over Final RSOM Map Space:
[ 0. 0.14285714 0.77142857 0.08571429 0. 0. 0.
0. 0. 0. 0. 0. 0. 0.
0. 0. 0. ]
################################################################################
Data Set: X
Most Frequently Classified As (MODE): 2
Full Distribution over Final RSOM Map Space:
[ 0. 0. 0.97142857 0.02857143 0. 0. 0.
0. 0. 0. 0. 0. 0. 0.
0. 0. 0. ]
\end{verbatim}
\end{center}
While there is certainly convergence, it is not 95\%. Because runs of this particular
simulation took well over 9 hours, we were only able to test about 10 different
parameter combinations, none of which yielded a better result than the one shown above. It
appeared that the HQSOM was not using the full breadth of the RSOM map space in any of the HQSOM
units, which may have resulted from an incorrect formulation of the activation vectors that were
being passed up. We suspect this as the cause because the formulas presented in the paper for the
continuous version of the activation vectors were not well-formed; we had to deduce the
proper formulas from somewhat unclear explanations of these vectors and their significance. We
also noticed that the networks frequently converged to certain states very quickly
and then never moved from those states. The lack of a good activation vector and this unintentionally
fast convergence
led us to the following innovations:
\begin{enumerate}
\item A regularized activation vector based on the Mean Squared Error of the BMU vector.
\item The notion of an adaptive $\gamma$.
\item The use of a Mexican Hat neighborhood function (second derivative of Gaussian) instead of a
Gaussian neighborhood function.
\end{enumerate}
\subsection{Changes to Algorithm and Relative Performance}
Having implemented the paper's networks as faithfully as we deemed feasible, we implemented the three
innovations mentioned above and tested them to compare relative
performance. In all cases our modified implementation proved superior, especially once we began
testing our audio extension.
\\
\\
We began by implementing the new activation vector. Since the adaptive $\sigma$ function was based
on minimizing the mean squared error (MSE) between the BMU and the input vector, we believed that MSE
would also be a good metric by which to measure activation. This led to the definition in equation
($\ref{eq:MSEMETH}$), where the vector parameters are the same as in equation ($\ref{eq:EUCMETH}$),
except that $\mathrm{MSE}(\mathbf{x},\mathbf{y})$ now denotes the mean squared error between vectors
$\mathbf{x}$ and $\mathbf{y}$.
\begin{equation} \label{eq:MSEMETH}
a_i = \frac{\mathrm{MSE}(\mathbf{w}_b, \mathbf{x})^3}{\mathrm{MSE}(\mathbf{w}_i,\mathbf{x})^3}
\end{equation}
The errors were cubed so that the distribution in the vector would be more concentrated on the
BMU's index.
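A sketch of this activation computation is given below; the small constant added to the MSE is our
guard against an exact match and is not part of equation ($\ref{eq:MSEMETH}$).
\begin{verbatim}
import numpy as np

def mse_activation(units, x):
    # Cubed MSE of the BMU over the cubed MSE of each map unit; the small
    # constant only guards against an exact match, it is not in the formula.
    mse = np.mean((units - x) ** 2, axis=1) + 1e-12
    return mse.min() ** 3 / mse ** 3
\end{verbatim}
As described above, the cubing concentrates the resulting distribution on the BMU's index.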
\\
\\
Next we implemented the adaptive $\gamma$. During testing, the MSE between the input and the BMU would
often spike when a training example that had never been seen before arrived, which would cause
$\sigma$ to spike, which would in turn pull the entire map space drastically towards the new training
example. This led to, for example, oscillation between two clusters while a third or fourth
possible cluster was lost entirely. To remedy this problem, we kept an exponential moving average of
the MSE in the SOMs of each HQSOM unit, and whenever the MSE spiked by more than an order of
magnitude, we set $\gamma$ equal to some high fraction (in our case .6) just for the update of the
BMU weight. This meant that a single map unit was pulled into the new cluster, becoming the
semi-permanent BMU for all new training data that fell in that cluster.
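A minimal sketch of this rule, with illustrative names and the order-of-magnitude threshold written
out explicitly:
\begin{verbatim}
def adaptive_gamma(mse_now, mse_ema, gamma, spike_gamma=0.6):
    # If the BMU error jumps by more than an order of magnitude relative to
    # its exponential moving average, return the large gamma, to be applied
    # to the BMU weight update only.
    return spike_gamma if mse_now > 10.0 * mse_ema else gamma
\end{verbatim}
The threshold of one order of magnitude and the fraction .6 are the values quoted above; the rest of
the bookkeeping is illustrative.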
\\
\\
Lastly we replaced equation $\ref{eq:GAUSSIAN}$ with equation $\ref{eq:MEXICANHAT}$.
\begin{equation}\label{eq:MEXICANHAT}
h_{ib}(t) = \left(1-\frac{||I_i-I_b||^2}{\sigma^2}\right)\exp\left(\frac{-||I_i-I_b||^2}{\mu(t)\sigma^2}\right)
\end{equation}
The advantage of this function is that it pushes away map units that are near to the input
but far enough away to be considered non-matching. This helps deal with the premature convergence
noticed during testing. The drawback is that the $\sigma$ parameter has to
be chosen very carefully to prevent uniform distributions from forming.
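A sketch of the neighborhood computation, assuming squared distances over map indices as in the
earlier SOM sketch:
\begin{verbatim}
import numpy as np

def mexican_hat(dist2, sigma, mu=1.0):
    # dist2: squared distances of each map index from the BMU index.  Units
    # that are close but not matching get a negative weight and are pushed
    # away from the input, which counters premature convergence.
    return (1.0 - dist2 / sigma ** 2) * np.exp(-dist2 / (mu * sigma ** 2))
\end{verbatim}
As noted above, $\sigma$ must be tuned carefully when this neighborhood is used.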
\\
\\
With our improvements in place, we re-ran the noise tolerance test from Experiment 1 and the large
network test from Experiment 2. The results of the noise tolerance test over 100 runs are
summarized in Figure $\ref{fig:3BetterTestResults}$.
\begin{figure}[ht]
\begin{center}
\begin{tabular}{ | c | c | c | }
\hline
Noise Std. Deviation & Paper Implementation & Our Implementation \\ \hline
.1 & 99/100 & 100/100 \\ \hline
.2 & 31/100 & 73/100\\ \hline
.25 & 5/100 & 39/100\\ \hline
.3 & 4/100 & 12/100\\
\hline
\end{tabular}
\end{center}
\caption{Experiment 1 Test with Noisy Data: Clustering Results }
\label{fig:3BetterTestResults}
\end{figure}
Clearly our implementation represents a significant improvement in the face of noisy data. This is
most likely because we better allow for late-cycle plasticity and are therefore able
to compensate for early noisy examples with later, less noisy examples.
\\
\\
Once again the second experiment proved challenging. We were unable to get any better results with
the parameters provided in the paper, but by using the following network parameters we were able
to get a nearly equivalent result with far fewer CPU cycles:
\begin{center}
\begin{tabular}{ | l | c | c | c | c | }
\hline
& $\gamma$ & $\alpha$ & $\sigma$ & Map Size\\ \hline
Layer 1 SOMs & .1 & 1 & $\sqrt{20}$ & 40\\ \hline
Layer 1 RSOMs & .01 & .1 & $\sqrt{150}$ & 25\\ \hline
Layer 2 SOMs & .1 & 1 & $\sqrt{15}$ & 150\\ \hline
Layer 2 RSOMs & .05 & .02 & 50 & 7 \\
\hline
\end{tabular}
\end{center}
\begin{center}
\small
\begin{verbatim}
################################################################################
Data Set: BLANK
Most Frequently Classified As (MODE): 2
Full Distribution over Final RSOM Map Space:
[ 0. 0. 1. 0. 0. 0. 0.]
################################################################################
Data Set: SQUARE
Most Frequently Classified As (MODE): 0
Full Distribution over Final RSOM Map Space:
[ 0.343 0. 0.314 0.171 0.143 0.029 0. ]
################################################################################
Data Set: DIAMOND
Most Frequently Classified As (MODE): 2
Full Distribution over Final RSOM Map Space:
[ 0. 0. 0.457 0.171 0.029 0.029 0.314]
################################################################################
Data Set: X
Most Frequently Classified As (MODE): 6
Full Distribution over Final RSOM Map Space:
[ 0. 0. 0.2 0.2 0.2 0.057 0.343]
SUCCESS
\end{verbatim}
\end{center}
When run with the same network, the reference implementation only produced two main statistical
clusters. Once again, our implementation seemed superior.
\subsection{Extension into Audio}
The final stage of our project was extending our framework to audio classification. The goal was
to give an HQSOM spectrograms and have the network cluster similar genres together.
The first step of implementation was to build a generic framework that allows an arbitrary
tree-structured network to be constructed without hard-wiring the HQSOMs together. Then, a test
framework was created that allowed us to take 15-second snippets of songs, compute FFTs over windows
lasting $\frac{1}{10}$ of a second for each song, and puncture the FFTs so as to reduce the
input space to a 128-dimensional vector. At the end of data processing we had the six spectrograms
shown in Figure $\ref{fig:FFTS}$. The data were derived from the following songs:
\begin{enumerate}
\item Techno (Training) - Rudenko - Everybody
\item Techno (Testing) - Kid Cudi - Day and Night
\item Rock (Training) - Red Hot Chili Peppers - Californication
\item Rock (Testing) - Red Hot Chili Peppers - By the Way
\item Classical (Training) - Unknown Orchestra Conducted By George Winston - Carol Of The Bells
\item Classical (Testing) - Beethoven - Symphony No. 9
\end{enumerate}
\begin{figure}[ht]
\begin{center}
\includegraphics[scale=.2]{./all_ffts.png}
% exp2_dataset.png: 813x175 pixel, 72dpi, 28.68x6.17 cm, bb=0 0 813 175
\end{center}
\caption{Spectrograms of Input Data For Audio Classification}
\label{fig:FFTS}
\end{figure}
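A sketch of this preprocessing step is shown below; the uniform subsampling used to ``puncture'' the
spectrum is an assumption made for illustration, and the helper name is ours.
\begin{verbatim}
import numpy as np

def spectrogram_frames(samples, rate, out_dim=128):
    # FFT magnitudes over 0.1-second windows of a mono snippet, "punctured"
    # down to out_dim components (uniform subsampling here -- an assumption).
    win = int(0.1 * rate)
    frames, keep = [], None
    for start in range(0, len(samples) - win + 1, win):
        spectrum = np.abs(np.fft.rfft(samples[start:start + win]))
        if keep is None:
            keep = np.linspace(0, len(spectrum) - 1, out_dim).astype(int)
        frames.append(spectrum[keep])
    return np.array(frames)    # one 128-dimensional vector per window
\end{verbatim}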
The ``Training'' tag indicates that the network was exposed to sequential samples of that song's
FFTs in their entirety, and then shown random 1-second clips from the spectrogram in rotation with
the other training songs. ``Testing'' songs were never exposed to the network during the training
phase, but during the classification stage the HQSOM was asked to classify them. A network was built
consisting of two base SOM-RSOM nodes that take in 64 inputs each (half of the 128); each outputs a
BMU to its own second-level SOM-RSOM, and the BMUs of these second-level nodes are combined into a
2-dimensional vector that is used as the input to a final SOM-RSOM node, which outputs the
classification BMU (see the attached
code for exact parameter details). Unfortunately, the reference SOM-RSOM implementation was
entirely unable to separate the data, resulting in a 1-classifier (all inputs in a single cluster)
for this network and every
other network that we could conceive of. When we used this network in conjunction with our
improvements, however, we were able to get the following positive results:
\begin{center}
\small
\begin{verbatim}
################################################################################
Results for Techno
Final Distribution Over Map Space
[ 0.265 0.349 0. 0.386 0. ]
MODE: 3
################################################################################
Results for TechnoTEST
Final Distribution Over Map Space
[ 0.287 0.275 0. 0.438 0. ]
MODE: 3
################################################################################
Results for Classical
Final Distribution Over Map Space
[ 0.526 0.154 0. 0.321 0. ]
MODE: 0
################################################################################
Results for ClassicalTEST
Final Distribution Over Map Space
[ 0.49 0.202 0. 0.308 0. ]
MODE: 0
################################################################################
Results for Rock
Final Distribution Over Map Space
[ 0.434 0.554 0. 0.012 0. ]
MODE: 1
################################################################################
Results for RockTEST
Final Distribution Over Map Space
[ 0.266 0.734 0. 0. 0. ]
MODE: 1
\end{verbatim}
\end{center}
These are the results after cycling through each training song in its entirety, followed by three
random 1-second window exposures for each song, making sure to clear the HQSOM's difference matrix
when switching between songs. From them, we observe that the network has successfully separated the
three different songs presented in the training data. More impressively, however, it has also
successfully classified the out-of-sample data by genre, even though these data were not even taken
from the same songs as those in the training set.
We note that the differences between the activation levels of the BMUs for different genres in this
result were in some cases small, and thus that the network was close to misclassifying some of the
data. We also note that it took quite a bit of parameter tweaking to obtain the above results.
Given our limited time and computational resources, however, we consider this acceptable as a
proof-of-concept.
Using only a single training cycle, we have successfully applied an HQSOM network to form invariant
representations of the genres of three musical pieces. Further, we have applied these
representations to correctly classify three additional songs to which the network had never been
previously exposed. We did this with no hard-coded \emph{a priori} knowledge whatsoever -- the
network obtained the sum total of its musical knowledge through exposure to a single play of each of
the respective training songs.
\section{Discussion and Conclusions}
HQSOMs represent a promising path towards invariant spatio-temporal reasoning through massively
parallel vector math. This paper has successfully reproduced many of the results of Miller and
Lommel, improved upon the convergence and noise tolerance properties of their algorithm, and
extended the HQSOM model into the audio domain where these types of networks show great promise. We
have demonstrated that HQSOMs can simultaneously perform spatial and temporal clustering at multiple
layers of abstraction, permitting invariant feature representation and classification over both
space and time. Further, we have shown that they can do so in the presence of considerable noise,
and that they perform quite robustly out of sample -- in particular, they can successfully classify
never-before-seen audio data according to the highly abstract criterion of genre. All of this is
done in a fully unsupervised fashion.
In performing our experiments, however, we have also identified a number of shortcomings in the
original HQSOM model that persist in our improved version. We have found that the networks are
highly sensitive
to parameter values and, to some extent, to initial conditions, and that they often exhibit problems with
premature convergence. We believe these issues present promising avenues for further research, with
a particular focus on automatically tuning system parameters based on descriptive metrics for the
input data and the sets of SOM and RSOM map units. This would effectively reduce the number of
parameters
requiring specification in the model, and would make network performance much less reliant on the
empirical testing of configuration settings. As an example, the adaptive $\gamma$ as shown in this
paper is a good start, but these networks must become less fragile if they are to be widely adopted.
More broadly, our work has enabled us to characterize the essential properties of the general class
of hierarchical, invariant, spatiotemporal representation and classification algorithms. Namely,
they must perform recurrent spatial and temporal clustering over multiple levels of a hierarchy
while preserving the underlying topology of their high-dimensional input data. It has not escaped
our notice that SOMs are merely one of many topology-preserving techniques for nonlinear
dimensionality reduction, and we strongly believe that the use of others such as locally linear
embedding or Isomap in a sort of generalized spatiotemporal representation algorithm could yield
promising results.
The incorporation of feedback mechanisms sending information from higher regions to lower ones also
appears as an immediate possibility for extension, and including such capabilities would provide a
means for auto-associativity and prediction much like that attempted by the HTM model.\cite{HTMAlgo,
OnIntelligence} Having already
incorporated the property of invariance, there is considerable reason to believe that a generalized
spatiotemporal representation algorithm with such feedback could add emergence, reification, and
multistability to its repertoire, completing its implementation of the core principles of gestalt
systems.\cite{HTMAlgo, OnIntelligence} Much more distant possibilities include the incorporation of
reinforcement learning, models of motor control and attention, and the capacity for episodic memory
formation, with the eventual goal of constructing simulated or robotic intelligent agents that
exhibit goal-directed behaviors and learn from their interactions with the
world.\cite{OnIntelligence} For now, however, we are pleased with our results and look forward to
further investigation.
\begin{thebibliography}{}
\bibitem{AIHistory1} M. Lungarella, F. Iida, J. C. Bongard, R. Pfeifer. \textsc{AI in the 21st
Century -- With Historical Reflections}. 50 Years of AI, Festschrift, LNAI 4850, pp. 1–8, 2007.
\bibitem{Rosenblatt} F. Rosenblatt. \textsc{The Perceptron: A Probabilistic Model for Information
Storage and Organization in the Brain}. Cornell Aeronautical Laboratory, Psychological Review, v65,
No. 6, pp. 386–408, 1958.
\bibitem{DreyfusMind} H. L. Dreyfus, S. E. Dreyfus. \textsc{Making a Mind versus Modeling the Brain:
Artificial Intelligence Back at a Branchpoint}. \emph{Daedalus}, v117, No. 1, Artificial
Intelligence, pp. 15–43, Winter, 1988.
\bibitem{AIHistory2} J. Schmidhuber. \textsc{AI in the 21st Century -- With Historical Reflections}.
50 Years of AI, Festschrift, LNAI 4850, pp. 29–41, 2007.
\bibitem{MLHistory1} J. G. Carbonell, R. S. Michalski, T. M. Mitchell. \textsc{Machine Learning: A
Historical and Methodological Analysis}. The AI Magazine, pp. 69–79, Fall 1983.
\bibitem{Poggio} T. Serre, M. Kouh, C. Cadieu, U. Knoblich, G. Kreiman, T. Poggio. \textsc{A Theory
of Object Recognition: Computations and Circuits in the Feedforward Path of the Ventral Stream in
Primate Visual Cortex}. AI Memo 2005-036. CBCL Memo 259. Massachusetts Institute of Technology,
Center for Biological and Computational Learning. Dec. 2005.
\bibitem{HQSOM} J. W. Miller and P. H. Lommel. \textsc{Biomimetic sensory abstraction using
hierarchical quilted self-organizing maps}. The Charles Stark Draper Laboratory, Inc. 555 Technology
Square, Cambridge, MA 02139-3563, USA. 2006.
\bibitem{OnIntelligence} J. Hawkins and S. Blakeslee. \textsc{On Intelligence}. Times Books, Henry
Holt, New York, USA. 2004.
\bibitem{HTMAlgo} J. Hawkins, S. Ahmad, D. Dubinsky. \textsc{Hierarchical Temporal Memory including
HTM Cortical Learning Algorithms}. Numenta, Inc. 811 Hamilton St., Redwood City, CA 94063, USA.
Sept. 2011.
\end{thebibliography}
\end{document}
\documentclass[a4paper]{article}
\usepackage[english]{babel}
\usepackage[utf8x]{inputenc}
\usepackage{amsmath}
\usepackage{graphicx}
\title{MATH 542 Homework 5}
\author{Saket Choudhary\\[email protected]}
\begin{document}
\maketitle
\section*{Problem 1}
%
% Miscellaneous Exercise 1: #2 on page 15
% Exercise 2a: #1 and #3(b) on page 19
% Exercise 2b: #2, #3, #6, #8 on page 23-24
\subsection*{Problem 1a.}
Using variance-covariance expansion:
\begin{align*}
Var(X_1-2X_2+X_3) &= Var(X_1) + Var(-2X_2) + Var(X_3) + 2Cov(X_1,-2X_2) + 2Cov(-2X_2,X_3) + 2Cov(X_3,X_1)\\
&= Var(X_1) + 4Var(X_2) + Var(X_3) -4 Cov(X_1,X_2) -4Cov(X_2,X_3) + 2Cov(X_3,X_1)\\
&= 5+4(3)+3-4(2) -4(0) + 2(3)\\
&= 18
\end{align*}
\begin{align*}
Y &= \begin{pmatrix} Y_1\\ Y_2 \end{pmatrix}\\
&= \begin{pmatrix}
X_1+X_2\\
X_1+X_2+X_3\\
\end{pmatrix}\\
&= \begin{pmatrix}
1 & 1 & 0\\
1 & 1 & 1
\end{pmatrix} \begin{pmatrix}
X_1\\
X_2\\
X_3
\end{pmatrix}
\end{align*}
Now using $Var(AX) = AVar(X)A'$
\begin{align*}
Var(Y) &= \begin{pmatrix}
1 & 1 & 0\\
1 & 1 & 1
\end{pmatrix} Var(X) \begin{pmatrix}
1 & 1 \\
1 & 1 \\
0 & 1\\
\end{pmatrix}\\
&= \begin{pmatrix}
12 & 15 \\
15 & 21
\end{pmatrix}
\end{align*}
\section*{Ex2a Problem 1}
$f(y_1,y_2) = k^{-1}\exp(-\frac{1}{2}(2y_1^2+y_2^2+2y_1y_2-22y_1-14y_2+65))$
\begin{align*}
2y_1^2+y_2^2+2y_1y_2-22y_1-14y_2+65 &= \begin{pmatrix}y_1-\mu_1 & y_2-\mu_2\end{pmatrix} \begin{pmatrix}
a & b\\
b & c
\end{pmatrix} \begin{pmatrix}
y_1-\mu_1\\
y_2-\mu_2
\end{pmatrix}\\
&= a(y_1-\mu_1)^2+2b(y_1-\mu_1)(y_2-\mu_2) + c(y_2-\mu_2)^2\\
&= ay_1^2+cy_2^2+2by_1y_2-y_1(2a\mu_1+2b\mu_2)-y_2(2b\mu_1+2c\mu_2) + (a\mu_1^2+2b\mu_1\mu_2+c\mu_2^2)
\end{align*}
Now comparing the coefficient of $y_1^2$ $\implies\ a=2$
Comparing coefficient of $y_2^2\ \implies\ c=1$
Comparing coefficient of $y_1y_2\ \implies \ b=1$
Comparing coefficient of $y_1 \implies \ 4\mu_1+2\mu_2=22$
Comparing coefficient of $y_2 \implies \ 2\mu_1+2\mu_2=14$
Thus, $\mu_1 = 4$ and $\mu_2=3$
Check: $a\mu_1^2 + 2b\mu_1\mu_2+c\mu_2^2 = 2(16)+24+9 = 65$
and hence $\Sigma^{-1} = \begin{pmatrix} 2 & 1\\ 1 & 1 \end{pmatrix}$
$det(\Sigma^{-1}) = 1$
$\Sigma = \begin{pmatrix}
1 & -1\\
-1 & 2
\end{pmatrix}$
Thus, $k^{-1} = \frac{1}{(2\pi)^{2/2}\sqrt{det(\Sigma)}} = \frac{1}{2\pi}$
Thus, $k = 2\pi$
\subsection*{2a Problem 1b}
\begin{align*}
E[Y] & = \begin{pmatrix}
\mu_1\\
\mu_2
\end{pmatrix}\\
&= \begin{pmatrix}
4\\
3
\end{pmatrix}
\end{align*}
\begin{align*}
Var[Y] & = \begin{pmatrix}
a & b\\
b & c
\end{pmatrix}^{-1}\\
&= \begin{pmatrix}
1 & -1\\
-1 & 2
\end{pmatrix}
\end{align*}
\section*{Ex2a Problem 3(b)}
\begin{align*}
\Sigma &= \begin{pmatrix}
1 & \rho \\
\rho & 1
\end{pmatrix}
\end{align*}
Determining the eigenvalues:
\begin{align*}
det(\Sigma-\lambda I) &=0 \\
(1-\lambda)^2 &= \rho^2 \\
\lambda &= 1\pm \rho
\end{align*}
And the corresponding eigenvectors:
\begin{align*}
\begin{pmatrix}
1 & \rho \\
\rho & 1
\end{pmatrix}\begin{pmatrix} v_1 \\ v_2 \end{pmatrix} &= \lambda \begin{pmatrix}
v_1\\ v_2
\end{pmatrix}
\end{align*}
One set of eigenvectors is given by: for $\lambda_1=1+\rho$: $\frac{1}{\sqrt{2}}\begin{pmatrix}1\\1 \end{pmatrix}$
and for $\lambda_2=1-\rho$: $\frac{1}{\sqrt{2}}\begin{pmatrix} 1 \\ -1 \end{pmatrix}$
Thus, using the eigendecomposition, $\Sigma$ can be rewritten as:
\begin{align*}
\Sigma &= A\Lambda A'\\
&=\frac{1}{2}\begin{pmatrix}
1 & 1\\
1 & -1
\end{pmatrix}\begin{pmatrix}
1+\rho & 0\\
0 & 1-\rho
\end{pmatrix}\begin{pmatrix}
1 & 1\\
1 & -1
\end{pmatrix}
\end{align*}
And hence $\Sigma^{1/2} = A\Lambda^{1/2} A' = \frac{1}{2}\begin{pmatrix}
\sqrt{1+\rho} + \sqrt{1-\rho} & \sqrt{1+\rho} - \sqrt{1-\rho}\\
\sqrt{1+\rho} - \sqrt{1-\rho} & \sqrt{1+\rho} + \sqrt{1-\rho}
\end{pmatrix}$
\section*{Ex2b Problem 2}
$Y_i = \begin{pmatrix} 0 & 0 & \dots & 1_i & 0 \dots 0 \end{pmatrix}Y =a_i'Y$
Since $Y \sim N(\mu, \Sigma)$, using Theorem 2.2,
$Y_i = a_i'Y \sim N(a_i'\mu, a_i'\Sigma a_i) = N(\mu_i, \sigma_{ii})$
\section*{Ex2b Problem 3}
Since $Y_1+Y_2+Y_3$ and $Y_1-Y_2$ are linear combinations of the multivariate normal $Y$, their joint distribution is normal too.
Consider: $A = \begin{pmatrix}
1 & 1 & 1\\
1 & -1 & 0
\end{pmatrix}$
Now, $Z = \begin{pmatrix}
Z_1\\
Z_2
\end{pmatrix} = AY$
and hence $Z \sim N(A\mu, A\Sigma A')$
$A\mu = \begin{pmatrix}
5\\
1
\end{pmatrix}$
$A\Sigma A' = \begin{pmatrix}
10 & 0\\
0 & 3
\end{pmatrix}$
Since $Z_1$ and $Z_2$ are jointly normal with $\sigma_{12}=0$, they are independent, and so their joint density is the product of the marginal densities.
$\sigma_1^2 = 10$ ; $\sigma_2^2=3$
$\mu_1 = 5$, $\mu_2 =1$
\begin{align*}f(Z_1,Z_2) &= \frac{1}{2\pi \sigma_1 \sigma_2}\exp(-\frac{(Z_1-\mu_1)^2}{2\sigma_1^2}-\frac{(Z_2-\mu_2)^2}{2\sigma_2^2})\\
\end{align*}
\section*{Ex2b Problem 6}
Define $U_1 = Y_1+Y_2$
and $U_2 = Y_1-Y_2$ where $U_i \sim N(0,1)$
$Cov(U_1,U_2) = 0$
Rearranging gives:
\begin{align*}
Y_1 &= \frac{1}{2}(U_1+U_2)\\
Y_2 &= \frac{1}{2}(U_1-U_2)\\
\end{align*}
Thus, $Y_i \sim N(0, \frac{1}{2})$, since $Var\left(\frac{1}{2}(U_1\pm U_2)\right) = \frac{1}{4}(1+1) = \frac{1}{2}$.
Since any linear combination $a'Y$ is itself a linear combination of the independent normals $U_1$ and $U_2$, it has a univariate normal distribution (with mean 0); so, using Theorem 2.3, we see that $Y \sim N(\mu, \Sigma)$ where
\begin{align*}
Y &= \begin{pmatrix}
Y_1\\Y_2 \end{pmatrix}\\
\mu &= \begin{pmatrix}
\mu_1\\
\mu_2
\end{pmatrix}\\
&= \begin{pmatrix}
0\\
0
\end{pmatrix}\\
\end{align*}
To find $\Sigma$:
\begin{align*}
Cov(U_1,U_2) &= 0\\
Cov(Y_1+Y_2, Y_1-Y_2) &=0\\
Cov(Y_1,Y_1)-Cov(Y_1,Y_2)+Cov(Y_2,Y_1)-Cov(Y_2,Y_2) &= 0\\
\implies \sigma_{11} &= \sigma_{22}
\end{align*}
Furthermore, $Var(U_1) = \sigma_{11}+\sigma_{22}+2\sigma_{12} = 1$ and $Var(U_2) = \sigma_{11}+\sigma_{22}-2\sigma_{12} = 1$, which give $\sigma_{12} = 0$ and $\sigma_{11} = \sigma_{22} = \frac{1}{2}$.
Thus, $Y_1,Y_2$ have a bivariate normal distribution with $\mu= \begin{pmatrix}
0\\
0
\end{pmatrix}$ and $\Sigma = \frac{1}{2}\begin{pmatrix}
1 & 0\\
0 & 1
\end{pmatrix}$
\section*{Ex2b Problem 8}
\begin{align*}
\begin{pmatrix} \bar{Y} & Y_1-\bar{Y}& Y_2-\bar{Y} & \dots & Y_n-\bar{Y}\end{pmatrix}' &= \begin{pmatrix}
1/n & 1/n & 1/n & \dots & 1/n\\
1-1/n & -1/n & -1/n & \dots & -1/n\\
-1/n & 1-1/n & -1/n & \dots & -1/n\\
\vdots \\
-1/n & -1/n & -1/n & \dots & 1-1/n\\
\end{pmatrix} \begin{pmatrix} Y_1 & Y_2 & Y_3 &\dots Y_n \end{pmatrix}'\\
%&= (I-\frac{1}{n}1_n1_n')\begin{pmatrix} Y_1 & Y_2 & Y_3 &\dots Y_n \end{pmatrix}'
Z &= AY
\end{align*}
Also $Z \sim N(A\mu, A\Sigma A')$
\begin{align*}
A\Sigma A' &= AA' \text{ since } \Sigma = I\\
&= \begin{pmatrix}
\frac{n}{n^2} & 0 & 0 & \dots & 0 \\
0 & (1-\frac{1}{n})^2+\frac{n-1}{n^2} & -\frac{2}{n}(1-\frac{1}{n}) + \frac{n-2}{n^2} & \dots & -\frac{2}{n}(1-\frac{1}{n}) + \frac{n-2}{n^2}\\
\vdots \\
0 & -\frac{2}{n}(1-\frac{1}{n}) + \frac{n-2}{n^2} & -\frac{2}{n}(1-\frac{1}{n}) + \frac{n-2}{n^2} & \dots & (1-\frac{1}{n})^2+\frac{n-1}{n^2}\\
\end{pmatrix}\\
&= \begin{pmatrix}
\frac{1}{n} & 0 & 0 & 0 & \dots & 0\\
0 & 1-\frac{1}{n} & -\frac{1}{n} & -\frac{1}{n} &\dots & -\frac{1}{n}\\
\vdots\\
0 & -\frac{1}{n} & -\frac{1}{n} & \dots & -\frac{1}{n} & 1-\frac{1}{n}
\end{pmatrix}\\
&=B
\end{align*}
%where $A=I-\frac{1}{n}1_n1_n'$
Thus M.g.f. of $Z=AY$ is (using Theorem 2.2 with $d=0$)
\begin{align*}
E[\exp(t'AY)] &= \exp(t'A\mu + \frac{1}{2}t'A\Sigma A't)\\
&= \exp(t'A\mu + \frac{1}{2}t'AA't) \text{ using } \Sigma = I\\
&=\exp(t'A\mu + \frac{1}{2}t'Bt)
\end{align*}
And hence $Z=\begin{pmatrix} \bar{Y} & Y_1-\bar{Y}& Y_2-\bar{Y} & \dots & Y_n-\bar{Y}\end{pmatrix}'$ follows a multivariate normal distribution in which $Cov(\bar{Y}, Y_i-\bar{Y}) = 0$ (the zero entries in the first row of $B$) $\implies$ $\bar{Y}$ and $Y_i-\bar{Y}$ are independent (for all $i$)
Let's call $X=\begin{pmatrix} Y_1-\bar{Y}& Y_2-\bar{Y} & \dots & Y_n-\bar{Y}\end{pmatrix}'$
Then, from above, we have that $\bar{Y}$ and $X$ are independent (this also follows from Theorem 2.4)
Then,
$$\sum_i(Y_i-\bar{Y})^2 = X'X$$
Since $\bar{Y}$ and $X$ are independent, it follows that $\bar{Y}$ and $X'X$ are independent
\end{document}
\chapter{Internet Programs}
The \index{Internet}Internet is central to modern computing. Because
it is ubiquitous, programmers should be able to take it for granted.
Writing applications that use the Internet should be just as easy
as writing programs for a standalone desktop computer. In many respects
this ideal can be achieved in a modern programming language. The core
facilities for Internet programming were introduced with simple
examples as part of the system interface in Chapter 5. This chapter
expands on this important area of software development. This chapter
presents examples that show you how to
\begin{itemize}\itemsep0pt
\item Write Internet servers and clients
\item Build programs that maintain a common view of multiple
users' actions
\end{itemize}
\section{The Client-Server Model}
The Internet allows applications to run on multiple connected
computers using any topology, but the standard practice is to implement a
\index{client/server}client/server topology in which a
user's machine plays the role of a client, requesting
information or services from remote machines, each of which plays the
role of a server. The relationship between clients and servers is
many-to-many, since one client can connect to many servers and one
server typically handles requests from many clients.
Writing a \index{client}client can be easy. For simple read-only
access to a remote file, it is just as easy as opening a file on the
hard disk. Most clients are more involved, sending out requests
and receiving replies in some agreed-upon format called a
\index{protocol}\textit{protocol}. A protocol may be human readable
text or it may be binary, and can consist of any number of messages
back and forth between the client and server to transmit the required
information. The most common Internet protocols are built-in parts of
Unicon's messaging facilities, but some applications
define their own protocol.
Writing a server is more difficult. A server sits around in
an infinite loop, waiting for clients and servicing their requests.
When only one client is invoking a server, its job is simple enough,
but when many simultaneous clients wish to connect, the server program
must either be very efficient or else the clients will be kept waiting
for unacceptably long periods.
Although the following example programs emphasize how easy it is to
write Internet clients and servers in Unicon, writing
"industrial strength" applications requires
additional security considerations which are mostly beyond the scope of
this book. For example, user authentication and encryption are
essential in most systems, and many modern servers are carefully tuned
to maximize the number of simultaneous users they support, and minimize
their vulnerability to denial-of-service attacks.
\section{An Internet Scorecard Server}
Many games with numeric scoring systems feature a list of high scores.
This feature is interesting on an individual machine, but it is ten
times as interesting on a machine connected to the Internet! The
following simple \index{server}server program allows games to report
their high scores from around the world. This allows players to compete
globally. The scorecard server is called \texttt{scored}. By
convention, servers are often given names ending in
"d" to indicate that they are daemon
programs that run in the background.
\subsection*{The scorecard client procedure}
Before examining the server code, take a look at the client procedure
that a game calls to communicate with the \texttt{scored} server. To
use this client procedure in your programs, add the following
declaration to your program.
\iconcode{
link highscor
}
The procedure \texttt{highscore()} opens a network connection, writes
four lines consisting of the protocol name
"HSP", the name of the game, the
user's identification (which could be a nickname, a
number, an e-mail address, or anything else), and that
game's numeric score. Procedure \texttt{highscore()}
then reads the complete list of high scores from the server, and
returns the list. Most games write the list of high scores to a window
for the user to ponder.
\iconcode{
procedure highscore(game, userid, score, server) \\
\> if not find(":", server) then server
{\textbar}{\textbar}:= ":4578" \\
\> f := open(server, "n") {\textbar}
fail \\
\ \\
\> \# Send in this game's score \\
\> write(f, "HSP{\textbackslash}n", game,
"{\textbackslash}n", userid,
"{\textbackslash}n", score) {\textbar} \\
\> \ \ \ stop("Couldn't write:
", \&errortext) \\
\ \\
\> \# Get the high score list \\
\> L := ["High Scores"] \\
\> while line := read(f) do put(L, line) \\
\> close(f) \\
\> return L \\
end
}
\subsection*{The Scorecard server program}
The scorecard server program, \texttt{scored.icn} illustrates issues
inherent in all Internet servers. It must sit at a port, accepting
connection requests endlessly. For each connection, a call to
\texttt{score\_result()} handles the request. The \texttt{main()}
procedure given below allows the user to specify a port, or uses a
default port if none is supplied. If another server is using a given
port, it won't be available to this server, and the
client and server have to agree on which port the server is using.
\iconcode{
procedure main(av) \\
\> port := 4578 \# a random user-level port \\
\> if av[i := 1 to *av] == "-port" then
port := integer(av[i+1]) \\
\ \\
\> write("Internet Scorecard version
1.0") \\
\> while net := open(":"
{\textbar}{\textbar} port, "na") \ do \{ \\
\> \ \ \ score\_result(net) \\
\> \ \ \ close(net) \\
\> \ \ \ \} \\
\> (\&errno = 0) {\textbar} stop("scored net accept
failed: ", \&errortext) \\
end
}
The procedure \texttt{score\_result()} does all the real work of the
server, and its implementation is of architectural significance. Any
delay in handling a request implies the server will be unable to
handle other simultaneous client requests. For this reason, many servers
immediately spawn a separate process to handle each request.
You could do that with \texttt{system()}, as illustrated in Chapter 5,
or launch a thread for it,
but for \texttt{scored} this is overkill. The server handles each
request almost instantaneously.
Some small concessions to security are in order, even in a trivial
example such as this. If a bogus Internet client connects by accident,
it will fail to identify our protocol and be rejected. More subtly, if
a rogue client opens a connection and writes nothing, we do not want to
block waiting for input or the client will deny service to others. A
call to \texttt{select()} is used to guarantee the server receives data within
the first 1000 milliseconds (1 second). A last security concern is to
ensure that the "game" filename supplied is
valid; it must be an existing file in the current directory, not
something like \texttt{/etc/passwd} for example.
The \texttt{score\_result()} procedure maintains a static table of all
scores of all games that it knows about. The keys of the table are the
names of different games, and the values in the table are lists of
alternating user names and scores. The procedure starts by reading the
game, user, and score from the network connection, and loading the
game's score list from a local file, if it
isn't in the table already. Both the score lists
maintained in memory, and the high scores files on the server, are
sequences of pairs of text lines containing a userid followed by a
numeric score. The high score files have to be created and initialized
manually with some N available (userid,score) pairs of lines, prior to
their use by the server.
\iconcode{
procedure score\_result(net) \\
\> local s := "" \\
\> static t, gamenamechars \\
\> initial \{ \\
\>\> t := table() \\
\>\> gamenamechars :=
\&letters++\&digits++'-\_' \\
\>\> \} \\
\ \\
\> select(net, 1000) {\textbar} \{ write(net,
"timeout"); fail \} \\
\> (s ||:= ready(net)) ? \{ \\
\>\> = "HSP{\textbackslash}n" {\textbar} \{
write(net, "wrong protocol"); fail \} \\
\>\> game := tab(many(gamenamechars)) {\textbar} \{
write(net,"no game?"); fail \} \\
\>\> = "{\textbackslash}n" \\
\>\> owner := tab(many(gamenamechars)) {\textbar} \{
write(net,"no owner?"); fail \} \\
\>\> = "{\textbackslash}n" \\
\>\> score := tab(many(\&digits)) {\textbar} \{
write("no score?"); fail \} \\
\>\> \} \\
\ \\
\> if t[game] === \&null then \{ \\
\>\> if not (f := open(game)) then \{ \\
\>\>\> write(net, "No high scores here for ", game) \\
\>\>\> fail \\
\>\>\> \} \\
\>\> t[game] := L := [] \\
\>\> while put(L, read(f)) \\
\>\> close(f) \\
\>\> \} \\
\> else \\
\> \ \ \ L := t[game]
}
The central question is whether the new score makes an entry into the
high scores list or not. The new score is checked against the last
entry in the high score list, and if it is larger, it replaces that
entry. It is then "bubbled" up to the
correct place in the high score list by repeatedly comparing it with
the next higher score, and swapping entries if it is higher. If the new
score made the high score list, the list is written to its file on
disk.
\iconcode{
\> if score {\textgreater} L[-1] then \{ \\
\> \ \ \ L[-2] := owner \\
\> \ \ \ L[-1] := score \\
\> \ \ \ i := -1 \\
\> \ \ \ while L[i] {\textgreater} L[i-2] do \{ \\
\> \ \ \ \ \ \ L[i] :=: L[i-2] \\
\> \ \ \ \ \ \ L[i-1] :=: L[i-3] \\
\> \ \ \ \ \ \ i -:= 2 \\
\> \ \ \ \ \ \ \} \\
\> \ \ \ f := open(game,"w") \\
\> \ \ \ every write(f, !L) \\
\> \ \ \ close(f) \\
\> \ \ \ \}
}
{\sffamily\bfseries
Note}
{\sffamily
List \textit{L} and \textit{t[game]} refer to the same list, so the
change to L here is seen by the next client that looks at
\textit{t[game]}.}
Lastly, whether the new score made the high score list or not, the high
score list is written out on the network connection so that the game
can display it.
\iconcode{
\> every write(net, !L) \\
end
}
Is this high score application useful and fun? Yes! Is it secure and
reliable? No! It records any scores it is given for any game that has a
high score file on the server. It is utterly easy to supply false
scores. This is an honor system.
\section{A Simple ``Talk'' Program}
E-mail is the king of all Internet applications. After that, some of the
most popular Internet applications are real-time dialogues between
friends and strangers. Many on-line services rose to popularity because
of their "chat rooms," and Internet Relay
Chat (\index{IRC}IRC) is a ubiquitous form of free real-time
communication. These applications are evolving in multiple directions,
such as streaming multimedia, and textual and graphical forms of
interactive virtual reality. While it is possible to create arbitrarily
complex forms of real-time communication over the Internet, for many
purposes, a simple connection between two users'
displays, with each able to see what the other types, is all that is
needed.
The next example program, called \texttt{italk}, is styled after the
classic BSD UNIX \index{talk}\texttt{talk} program. The stuff you type
appears on the lower half of the window, and the remote
party's input is in the upper half. Unlike a chat
program, the characters appear as they are typed, instead of a line at
a time. In many cases this allows the communication to occur more
smoothly with fewer keystrokes.
The program starts out innocently enough, by linking in library
functions for graphics, defining symbolic constants for font and screen
size. Among global variables, \texttt{vs} stands for vertical space,
\texttt{cwidth} is column width, \texttt{wheight} and \texttt{wwidth}
are the window's dimensions, and \texttt{net} is the
Internet connection to the remote machine.
\iconcode{
link graphics
\ \\
\$define ps 10 \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \# The size of the font to
use \\
\$define lines 48 \ \ \ \ \ \ \ \ \ \ \ \ \# No. of text lines in the
window \\
\$define margin 3 \ \ \ \ \ \ \ \ \ \ \ \ \# Space to leave around the
margins \\
\$define START\_PORT 1234 \\
\$define STOP\_PORT \ 1299
\ \\
global vs, cwidth, wheight, wwidth, net
}
The \texttt{main()} procedure starts by calling \texttt{win\_init()} and
\texttt{net\_init()} to open up a local window and then establish a
connection over the network, respectively. The first command line
argument is the user and/or machine to connect to.
\iconcode{
procedure main(args) \\
\> win\_init() \\
\> net\_init(args[1] {\textbar} "127.0.0.1")
}
Before describing the window interaction or subsequent handling of
network and window system events, consider how \texttt{italk}
establishes communication in procedure \texttt{net\_init()}. Unlike
many Internet applications, \texttt{italk} does not use a conventional
client/server architecture in which a server daemon is always running
in the background. To connect to someone on another machine, you name
him or her in the format \texttt{user@host} on the command line. The
code attempts to connect as a client to someone waiting on the other
machine, and if it fails, it acts as a server on the local machine and
waits for the remote party to connect to it.
\iconcode{
procedure net\_init(host) \\
\> host ?:= \{ \\
\> \ \ \ if user := tab(find("@")) then move(1) \\
\> \ \ \ tab(0) \\
\> \ \ \ \} \\
\> net :=
(open\_client{\textbar}open\_server{\textbar}give\_up)(host, user) \\
end
}
The attempt to establish a network connection begins by attempting to
open a connection to an \texttt{italk} that is already running on the
remote machine. The \texttt{italk} program works with an arbitrary
user-level set of ports (defined above as the range 1234-1299). An
\texttt{italk} client wades through these ports on the remote machine,
trying to establish a connection with the desired party. For each port
at which \texttt{open()} succeeds, the client writes its user name,
reads the user name for the process on the remote machine, and returns
the connection if the desired party is found.
\iconcode{
procedure open\_client(host, user) \\
\> port := START\_PORT \\
\> if {\textbackslash}user then \{ \\
\> \ \ \ while net := open(host {\textbar}{\textbar}
":" {\textbar}{\textbar} port,
"n") do \{ \\
\> \ \ \ \ \ \ write(net, getenv("USER")
{\textbar} "anonymous") \\
\> \ \ \ \ \ \ if user == read(net) then return net \\
\> \ \ \ \ \ \ close(net) \\
\ \ \ \ \ \ \ \ \ port +:= 1 \\
\> \ \ \ \ \ \ \} \\
\> \ \ \ \} \\
\> else \{ \\
\> \ \ \ net := open(host {\textbar}{\textbar}
":" {\textbar}{\textbar} port,
"n") \\
\> \ \ \ write(net, getenv("USER")
{\textbar} "anonymous") \\
\> \ \ \ read(net) \# discard \\
\> \ \ \ return net \\
\> \ \ \ \} \\
end
}
The procedure \texttt{open\_server()} similarly cycles through the
ports, looking for an available one on which to wait. When it receives
a connection, it checks the client user and returns the connection if
the desired party is calling.
\iconcode{
procedure open\_server(host, user) \\
\> repeat \{ \\
\ \ \ \ \ \ port := START\_PORT \\
\> \ \ \ until net := open(":"
{\textbar}{\textbar} port, "na") do \{ \\
\> \ \ \ \ \ \ port +:= 1 \\
\> \ \ \ \ \ \ if port {\textgreater} STOP\_PORT then fail \\
\> \ \ \ \ \ \ \} \\
\> \ \ \ if not (them := read(net)) then \{ \\
\> \ \ \ \ \ \ close(net) \\
\> \ \ \ \ \ \ next \\
\> \ \ \ \ \ \ \} \\
\> \ \ \ if /user {\textbar} (them == user) then \{ \\
\> \ \ \ \ \ \ write(net, getenv("USER")
{\textbar} "anonymous") \\
\> \ \ \ \ \ \ WAttrib("label=talk: accepted call from
", them) \\
\> \ \ \ \ \ \ return net \\
\> \ \ \ \ \ \ \} \\
\> \ \ \ WAttrib("label=talk: rejected call from
", them) \\
\> \ \ \ write(net, getenv("USER")
{\textbar} "anonymous") \\
\> \ \ \ close(net) \\
\> \ \ \ \} \\
end
}
This connection protocol works in the common case, but is error prone.
For example, if both users typed commands at identical instants, both
would attempt to be clients, fail, and then become servers awaiting the
other's call. Perhaps worse from some
users' point of view would be the fact that there is
no real authentication of the identity of the users. The \texttt{italk}
program uses whatever is in the USER \index{environment
variable!USER}environment variable. The UNIX talk program solves both
of these problems by writing a separate talk server daemon that
performs the \textit{marshalling}. The daemons talk across the network
and negotiate a connection, check to see if the user is logged in and
if so, splash a message on the remote user's screen
inviting her to start up the talk program with the first
user's address.
The next part of \texttt{italk}'s code to consider is
the event handling. In reality, each \texttt{italk} program manages and
multiplexes asynchronous input from two connections: the window and the
network. The built-in function that is used for this purpose is
\texttt{select()}. The \texttt{select()} function will wait until some
(perhaps partial) input is available on one of its file, window, or
network arguments.
The main thing to remember when handling input from multiple
sources is that you must not block for I/O. This means: use listener
mode for new connections or a timeout parameter with \texttt{open()},
and when handling network connections, never use \texttt{read()},
only use \texttt{reads()} or better yet \texttt{ready()}.
For windows you must also avoid
\texttt{read()}'s library procedure counterpart,
\texttt{WRead()}. The code below checks which connection has input
available and calls \texttt{Event()} as events come in on the window,
and calls \texttt{reads()} on the network as input becomes available on
it. In either case the received input is echoed to the correct location
on the screen. Ctrl-D exits the program. To accept a command such as
"quit" would have meant collecting
characters till you have a complete line, which seems like overkill for
such a simple application.
\iconcode{
\> repeat \{ \\
\> \ \ \ *(L := select(net, \&window)){\textgreater}0 {\textbar}
stop("empty select?") \\
\> \ \ \ if L[1] === \&window then \{ \\
\> \ \ \ \ \ \ if \&lpress {\textgreater}= integer(e := Event())
{\textgreater}= \&rdrag then next \\
\> \ \ \ \ \ \ if string(e) then \{ \\
\> \ \ \ \ \ \ \ \ \ writes(net, e) {\textbar} break \\
\> \ \ \ \ \ \ \ \ \ handle\_char(2, e) {\textbar} break \\
\> \ \ \ \ \ \ \ \ \ WSync() \\
\> \ \ \ \ \ \ \ \ \ \} \\
\> \ \ \ \ \ \ \} \\
\> \ \ \ else \{ \\
\> \ \ \ \ \ \ s := reads(net) {\textbar} break \\
\> \ \ \ \ \ \ handle\_char(1, s) {\textbar} break \\
\> \ \ \ \ \ \ WSync() \\
\> \ \ \ \ \ \ \} \\
\> \ \ \ \} \\
\> close(net) \\
end
}
After such a dramatic example of input processing, the rest of the
\texttt{italk} program is a bit anticlimactic, but it is presented
anyhow for completeness sake. The remaining procedures are all
concerned with managing the contents of the user's
window. Procedure \texttt{handle\_char(w, c)}, called from the input
processing code above, writes a character to the appropriate part of
the window. If \texttt{w = 1} the character is written to the upper
half of the window. Otherwise, it is written to the lower half. The two
halves of the window are scrolled separately, as needed.
\iconcode{
procedure handle\_char(w, c) \\
\> \# Current horiz. position for each half of the window \\
\> static xpos \\
\> initial xpos := [margin, margin]
\ \\
\> if c == "{\textbackslash}\^{}d" then
fail \ \ \ \ \# EOF \\
\ \\
\> \# Find the half of the window to use \\
\> y\_offset := (w - 1) * wheight/2 \\
\ \\
\> if c ==
("{\textbackslash}r"{\textbar}'{\textbackslash}n')
{\textbar} xpos[w] {\textgreater} wwidth then \{ \\
\> \ \ \ ScrollUp(y\_offset+1, wheight/2-1) \\
\> \ \ \ xpos[w] := margin \\
\> \ \ \ \} \\
\> if c ==
("{\textbackslash}r"{\textbar}'{\textbackslash}n')
then return \\
\> \#handles backspacing on the current line \\
\> if c =="{\textbackslash}b" then \{ \\
\> \ \ \ if xpos[w] {\textgreater}= margin + cwidth then \{ \\
\> \ \ \ \ \ \ EraseArea(xpos[w]-cwidth, y\_offset+1+wheight/2-1-vs,cwidth,vs) \\
\> \ \ \ \ \ \ xpos[w] -:= cwidth \\
\> \ \ \ \ \ \ return \\
\> \ \ \ \ \ \ \} \\
\> \ \ \ \} \\
\> DrawString(xpos[w], wheight/2 + y\_offset - margin, c) \\
\> xpos[w] +:= cwidth \\
\> return \\
end
}
Scrolling either half of the window is done a line at a time. The
graphics procedure \texttt{CopyArea()} is used to move the existing
contents up one line, after which \texttt{EraseArea()} clears the line
at the bottom.
\iconcode{
procedure ScrollUp(vpos, h) \\
\> CopyArea(0, vpos + vs, wwidth, h-vs, 0, vpos) \\
\> EraseArea(0, vpos + h - vs, wwidth, vs) \\
end
}
The window is initialized with a call to the library procedure,
\texttt{WOpen()}, which takes attribute
parameters for the window's size and font. These
values, supplied as defined symbols at the top of the program, are also
used to initialize several global variables such as \texttt{vs}, which
gives the vertical space in pixels between lines.
\iconcode{
procedure win\_init() \\
\> WOpen("font=typewriter,"
{\textbar}{\textbar} ps, "lines="
{\textbar}{\textbar} lines, "columns=80") \\
\> wwidth := WAttrib("width") \\
\> wheight := WAttrib("height") \\
\> vs := WAttrib("fheight") \\
\> cwidth := WAttrib("fwidth") \\
\> DrawLine(0, wheight/2, wwidth, wheight/2) \\
\> Event() \\
end
}
Lastly, the procedure \texttt{give\_up()} writes a message and exits the
program, if no network connection is established. If \texttt{user} is
null and the non-null test (the backslash operator) fails, the
concatenation is not performed and \index{alternation operator (
{\textbar} )}alternation causes the empty string to be passed as the
second argument to \texttt{stop()}.
\iconcode{
procedure give\_up(host, user) \\
\> stop("no connection to ",
({\textbackslash}user {\textbar}{\textbar}
"@") {\textbar}
"", host) \\
end
}
What enhancements would make \texttt{italk} more interesting? An
obvious extension would be to use a standard network protocol,
such as that of UNIX \texttt{talk}, so that \texttt{italk} could
communicate with other users that don't have
\texttt{italk}. UNIX \texttt{talk} also offers a more robust connection
and authentication model (although you are dependent on the
administrator of a remote machine to guarantee that its \texttt{talkd}
server is well behaved). Another feature of UNIX \texttt{talk} is
support for multiple simultaneously connected users.
One neat extension you might implement is support for
graphics, turning \texttt{italk} into a distributed whiteboard
application for computer-supported cooperative work. To support
graphics you would need to extend the window input processing to
include a simple drawing program, and then you would need to extend the
network protocol to include graphics commands, not just keystrokes. One
way to do this would be to represent each user action (a keystroke or a
graphics command) by a single line of text that is transmitted over the
network. Such lines might look like:
\iconcode{
key H \\
key i \\
key ! \\
circle 100,100,25
}
\noindent
and so forth. At the other end, the program deciphering these commands
translates them into appropriate output to the window, which would be
pretty easy, at least for simple graphics. The nice part about this
solution is that this particular collaborative whiteboard application
would work fine across differing platforms (Linux, Microsoft Windows,
and so on) and require only a couple hundred lines of code!
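To make the idea concrete, here is one possible sketch of the receiving
end's command decoder, written in Python rather than Unicon, and using
hypothetical handler names (\texttt{draw\_key()}, \texttt{draw\_circle()})
in place of the window output procedures shown earlier:
\begin{verbatim}
# Minimal sketch of a line-oriented whiteboard protocol decoder.
# draw_key() and draw_circle() are hypothetical stand-ins for the
# window output procedures an italk-like program would call.

def draw_key(ch):
    print("draw character:", ch)

def draw_circle(x, y, r):
    print("draw circle at (%d,%d) with radius %d" % (x, y, r))

def dispatch(line):
    """Decode one protocol line such as 'key H' or 'circle 100,100,25'."""
    parts = line.strip().split(None, 1)
    if not parts:
        return
    command = parts[0]
    args = parts[1] if len(parts) > 1 else ""
    if command == "key":
        draw_key(args)
    elif command == "circle":
        x, y, r = (int(v) for v in args.split(","))
        draw_circle(x, y, r)
    else:
        print("unknown command:", command)

for line in ["key H", "key i", "key !", "circle 100,100,25"]:
    dispatch(line)
\end{verbatim}
A real implementation would dispatch to the same graphics calls used by
\texttt{handle\_char()}, and would also have to decide how remote drawing
commands are merged with local keyboard input.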
\section{Summary}
Writing Internet programs can be easy and fun, although it is easy to
underestimate the security needed. There are several
different ways to write Internet programs in Unicon. The
\index{database}database interface presented in Chapter 6 allows you to
develop client/server applications without \textit{any} explicit
network programming when the server is a database. A \index{SQL}SQL
server is overkill for many applications such as the high score server,
and it is not appropriate for other non-database network applications
such as the \texttt{italk} program.
For these kinds of programs, it is better to "roll your
own" network application protocol. Once a connection is
established (perhaps using a client/server paradigm), the actual
communication between programs is just as easy as file input and
output. If you do roll your own network application, keep the protocol
simple; it is easy enough to write yourself into deadlocks, race
conditions, and all the other classic situations that make parallel and
distributed programming perilous.
\documentclass[11pt]{article}
\setlength{\topmargin}{-.5in}
\setlength{\textheight}{23.5cm}
\setlength{\textwidth}{17.0cm}
\setlength{\oddsidemargin}{.025in}
\setlength{\evensidemargin}{.025in}
\setlength{\textwidth}{6.25in}
\usepackage{amsmath}
\usepackage{graphicx}
\usepackage{verbatim} % useful for program listings
\usepackage{color} % use if color is used in text
\usepackage{subfigure} % use for side-by-side figures
\usepackage{float}
\usepackage{Sweave}
\usepackage{url}
\newcommand{\mqm}{\emph{MQM}}
\newcommand{\MQM}{\mqm}
\newcommand{\qtl}{QTL}
\newcommand{\QTL}{\qtl}
\newcommand{\xqtl}{\emph{x}QTL}
\newcommand{\mqtl}{\emph{m}QTL}
\newcommand{\eqtl}{\emph{e}QTL}
\newcommand{\lod}{LOD}
\newcommand{\cM}{cM}
\newcommand{\rqtl}{\emph{R/qtl}}
\newcommand{\cim}{\emph{CIM}}
\newcommand{\At}{\emph{Arabidopsis thaliana}}
\newcommand{\FIXME}{({\bf FIXME!})}
\newcommand{\CHECK}{({\bf CHECK!})}
\newcommand{\NOTE}[1]{({\tt NOTE: #1 })}
\newcommand{\intro}[1]{\vspace{0.15in}#1:}
\newcommand{\code}{\texttt}
\newcommand{\etal}{\emph{et al.}}
\newcommand{\Atintro}{\At\ RIL mQTL dataset (multitrait) with 24 metabolites as phenotypes \cite{Keurentjes2006}}
\newcommand{\Atintrocolors}{\Atintro\ comparing \mqm\ (\code{mqmscan} in green) and
single \qtl\ mapping (\code{scanone} in black)}
\title { Tutorial - Multiple-QTL Mapping (MQM) Analysis for R/qtl }
\author { Danny Arends, Pjotr Prins, Karl W. Broman and Ritsert C. Jansen }
\begin {document}
\maketitle
\clearpage
\setkeys{Gin}{width=6.25in} %% <- change width of figures
\section{Introduction}
\input{mqm/description.txt}
\vspace{0.3in}
\input{mqm/advantages_latex.txt}
\input{mqm/limitations.txt}
Despite these limitations, \mqm\footnote{MQM should not be confused with
composite interval mapping (CIM) \cite{CIMa,CIMb}. The advantage of MQM
over CIM is reduction of type I error (a QTL is indicated at a location where there
is no QTL present) and type II error (a QTL is not detected) for QTL detection
\cite{jansen94b}.} is a valuable addition to the \qtl\ mapper's toolbox. It
is able to deal with QTL in coupling phase and QTL in repulsion phase. \mqm\
handles missing data and has higher power to detect QTL (linked and unlinked)
than other methods. R/qtl's \mqm\ is faster than other implementations and
scales on multi-CPU systems and computer clusters. In this tutorial we will
show you how to use \mqm\ for \qtl\ mapping.
\mqm\ is an integral part of the free \rqtl\
package \cite{rqtlbook,broman09,broman03} for the R statistical
language\footnote{We assume the reader knows how to load his data into R using
the R/qtl \code{read.cross} function; see also the R/qtl tutorials \cite{broman09}
and book \cite{rqtlbook}.}.
\section{A quick overview of \mqm}
These are the typical steps in an \mqm\ \qtl\ analysis:
\begin{itemize}
\item Load data into R
\item Fill in missing data, using either \code{mqmaugmentdata} or \code{fill.geno}
\item Unsupervised backward elimination to analyse \emph{cofactors}, using \code{mqmscan}
\item Optionally select \emph{cofactors\/} at markers that are thought to influence \qtl\ at, or near, the location
\item Permutation or simulation analysis to get estimates of significance, using \code{mqmpermutation} or \code{mqmscanfdr}
\end{itemize}
Using maximum likelihood (ML), or restricted maximum likelihood (REML), the
algorithm employs a backward elimination strategy to identify \qtl\ underlying
the trait. The algorithm passes through the following stages:
\begin{itemize}
\item Likelihood-based estimation of the full model using all cofactors
\item Backward elimination of cofactors, followed by a
genome scan for \qtl
\item If there are no \emph{cofactors\/} defined, the backward elimination of cofactors
is skipped and a genome scan for \qtl\ is performed, testing each genetic (interval)
location individually. In this case REML and ML will result in the same \qtl\ profile
because there is no full model.
\end{itemize}
The results created during the genome scan and the \qtl\ model are
returned as an (extended) R/qtl \code{scanone} object. Several special
plotting routines are available for \mqm\ results.
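The backward-elimination stage can be sketched generically as follows (a
toy example in Python, not the actual \rqtl\ implementation, with a
made-up \code{score} function standing in for the REML/ML fit): the least
informative cofactor is dropped repeatedly until every remaining cofactor
contributes more than a chosen threshold.
\begin{verbatim}
# Generic sketch of backward elimination over a set of cofactors.
# score(model) is a hypothetical fit criterion (standing in for the
# REML/ML fit used by MQM); threshold is the required contribution.

def backward_elimination(cofactors, score, threshold):
    model = set(cofactors)
    while model:
        full = score(model)
        # cofactor whose removal hurts the fit the least
        weakest = min(model, key=lambda c: full - score(model - {c}))
        if full - score(model - {weakest}) < threshold:
            model.remove(weakest)   # contribution too small: drop it
        else:
            break                   # all remaining cofactors matter
    return model

# Toy demonstration: cofactors "a" and "b" matter, "c" barely does.
contribution = {"a": 5.0, "b": 5.0, "c": 1.0}

def toy_score(model):
    return sum(contribution[c] for c in model)

print(backward_elimination({"a", "b", "c"}, toy_score, threshold=2.0))
\end{verbatim}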
%\clearpage
\section{Data augmentation}
\label{augmentation}
In an ideal world all datasets would be complete (with the genotype
for every individual at every marker determined); in the real world,
however, datasets are often incomplete. That is, genotype
information is missing, or can have multiple plausible values. \mqm\
automatically expands the
dataset by adding all potential variants and attaching a probability to each. For
example, suppose information is missing (unknown) at a marker location for one
individual. Based on the values of the neighbouring markers, and the
(estimated) recombination rate, a probability is attached to each possible
genotype. With \mqm\ all genotypes with a probability above the
parameter \code{minprob} are considered.
When encountering a missing marker genotype (possible genotypes {\bf A} and {\bf B} in a
RIL), all possible genotypes at the missing location are created. Thus at
the missing location two `individuals' are created in the \emph{augmentation} step,
one with genotype {\bf A}, and one with genotype {\bf B}. A probability is
attached to both \emph{augmented} individuals. The combined probability of all
missing marker locations tells whether a genotype is likely, or unlikely,
which allows for weighted analysis later.
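As a back-of-the-envelope illustration of these probabilities (a Python
sketch with made-up recombination fractions, not the \rqtl\ code), consider
a RIL individual typed {\bf A} at both markers flanking a missing position;
under a simple Markov model along the chromosome, the fill-in {\bf A} is far
more likely than {\bf B}:
\begin{verbatim}
# Toy illustration of genotype probabilities for one missing marker in a
# RIL, assuming a simple Markov model along the chromosome. r_left and
# r_right are hypothetical recombination fractions between the missing
# marker and its typed neighbours (both observed as genotype "A").

def fill_in_probabilities(r_left, r_right):
    """Return P(A), P(B) at the missing position given flanking A ... A."""
    w_a = (1 - r_left) * (1 - r_right)   # no recombination on either side
    w_b = r_left * r_right               # recombination on both sides
    total = w_a + w_b
    return w_a / total, w_b / total

p_a, p_b = fill_in_probabilities(0.05, 0.05)
print("P(A) = %.4f, P(B) = %.4f" % (p_a, p_b))   # 0.9972 and 0.0028
\end{verbatim}
With \code{minprob} larger than the second value, only the {\bf A} variant
would be kept in the augmented dataset.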
To see an example of missing data with an F$_2$ intercross, we can
visualize the genotypes of the individuals using \code{geno.image}. In
Figure~\ref{missing data} there are 2\% missing values in white. The
other colors are genotypes at a certain position, for a certain
individual. Simulate an F$_2$ dataset with 2\% missing genotypes as
follows:
\intro{Simulate a dataset with missing data}
% set seed so that everything comes out exactly the same
\begin{Schunk}
\begin{Sinput}
> library(qtl)
> data(map10)
> simcross <- sim.cross(map10, type="f2", n.ind=100, missing.prob=0.02)
\end{Sinput}
\end{Schunk}
and plot the genotype data using \code{geno.image} (Figure~\ref{missing data}):
\begin{Schunk}
\begin{Sinput}
> geno.image(simcross)
\end{Sinput}
\end{Schunk}
\begin{figure}
\documentclass{article}
\usepackage{algorithm} %format of the algorithm
\usepackage{algorithmic} %format of the algorithm
\usepackage{graphics}
%%%%%%%%%%%%%%%%%%%%%%%%%%%%
\begin{document}
\begin{abstract}
In this work we explore the Steepest Descent method, which can be seen as a variation of the well-studied Gradient Descent method. We first give the exact definition of the Steepest Descent method, and then sketch the convergence analysis for both methods, which is very similar. \\
Our experiments then show the drastic impact that the choice of norm has on the performance of the Steepest Descent method. Finally, we try to use the Hessian to construct the norm automatically, in a scheme we call G-S descent, but this attempt failed.
\end{abstract}
\section{INTRODUCTION}
Among methods for solving unconstrained optimization problems, one of the most well-known and well-studied is the Gradient Descent method; we first briefly introduce it in Algorithm \ref{alg:A}.\\
In step 2 of the Gradient Descent method, we must choose between exact line search and backtracking line search. Although exact line search is often preferred, in our experiments we use backtracking line search, which is easier to implement; it is shown in Algorithm \ref{alg:B}.
\begin{algorithm}\caption{\label{alg:A}Gradient descent method.}
\begin{algorithmic}
\STATE \textbf{Given} a starting point $x \in \textbf{dom} f$
\REPEAT
\STATE 1. $\Delta x:=-\nabla f\left( x\right) $.
\STATE 2. \textit{Line search.} Choose step size t via exact or backtracking line search.
\STATE 3. \textit{Update.} $x:=x+t\Delta x$
\UNTIL{stopping criterion is satisfied.}
\end{algorithmic}
\end{algorithm}
\begin{algorithm}\caption{\label{alg:B}Backtracking line search.}
\begin{algorithmic}
\STATE \textbf{Given} a descent direction $\bigtriangleup x$ for $f$ at $x \in \textbf{dom} f, \alpha \in (0,0.5), \beta \in (0,1)$
\STATE $t := 1$
\STATE \textbf{While} $f(x + t\bigtriangleup x) > f(x) + \alpha t \nabla f(x)^T \bigtriangleup x$ \\
{$t = \beta t$}
\end{algorithmic}
\end{algorithm}
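For concreteness, Algorithms \ref{alg:A} and \ref{alg:B} can be written in a
few lines of Python. This is only an illustrative sketch; the objective is
the function $g_1$ used in our experiments below, with the same parameters
$\alpha = 0.2$, $\beta = 0.8$ and tolerance $0.01$.
\begin{verbatim}
import numpy as np

def backtracking(f, g, x, dx, alpha=0.2, beta=0.8):
    """Backtracking line search (Algorithm 2)."""
    t = 1.0
    while f(x + t * dx) > f(x) + alpha * t * (g @ dx):
        t *= beta
    return t

def gradient_descent(f, grad, x0, tol=1e-2, max_iter=1000):
    """Gradient descent (Algorithm 1) with backtracking line search."""
    x = np.asarray(x0, dtype=float)
    for _ in range(max_iter):
        g = grad(x)
        if np.linalg.norm(g) <= tol:    # stopping criterion
            break
        dx = -g                         # step 1: negative gradient
        t = backtracking(f, g, x, dx)   # step 2: line search
        x = x + t * dx                  # step 3: update
    return x

def g1(x):
    return (np.exp(x[0] + 3*x[1] - 0.1) + np.exp(x[0] - 3*x[1] - 0.1)
            + np.exp(-x[0] - 0.1))

def g1_grad(x):
    a = np.exp(x[0] + 3*x[1] - 0.1)
    b = np.exp(x[0] - 3*x[1] - 0.1)
    c = np.exp(-x[0] - 0.1)
    return np.array([a + b - c, 3*a - 3*b])

print(gradient_descent(g1, g1_grad, [-0.4, 1.0]))
\end{verbatim}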
We have to mention that, for historical reasons, Gradient Descent is also sometimes called the Steepest Descent method $[1]$ (its direction is indeed the locally ``steepest'' one with respect to the Euclidean norm). However, the Steepest Descent method considered here is another story.\\
Let $||\cdot||$ be any norm on $R^n$. We define a normalized steepest descent direction (with respect to the norm $||\cdot||$) as
\begin{displaymath}
\Delta x_{nsd} = \mathop{\mathrm{argmin}}\{ \nabla f(x)^T v \mid ||v|| = 1\}
\end{displaymath}
Now we can give the Steepest descent method in Algorithm \ref{alg:C}.
\begin{algorithm}\caption{\label{alg:C}Steepest descent method}
\begin{algorithmic}
\STATE \textbf{Given} a starting point $x \in \textbf{dom} f$
\REPEAT
\STATE 1. Compute steepest descent direction $\Delta x _{nsd}$.
\STATE 2. \textit{Line search.} Choose step size t via exact or backtracking line search.
\STATE 3. \textit{Update.} $x:=x+t\Delta x$
\UNTIL{stopping criterion is satisfied.}
\end{algorithmic}
\end{algorithm}
To get a better understanding of the Steepest Descent Method, let's look at some examples.
\subsection{Steepest Descent for Euclidean and quadratic norms}
If we take the norm $||\cdot||$ to be the Euclidean norm, then the steepest descent direction is simply the negative gradient.\\
Given $P \in S^n_{++}$, we now consider the quadratic norm
\begin{displaymath}
||z||_P = (z^TPz)^{1/2} = ||P^{1/2}z||_2
\end{displaymath}
The normalized steepest descent direction is given by
\begin{displaymath}
\Delta x_{nsd} = -(\nabla f(x) ^ T P ^{-1} \nabla f(x))^{-1/2} P ^{-1} \nabla f(x)
\end{displaymath}
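As a quick numerical check (an illustrative Python sketch with an
arbitrarily chosen positive definite $P$), the direction above is simply
the negative gradient rescaled by $P^{-1}$ and normalized to unit $P$-norm:
\begin{verbatim}
import numpy as np

def nsd_direction(grad, P):
    """Normalized steepest descent direction for the quadratic norm ||.||_P."""
    Pinv_g = np.linalg.solve(P, grad)        # P^{-1} grad
    scale = float(grad @ Pinv_g) ** -0.5     # (grad^T P^{-1} grad)^{-1/2}
    return -scale * Pinv_g

P = np.array([[0.5, 0.0], [0.0, 0.25]])
g = np.array([1.0, 2.0])
d = nsd_direction(g, P)
print(d)                    # the normalized steepest descent direction
print(np.sqrt(d @ P @ d))   # its P-norm is 1 by construction
\end{verbatim}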
\section{Discussion and examples}
\subsection{Choice of norm for steepest descent}
One important issue for the steepest descent method is the choice of a good norm. From the convergence analysis we know that the gradient method works well when the condition number of the sublevel sets is moderate, and poorly otherwise. Let us begin with the quadratic $P$-norm: using it amounts to a change of coordinates $\overline{x} = P^{1/2}x$. So a good idea is to make the ellipsoid
\begin{displaymath}
\varepsilon = \{ x | x^TPx \leq 1\}
\end{displaymath}
be a good approximation of the shape of the sublevel set. (In other words, it gives a good approximation after appropriate scaling and translation.) \\
Let us consider the following function:
\begin{displaymath}
g_1(x_1, x_2) = e ^ {x_1 + 3x_2 -0.1} + e ^ {x_1-3x_2-0.1} + e^{-x_1-0.1}
\end{displaymath}
For all three runs we start at the point $(-0.4,1)$ and use backtracking line search with $\alpha=0.2$, $\beta=0.8$, an initial step of 1, and a tolerance of 0.01. For the two steepest descent runs we choose $P_1 = \left(\begin{array}{cc} 1/2 & 0 \\ 0 & 1/4\end{array}\right)$ and
$P_2 = \left(\begin{array}{cc} 1/4 & 0 \\ 0 & 1/2\end{array}\right)$; the results are shown in Figure \ref{fig:htx1}.\\
We also overlay the shape of each norm on the graphs. From the results we can clearly see that the norm $P_1$, which is a better
approximation of the sublevel set, gives the best result, while with $P_2$ the method becomes worse than the original gradient descent. \\
\begin{figure}
\begin{tabular}{cc}
\includegraphics[width=40mm]{htx1-1.jpg} &
\includegraphics[width=40mm]{htx1-2.jpg} \\
G-descent : 11 iterations&
S-descent with $P_1$ : 6 iterations \\
\includegraphics[width=40mm]{htx1-3.jpg} \\
S-descent with $P_2$ : 29 iterations
\end{tabular}
\caption{\label{fig:htx1}Three methods for $g_1$.}
\end{figure}
To further illustrate the power of the steepest descent method, we choose a function whose sublevel sets are exactly ellipses:
\begin{displaymath}
g_2(x_1, x_2) = x_1^2 + 8 x_2^2
\end{displaymath}
The results are shown in Figure \ref{fig:htx2}, from which we can see the drastic effect of the choice of norm; we omit the parameters here.
\begin{figure}
\begin{tabular}{cc}
\includegraphics[width=40mm]{htx2-1.jpg} &
\includegraphics[width=40mm]{htx2-2.jpg} \\
G-descent : 23 iterations&
S-descent with $P_1$ : 10 iterations \\
\includegraphics[width=40mm]{htx2-3.jpg} \\
S-descent with $P_2$ : 134 iterations
\end{tabular}
\caption{\label{fig:htx2}Three methods for $g_2$.}
\end{figure}
\subsection{Attempts to construct the norm using the Hessian}
Now we know how a norm can be used to aid the gradient descent method. One important issue still remains: how can we find the best norm? From the Taylor series we know that, around a local optimum $x^{\star}$,
\begin{displaymath}
f(y) \approx p^{\star} + \frac{1}{2} (y - x^\star)^T\nabla^2f(x^\star)(y-x^\star)
\end{displaymath}
So the sublevel sets near the local optimum are well approximated by ellipsoids. We therefore try to use the Hessian as $P$, that is, let
\begin{displaymath}
P = \nabla^2f(x)
\end{displaymath}
However, this is sometimes invalid: we need $P$ to be positive definite.\\
Given this insight, let's consider the simplified $g_1$
\begin{displaymath}
g_3(x_1, x_2) = e ^ {x_1 + 3x_2} + e ^ {x_1-3x_2} + e^{-x_1}
\end{displaymath}
First we tried to set $P = \nabla^2f(x)$ at every step, but this failed; when we inspected the results we found that the resulting $P$ matrices were badly behaved, and we concluded that the reason is that we were not yet near the optimum.\\
To make the Hessian a good approximation, a second idea is to exploit the long-tail behaviour of descent methods: starting from the point $(0.9, 0.9)$, we first set $P$ to the identity matrix, i.e., we perform plain gradient descent; then, once $||\nabla f(x)|| < 10 \cdot tolerance$, we switch $P$ to the local Hessian. We call this scheme G-S descent.
\begin{figure}
\begin{tabular}{cc}
\includegraphics[width=40mm]{htx3-1.jpg} &
\includegraphics[width=40mm]{htx3-2.jpg} \\
G-descent : 15 iterations&
G-S-descent with Hessian : 19 iterations \\
\end{tabular}
\caption{\label{fig:htx3}Two methods for $g_3$.}
\end{figure}
Sadly, G-S descent only slows down the process. This is not hard to understand: as $x$ converges to the optimum, the direction of the gradient becomes stable, so changing the norm no longer helps. Hence, if we choose to use the Steepest Descent method, we should use our chosen norm from the very beginning.
\section{CONCLUSIONS}
\section{REFERENCES}
[1] Mordecai Avriel, \emph{Nonlinear Programming: Analysis and Methods}, Dover Publications.\\
[2] Stephen Boyd and Lieven Vandenberghe, \emph{Convex Optimization}, Cambridge University Press.\\
% That's all folks!
\end{document}
\chapter{Introduction}
\label{chap:introduction}
\section{Emergence of the Distributed Ledger model}
Ledgers have value as \textit{archives}; in other words, their value lies in their capability of being consulted to check, verify and manage records.
Ledgers have been a central element of commerce since ancient times, and are used to record a variety of information, ranging from financial assets to real estate properties, but most importantly how these change hands, that is, transactions.
The medium on which transactions are stored may have changed from clay tablets to hardware storage, but in all this time there have been no notable innovations to the underlying architecture of the system.
Each financial institution (e.g. banks, governments, investment funds) manages its own ledgers, each designed differently based on needs, goals and customers (the would-be \textit{counterparts} in a transaction), and in turn the counterparts keep their own records of the transactions.
This duplication of information amongst all parties participating in a transaction drives a need for costly matching between each copy of the information, reconciliation and error fixing. The plurality of technology platforms upon which financial entities rely adds to this, creating more complexity and operational risks, some of them potentially systemic.
For example, let's consider the need for a party to transfer an asset, be it cash or a stock, to another party. The transaction itself can be executed in microseconds, but the settlement - the ownership transfer of the asset - usually takes more time, from days to weeks.
This delay is due to several reasons: the parties don't have access to each other's ledgers, and can't automatically verify that the assets about to be transferred are in fact owned and not counterfeit. So a number of intermediaries are needed as guarantors of the assets and to verify the transaction.
A number of steps have to be added just for this trust mechanism, and in addition the differences between the infrastructures and technologies of the parties acting in the transaction are such that there is always a need for a reconciliation process between parties (i.e. adjusting each ledger to the transaction), increasing the cost and duration of the operations.
Centralized infrastructures were until recently an unavoidable model, as there were few ways to consolidate technologies without effectively consolidating the financial entities themselves. The industry has been moving toward the standardization and sharing of data, and of some of the business logic behind the architectures, through the delegation of parts of the process to third parties, but these steps are still lagging behind the evolution of the technology.
\begin{figure}[t]
\centering
\includegraphics[scale=0.3]{architectures-comparison.png}
\caption{
Comparison between architectures, \cite{cordawhitepaper}.
}
\end{figure}
The term Distributed Ledger Technology refers to the processes, protocols and technologies that enable nodes in a distributed network to share data (propose, validate and record) between multiple synchronized data stores, collectively maintained. The emergence of these technologies has had a stimulating effect on the FinTech industry, prompting a reconsideration of the entities needed in a financial transaction, how trust should be established, how the transaction is represented, how data is secured, and more. Even if DLT is not the answer in every case, asking these questions alone can be a force driving progress forward.
\section{Brief history}
In 2008, a white paper (\cite{bitcoinpaper}) written by an as yet unidentified person using the pseudonym of Satoshi Nakamoto, outlined a novel approach of transferring cash from one party to another without the need for a known and trusted third-party in a P2P manner, claiming, amongst other things, to have solved the issue of double-spend for digitalized currencies.
The technology outlined in the paper was named Blockchain, referring to the way of organizing data and transactions. Bitcoin has soared in terms of popularity and value through its cryptocurrency market, but is just one element in its whole architecture. \\
The effort of the industry since the introduction of blockchains has been directed to exploring different ways of leveraging this technology beyond Bitcoin, focusing on the core architecture of distributed record management. This use has gathered significant attention, reflecting the financial industry traditional reliance on multiple ledgers to maintain transactions. The use of DLT would be particularly effective for payment, clearing and settling activities because of the potential for simplification of the settling and reconciliation process between the parties involved. \\
Some of the resulting implementations of DLTs have been on a steady rise, such as Ethereum, which similarly to Bitcoin has seen a steep rise in the value of its cryptocurrency, Ether, and unlike its predecessor offers a more malleable environment (which is the main reason Vitalik Buterin created it), allowing for the transfer and recording of other assets like loans or contracts. Other rising implementations include R3 Corda, which showcases an architecture heavily based on financial use-cases, IBM Hyperledger Fabric and Digital Asset Platform.
As the research and development of the technology progresses, real-world applications have highlighted some of the challenges associated with these use-cases, including the need for safe, secure and scalable systems. \\
As of 2018, the impact of DLTs in the financial sector still seems limited. Despite strong progress in the research, it would seem that in the near-to-medium term many of the benefits and efficiency gains of DLT are likely to be reaped by start-ups and financial institutions in the developing countries, such as ABRA,\nocite{abracompany} a company that offers instant P2P money transfers with no transaction fees through the Abra Network, combining cryptocurrencies with physical bank tellers; Ripple,\nocite{ripplecompany} which similarly deals in commercial cross-border and inter-bank payments with a peculiar dynamic approach towards transactions, where the flow of funds between a sender and receiver can go through a series of participating institutions that offer services (letting customers, for example, find better foreign exchange transactions); and ShoCard,\nocite{shocardcompany} a digital identity card that stores ID information on the Bitcoin blockchain, with the company currently in the process of developing solutions for different use cases like identity verification, financial services credentialing or automated registrations for online purchases. \\
\begin{figure}[h]
\begin{tcolorbox}[colframe=boxcolor]
\section*{What is a blockchain?}
The term blockchain refers to the most well-known configuration of DLTs: a distributed ledger architecture where the data is stored in entities called transaction blocks, linked with each other through cryptographic hashing. The blockchain itself is the data structure formed by these linked blocks. Blockchains make use of algorithmic methods and cryptography to ensure immutability, security and truthfulness of the information.
New additions are initiated by one of the nodes, which creates a new block of data containing the encrypted transactions. Information about the block is then shared on the network, and all participants collectively try to determine the block's validity according to a pre-defined algorithmic validation method (consensus). After the validation, all the participants can add the block to their copy of the ledger. With this mechanism, every change to the ledger is replicated across the entire network, and each node has a full, identical copy of the entire ledger at any point in time. As the chain grows and new blocks are added, earlier blocks cannot be altered. \\
The cryptocurrency aspect is what has made Bitcoin gain the most fame. Bitcoin was designed specifically for creating a digital currency free of government control, while also anonymizing the identity of the participants.
The consensus process involves the generation of a reward to the node that validated the last block of the blockchain, that being the currency in itself.
\end{tcolorbox}
\end{figure}
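To make the chaining mechanism concrete, the following toy Python sketch
(not any particular blockchain implementation) links blocks by storing each
predecessor's hash, so that tampering with an earlier block invalidates
every later link:
\begin{verbatim}
import hashlib
import json

# Toy hash-linked chain: each block stores the hash of its predecessor,
# so altering an earlier block breaks every later link.

def block_hash(block):
    return hashlib.sha256(json.dumps(block, sort_keys=True).encode()).hexdigest()

def make_block(transactions, prev_hash):
    return {"transactions": transactions, "prev_hash": prev_hash}

def valid_chain(chain):
    """Check that every block references the hash of the previous one."""
    for prev, curr in zip(chain, chain[1:]):
        if curr["prev_hash"] != block_hash(prev):
            return False
    return True

genesis = make_block(["genesis"], prev_hash="0" * 64)
b1 = make_block(["Alice pays Bob 5"], prev_hash=block_hash(genesis))
b2 = make_block(["Bob pays Carol 2"], prev_hash=block_hash(b1))
chain = [genesis, b1, b2]

print(valid_chain(chain))                        # True
genesis["transactions"] = ["genesis (tampered)"]
print(valid_chain(chain))                        # False: later links break
\end{verbatim}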
\begin{figure}[h]
\begin{tcolorbox}[colframe=boxcolor]
\section*{Double-spending}
Double-spending is an issue unique to digital currencies, and is the risk that a digital currency can be spent twice. Physical currencies do not have this issue, as they're not easily reproduced, but digital information, on other hand, is easily replicated. \\
With digital currency, there is a risk that its holder could make a copy of the digital token and send it to another party, while retaining the original.
\end{tcolorbox}
\end{figure}
\newpage
\section{Distributed Ledger taxonomy}
It is emphasized that DLT is not a single, well-defined technology, but as of today there is a plurality of blockchains and distributed ledgers in active development.
DLs can be designed in a number of ways pertaining to main idea behind them and the use-cases they're designed to respond to. Such arrangements usually involve several key technical design concepts that specify how the information has to be kept on the ledger and how the latter has to be updated.
There usually are four core attributes of DLTs, these are:
\begin{enumerate}
\item The distributed nature of the ledger
\item The cryptographic mechanisms
\item The consensus mechanism
\item The network access permission level
\end{enumerate}
These four elements play are fundamental in ensuring the distributed ledger ability to store and exchange data across different, self-interested parties, without the need for a central record-keeper, without the need for trust amongst the concerned parties, as it is guaranteed by the system itself, and while assuring that no double-spending takes place. Each DLT addresses these attributes in their own specific way, but their abstract taxonomic aspects remain the same.
\subsection{Distributed nature of the ledger}
In its simplest form, a distributed ledger is a data store held and updated by each participant (or node) in a network. The control over the ledger does not lie within any single entity, but within several, if not all the network participants. This sets the technology apart from cloud computing or data replication, which are commonly used as shared ledgers.
There are different configurations to be analyzed regarding how the data is maintained over the ledger.
In blockchains, no single entity of the network can amend past data entries, and no single entity can approve new additions to the ledger, which have to go through a predefined consensus mechanism. At any point in time there exists only one version of the ledger, and each network participant owns a full and up-to-date copy of it.
After validation the new transaction(s) are added to all the ledgers to ensure data consistency across the network.
In configurations like Corda's, each node maintains a separate ledger. The entirety of the ledger is the union of these ledgers, but isn't public, each peer can only see a subset of the facts on the ledger, and no peer is aware of the ledger in its entirety. This is due to Corda's design, where data is shared only on a need-to-know basis and only to directly involved parties.
Generally, this distributed nature of DLs allows the removal of a trusted central party, increasing speed and potentially removing friction costs and inefficiencies associated with the matching and reconiculiation processes. It also improves security, removing the single point of attack and single point of failure that is represented by the central trusted entity. To potentially gain control over the network, a malicious third party would have to gain control over 50\%+1 nodes in the network.
Security risks aren't completely solved: the software layer built over the distributed ledger can become an additional attack surface.
\subsection{Cryptographic mechanisms}
Cryptography is at the core of the DLT. Asymmetric cryptography plays an important role by identifying and authenticating participants, confirming data entries and facilitating ledger updates.
Each data entry is hashed, producing a so-called digest. The data is in this way hidden from anyone who is not intended to look at it, as the digest, which looks random and unrelated to the original input, is in fact deterministic, meaning that each original input has exactly one possible hash. Digital signatures, which are a common and robust method used in a wide array of applications, are used as a means of authentication. Each network participant has a private key, which is used for signing digital messages and is only known to the key owner, and a public key, which is public knowledge and is used for validating the identity of the sender of the original message.
Participants proposing changes authenticate themselves using digital signatures, and the validators use cryptographic tools to verify whether the participant has the proper credentials, and so on. The validators can be either a counterpart, a third party, or the whole network, depending on the type of DL and on the operation the change refers to.
In the blockchain subset of DLs in particular, cryptographic hashes play a fundamental role, as they are essential to the chaining mechanism between the blocks that make up the blockchain itself.
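As an illustration of the sign-and-verify flow described above, the
following Python sketch (assuming the third-party \texttt{cryptography}
package; any signature scheme would serve) shows a participant signing a
message with its private key and a validator checking it with the
corresponding public key:
\begin{verbatim}
from cryptography.hazmat.primitives.asymmetric.ed25519 import Ed25519PrivateKey
from cryptography.exceptions import InvalidSignature

private_key = Ed25519PrivateKey.generate()   # known only to the participant
public_key = private_key.public_key()        # shared with the network

message = b"transfer 5 units from Alice to Bob"
signature = private_key.sign(message)

try:
    public_key.verify(signature, message)    # raises if the check fails
    print("signature valid")
except InvalidSignature:
    print("signature invalid")

try:
    public_key.verify(signature, b"transfer 500 units from Alice to Bob")
except InvalidSignature:
    print("tampered message rejected")
\end{verbatim}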
\subsection{Consensus mechanism}
\label{sec:consensus}
The purpose of the consensus mechanism is to verify that the information being added to the DL is legitimate. It is fundamental in handling conflicts between multiple simultaneous competing entries (ie double spending), or take-overs by bad actors in the network. It's a derivative property of the distributed nature of the ledger, and it requires participants in the network to reach a consensus over the information being added. There exist many different consensus algorithms and mechanisms, with different purposes, advantages and disadvantages. \\
Consensus usually involves two steps:
\begin{enumerate}
\item Validation, where each validator involved identifies that the state change is consistent according to the rules of the ledger. This operation may rely on records of previous states or a last agreed state.
\item Agreement, where each validator agrees to the state changes to the ledger. This step involves the mechanisms to resolve eventual conflicts and ensuring that valid changes are made only once, thus ensuring that the whole network is synchronized.
\end{enumerate}
According to the DLT configuration, the mechanisms to avoid double-spendings fit in either of the two steps. \\
The Bitcoin blockchain uses Proof-of-Work (PoW) to establish consensus. To add a new block to the blockchain, a node has to provide a proof of work. This is a computationally taxing problem, but easy to verify, and is solved by brute-forcing cryptographic hashing algorithms until a string that satisfies certain conditions is generated. This process is called ``mining''. Each miner that produces a valid PoW is then rewarded with Bitcoins, which serves as an economic incentive to maintain system operation and integrity.\\
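The mining loop can be illustrated with a toy Python sketch (using a
made-up difficulty target, far below Bitcoin's actual parameters): the
node varies a nonce until the block's hash satisfies the target, and
anyone can re-check the result with a single hash:
\begin{verbatim}
import hashlib

def mine(data, difficulty=4):
    """Find a nonce whose hash starts with `difficulty` zero hex digits."""
    nonce = 0
    target = "0" * difficulty
    while True:
        digest = hashlib.sha256(("%s:%d" % (data, nonce)).encode()).hexdigest()
        if digest.startswith(target):
            return nonce, digest
        nonce += 1

def verify(data, nonce, difficulty=4):
    """Verification is a single hash, however hard the nonce was to find."""
    digest = hashlib.sha256(("%s:%d" % (data, nonce)).encode()).hexdigest()
    return digest.startswith("0" * difficulty)

nonce, digest = mine("block 42: Alice pays Bob 5")
print(nonce, digest)
print(verify("block 42: Alice pays Bob 5", nonce))   # True
\end{verbatim}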
The Ethereum blockchain uses Proof-of-Stake (PoS). Its process is quite different from Bitcoin's PoW, as there is no mathematical problem to solve; instead, the creator of the new block is chosen in a deterministic way based on their stake, that is, how many coins or tokens they possess.
A key advantage of this approach is its energy efficiency. The Bitcoin network, for example, requires an annual energy consumption comparable to that of Colombia (57.6 TWh annually).
Thus PoS systems are well suited to platforms where there is a static coin supply, without inflation from block rewards. The rewards consist only of the transaction fees. \\
\begin{figure}[h]
\centering
\includegraphics[scale=0.3]{bitcoin-energy.png}
\caption{
Data as at April 3rd, 2018. Retrieved from https://digiconomist.net/bitcoin-energy-consumption}
\end{figure}
The Corda distributed ledger utilizes a unique ``pluggable'' consensus service, due to its peculiar distributed ledger architecture. It divides consensus into two types, validity consensus and uniqueness consensus. Given a proposed transaction, the first type of consensus checks whether the two parties satisfy a set of validity conditions, going back through all the parties' transaction histories, while the latter has the purpose of avoiding double-spending; this consensus is provided by a Corda network service called a ``Notary'', which attests that, for a given transaction, no competing transaction has been proposed.\\
\subsection{Network access permission level}
Participation in the DL network can be open (permissionless) or permissioned. Bitcoin and Ethereum are the most prominent examples of completely permissionless blockchains, where participants can join or leave the network at will. This is one of their strengths: a large, open permissionless system has a large number of nodes incentivized to validate new changes to the ledger accurately and to establish consensus, which is directly related to its network security (\ref{sec:consensus}).
In permissioned DLs the members are pre-selected by someone - an owner, or a network service - who controls network access and sets the rules of the ledger. The regulation of network access usually permits the use of a computationally inexpensive consensus mechanism, as there is no need to establish trust between unknown participants. This, however, means there is now a centralized trusted entity playing a coordinating role and bearing the responsibility for the trust mechanism.
In permissioned DLs it is possible to have different degrees of transparency over the ledger, and faster transaction processing (thanks to the lighter consensus algorithm) allows for higher transaction volumes.
The identity verification needed for access also addresses some of the concerns of governments and regulators about identity verification and the clarification of legal ownership. \\
Permissionless DLs have open access to the network, so anyone can join and leave as they wish. There is no central owner or administrator, the ledger is wholly transparent, and security is established by having a large-scale network. A complex consensus algorithm is required to guarantee the integrity of the information, and there are some legal concerns over the lack of ownership, as no legal entity owns or controls the ledger.
Some industry players make a distinction between public/private, in terms of access, and permissioned/permissionless, in terms of roles in the network. For example, Ripple has a permissioned ledger, but the data is validated by all participants, therefore being a public, permissioned ledger. Corda, on the other hand, has a permissioned ledger, but the data is validated only by a set of participants (those which the data concerns), hence being a private, permissioned ledger.
\begin{figure}[h]
\centering
\includegraphics[scale=0.3]{permissioned-permissionless-taxonomy.png}
\caption{
Network Access Ledger Taxonomy, \cite{ukgovdltpaper}.
}
\end{figure}
\begin{figure}[b]
\begin{tcolorbox}[colframe=boxcolor]
\section*{What are smart contracts?}
Smart contracts are self-executing contracts in which the terms of the agreement between the parties to a transaction are written as code. The code and the agreement are distributed across a distributed ledger network. Smart contracts allow trusted transactions and settlements to be carried out among different and possibly anonymous parties without the need for a central authority, legal system or enforcement mechanism. They render transactions transparent, irreversible and traceable.
\end{tcolorbox}
\end{figure}
\subsection{Roles}
Nodes in the network may play a variety of roles, depending on the participants' intentions or the technical arrangement of the DL. The Committee on Payments and Market Infrastructures of the Bank for International Settlements proposed a generalized framework, with the following different, non-exclusive roles for a node:
\begin{enumerate}
\item System Administrator: a node that controls access to the system and provides dispute resolution and notary services. This role is not required in permissionless DLs.
\item Asset Issuer: node enabled to issue assets. In the Bitcoin blockchain, there's no entity playing this role as the system creates assets (Bitcoins) by itself, according to its rules.
\item Proposer: node enabled to propose ledger updates.
\item Auditor: node enabled to view the ledger, but not to make updates. Can be used by regulators or supervisors.
\item Validator: node enabled to validate requests for addition of transactions in the ledger. This role is performed by the consensus mechanism in permissionless DLs.
\end{enumerate}
%\documentclass{beamer}
\documentclass[handout]{beamer}
\usetheme{Ilmenau}
\usepackage{graphicx,verbatim,hyperref}
\usepackage{textpos}
\usecolortheme{beaver}
\useinnertheme{default}
\setbeamertemplate{itemize item}[triangle]
\setbeamertemplate{itemize subitem}[triangle]
\setbeamertemplate{itemize subsubitem}[circle]
\setbeamertemplate{enumerate items}[default]
\setbeamertemplate{blocks}[upper=block head,rounded]
\setbeamercolor{item}{fg=black}
\usefonttheme{serif} %should allow ccfonts to take effect
\usepackage{cite}
\usepackage{xcolor,bm}
\usepackage{amsbsy,amssymb, amsmath, amsthm}
\usepackage{booktabs}
%David miller's fonts
\usepackage[T1]{fontenc}
\usepackage[boldsans]{ccfonts}
\usepackage[euler-hat-accent]{eulervm}
\newcommand{\al}{\alpha}
\newcommand{\expect}{\mathbb{E}}
\newcommand{\Bt}{B(\bm{\tau^a})}
\newcommand{\bta}{\bm{\tau^a}}
\newcommand{\btn}{\bm{\tau^{tw}}}
\newcommand{\ga}{\gamma}
\newcommand{\ve}{\varepsilon}
\newcommand{\ta}{\theta}
\newenvironment{changemargin}[2]{%
\begin{list}{}{%
\setlength{\topsep}{0pt}%
\setlength{\leftmargin}{#1}%
\setlength{\rightmargin}{#2}%
\setlength{\listparindent}{\parindent}%
\setlength{\itemindent}{\parindent}%
\setlength{\parsep}{\parskip}%
}%
\item[]}{\end{list}}
\let\Tiny=\tiny
\title[Explaining Gradualism in Trade Liberalization \hspace{2.5in}\insertframenumber/\inserttotalframenumber]{Explaining Gradualism in Trade Liberalization: \\A Political Economy Approach}
\author[Kristy Buzard]{\texorpdfstring{Kristy Buzard\newline Syracuse University \newline\url{[email protected]}}{Kristy Buzard}}
\date{November 18, 2017}
\begin{document}
\maketitle
%\insertpresentationendpage removed b/c of appendix
\section{Overview}
\subsection{Preview}
\begin{frame}{Average tariffs for U.S., Western Europe, and Japan}
\includegraphics[height=2in, width=4.25in]{linegraph-Bown-Irwin.png} \\
\scriptsize Source: Bown, C.P., Irwin, D.A., (2017) ``The GATT's Starting Point: Tariff Levels circa 1947,'' in Assessing the World Trade Organization: Fit for Purpose?, M. Elsig, B. Hoekman, and J. Pauwelyn eds., Cambridge University Press, forthcoming, fig. 1
%based on backcast estimates for 1947 average tariffs, computed from data on simple average tariffs in effect at the beginning of the Kennedy Round (1964), and reports on the size of average tariff cuts arising during the initial GATT negotiating rounds.
\end{frame}
\begin{frame}
\frametitle{The Questions}
\pause
\begin{enumerate}[<+->]
\item Why would liberalization not be immediate? Why proceed in stages?
\item What are the frictions preventing free trade? %assuming free trade is efficient
\end{enumerate}
\end{frame}
\begin{frame}
\frametitle{Related Literature}
\pause
Export sector
\begin{itemize}
\item \footnotesize Benefits of trade integration to consumers (Devereau 1997)
\item \footnotesize Exporters increasingly depend on trade via capacity accumulation (Chisik 2003)
\end{itemize}
\pause
Import-competing sector
\begin{itemize}
\item \footnotesize Convex adjustment costs as workers leave import-competing sector (Mussa 1986); Furusawa $\&$ Lai similar for repeated game
\item \footnotesize Gradual reductions improve welfare when there's a minimum wage (Mehlum 1998)
\item \footnotesize Workers lose specialized skills as they leave (Staiger 1995)
\item \footnotesize Lobbying and capital mobility (MRC 2007)
\end{itemize}
\pause
Limitation of punishments to WEC (Zissimos 2007)
\end{frame}
\begin{frame}
\frametitle{Politics: Motivation}
\pause
Is there an explanation for gradualism that is \textit{fundamentally} rooted in political economy?
\pause
\begin{itemize}
\item i.e. a story that doesn't hinge on specific nature of trade
\pause
\item The hope: lessons could be applied to other issue areas
\end{itemize}
\end{frame}
\begin{frame}
\frametitle{Politics: Mechanism}
\pause
Inefficient tariffs maintained through the lobbying of import-competing industries
\pause
\begin{itemize}[<+->]
\item BUT ability to maintain protection reduced by shocks to political support
\begin{itemize}
\item a key politician losing an election or committee position
\end{itemize}
\item Immediate loss of protection / rents \textit{can} $\Rightarrow$ erosion of future political power and accompanying protection
\item Demonstrate with a dynamic model of political economy
\end{itemize}
\end{frame}
\begin{comment}
\begin{frame}{Institutional Detail}
\begin{itemize}[<+->]
\item
\begin{itemize}
\item
\item
\end{itemize}
\item
\item
\begin{itemize}
\item
\end{itemize}
\item
\begin{itemize}
\item
\item
\end{itemize}
\end{itemize}
\end{frame}
\end{comment}
%\begin{frame}{Preview of Results}
%\pause
%None yet :(
%\pause
%\begin{itemize}[<+->]
% \item I
% \item D
% \item I
%\end{itemize}
%\end{frame}
\section{Model}
\subsection{Economic and Political Structure}
\begin{frame}{Economy}
\begin{itemize}[<+->]
\item Small country (`home') and Rest of World (ROW, ${}^*$)
\item Separable in three goods: $X$ and $Y$ (traded) and numeraire
\begin{itemize}[<+->]
\item Home net importer of $X$, net exporter of $Y$
\end{itemize}
\item Home levies $\tau$ on $X$, Foreign levies $\tau^*$ on $Y$
\begin{itemize}[<+->]
\item $P_X=P_X^W + \tau$ and $\pi_X(P_X)$ increasing in $\tau$
\end{itemize}
\item Non-tradable specific factor ($F$) motivates political activity
\item Demand identical for both goods in both countries
\item $F_X(m_t,l_t) = A(m_t) F^{\alpha} l_t^{1 - \alpha}$
\end{itemize}
\end{frame}
\begin{frame}{Timeline}
\pause
Within each period $t$, taking initial wealth as given
\pause
\begin{enumerate}[<+->]
%\item[1a.] Firm productivity realized: $A(m_{t-1} + \mu_{t-1})$
\item[1.] Election occurs (reduced form based on $e_{t-1}$)
\item[2.] Lobby chooses $l_t$ and makes investments in technology $\mu_t$ and politics $e_t$
\item[3.] Government chooses tariff ($\tau_t$)
\item[4.] Production takes place, workers are paid (profits realized)
\item[5.] Tariff revenue is distributed and consumption takes place (not explicitly modeled)
\end{enumerate}
\end{frame}
\begin{frame}{Political Structure}
In Home country (foreign is passive):
\pause
\begin{itemize}[<+->]
\item Non-unitary government
\begin{itemize}[<+->]
\item Members re-elected each period
\item Composition impacted by lobby's investment
\item Sets tariff by majority rule
%\begin{itemize}
%\item $\frac{\partial \tau}{\partial e} > 0$
%\end{itemize}
\end{itemize}
%\pause
\item A Single Lobby
\begin{itemize}
\item Represents import-competing sector, $X$
\end{itemize}
\end{itemize}
\end{frame}
\subsection{The Players}
\begin{frame}
\frametitle{``Government''}
\pause
Decision determined by complex process. Reduced form:
\pause
\[
W_{G,t} = \mathit{CS}_X(\tau) + \ga_t \pi_X(\tau) + \mathit{CS}_Y(\tau^*) + \pi_Y(\tau^*) + \mathit{TR}(\tau)
\]
\pause
\begin{itemize}[<+->]
\item $\mathit{CS_i(\cdot)}$: consumer surplus
\item $\pi_X(\tau)$: profits of import-competing industry
\item $\pi_Y(\tau^*)$: profits of exporting industry
\item $\mathit{TR}(\tau)$: tariff revenue
\item $\ga_t = \ga(e_{t-1},\ta_{t-1})$
\end{itemize}
\end{frame}
\begin{frame}
\frametitle{``Government''}
\[
W_{G,t} = \mathit{CS}_X(\tau) + \ga_t \pi_X(\tau) + \mathit{CS}_Y(\tau^*) + \pi_Y(\tau^*) + \mathit{TR}(\tau)
\]
\pause
\begin{itemize}
\item $\ga_t$: weight on import-competing industry profits. Determined via election, influenced by
\begin{itemize}
\pause
\item $e_{t-1}$: lobbying effort
\pause
\item $\ta_{t-1}$: uncertain element in electoral process
\end{itemize}
\end{itemize}
\pause
\vskip.1in
\begin{beamerboxesrounded}[upper=palette tertiary, shadow=true]{Assumption 1}
$\ga(e_{t-1},\ta_{t-1})$ is increasing and concave in $e_{t-1}$ for all $\ta_{t-1} \in \Theta$.
\end{beamerboxesrounded}
\end{frame}
\begin{frame}
\frametitle{Lobby}
\pause
\begin{multline*}
\max_{e_t,m_t,l_t} \ \sum_{t=1}^\infty \left\{ A(m_t) \cdot F^\alpha \cdot l_t^{1-\alpha}\left[P^W + \tau\left(\gamma(e_{t-1})\right)\right] - l_t - \mu_t - e_t \right\} \\ \hskip.2in \text{s.t.} \hskip.2in m_t = m_{t-1} + \mu_{t} %, \ W_t \geq 0
\end{multline*}
\pause
where
\pause
\begin{itemize}[<+->]
\item $\mu_t$: Investment in productivity
\begin{itemize}
\item Assume $A(\cdot)$ increasing and concave in $m_t$
\end{itemize}
\item $l_t$: Labor
\item $e_t$: Lobbying effort
\item $\tau_t$: home tariff on good $X$
%\item $W_t$ is total wealth
\end{itemize}
\end{frame}
\section{Political Shocks}
\subsection{}
\begin{frame}
\frametitle{Two-Period Model}
\pause
Given $\ga_0$
\begin{multline*}
\max_{l_1,e_1,\mu_1,l_2,\mu_2} \ \left\{ A(m_0,\mu_1) F^\alpha l_1^{1-\alpha}\left[P^W + \tau\left(\gamma_0\right)\right] - l_1 - \mu_1 - e_1 \right\} \\
+ \left\{ A(m_0,\mu_1,\mu_2) \cdot F^\alpha \cdot l_2^{1-\alpha}\left[P^W + \tau\left(\gamma(e_1)\right)\right] - l_2 - \mu_2 \right\}
\end{multline*}
\vskip.2in
\pause
What happens when $\ga_0$ decreases? Two cases:
\pause
\begin{enumerate}
\item $\mu_1 \! \uparrow$ and $l_1 \! \uparrow$ (increase investment in productivity)
\pause
\item $\mu_1 \! \downarrow$ and $l_1 \! \downarrow$ (reduce investment in productivity)
\end{enumerate}
\end{frame}
\begin{frame}
\frametitle{Two-Period Model}
\pause
In both cases (reduction/increased investment in productivity)
\pause
\begin{itemize}
\item investment in politics $e_1$ can increase or
\pause
\item investment in politics $e_1$ can decrease
\end{itemize}
\vskip.2in
\pause
When investment in politics ($e_1$) $\downarrow$
\pause
\textbf{This is gradualism!}
\end{frame}
\section{Next Steps}
\subsection{}
\begin{frame}{Next Steps}
\pause
\begin{itemize}[<+->]
\item Determine what separates cases of $\mu_1 \! \uparrow$ from $\mu_1 \! \downarrow$?
\item Add wealth constraint
\item Fully dynamic model
\item Comparative statics on $A(m_t)$
%\item CRS production
\end{itemize}
\end{frame}
\begin{comment}
\section{Uncertainty}
\subsection{}
\begin{frame}{Timeline}
\begin{enumerate}[<+->]
\item \textbf{Import-competing firms lobby ...}
\item {\color{gray} \textbf{Uncertainty is resolved}}
\item \textbf{Government ...}
\item {\color{gray} Private actors make production, consumption decisions}
\end{enumerate}
\end{frame}
\begin{frame}{Why uncertainty?}
\pause
\textbf{Government}
\pause
\begin{itemize}
\item Renews AD duties if $G$ prefers $\tau^{\mathit{ad}}$ to $\tau^a$
\end{itemize}
\pause
\vskip.1in
\textbf{Lobby}
\pause
\begin{itemize}[<+->]
\item Given $(\tau^a,\tau^{*a})$ and $\tau^{\mathit{ad}}$, lobby knows what $e$ is required to induce renewal
\item Lobby pays this $e$ if: \hskip.2in $\pi(\tau^{\mathit{ad}}) - e > \pi(\tau^a)$
\end{itemize}
\pause
\vskip.1in
\textbf{In Equilibrium}
\pause
\begin{itemize}[<+->]
\item Firms only put forth effort when they know renewal will be granted
\end{itemize}
\end{frame}
\begin{frame}{What's this uncertainty about?}
\pause
Lobby
\begin{itemize}[<+->]
\item But
\item But
\end{itemize}
\pause
\vskip.1in
So what's the uncertainty about?
\pause
\begin{itemize}[<+->]
\item Probability foreign will retaliate or initiate dispute (indirect)
\item $G$'s valuation of harm to industry, e.g. how politically important is industry?
\end{itemize}
\end{frame}
\end{comment}
\begin{comment}
\section{Results}
\subsection{}
\begin{frame}{Timeline}
\begin{enumerate}[<+->]
\item \textbf{Import-competing firms lobby DOC/ITC to renew AD duties}
\item \textbf{Uncertainty is resolved}
\item \textbf{DOC/ITC decide whether to renew duties}
\item Private actors make production, consumption decisions
\end{enumerate}
\end{frame}
\begin{frame}{Government}
$G$ renews AD duties if its utility is higher under AD duties than trade agreement tariff
\pause
\begin{itemize}
\item Preferences are ex-ante uncertain through $\ta$
\pause
\item When does $G$ renew AD duties? \\
\pause
\vskip.1in
$b(e,\tau^a,\tau^{\textit{ad}})$: probability $G$ prefers $\tau^{ad}$ to $\tau^a$ for a given effort level $e$
\end{itemize}
\pause
\vskip.25in
\begin{beamerboxesrounded}[upper=palette tertiary, shadow=true]{Lemma 1}
The probability that $G$ renews AD duties is increasing and concave in lobbying effort $e$ $\left(\text{i.e. } \frac{\partial b}{\partial e} \geq 0, \ \frac{\partial^2 b}{\partial e^2} \leq 0 \right)$.
\end{beamerboxesrounded}
\end{frame}
\begin{frame}{Home's Trade Agreement Tariff}
\pause
\begin{beamerboxesrounded}[upper=palette tertiary, shadow=true]{Result 1}
The total probability that $G$ renews AD duties is decreasing in the home trade agreement tariff $\tau^a$.
\end{beamerboxesrounded}
\vskip.25in
\pause
There's both a direct effect and an indirect effect through lobby's incentives, and both are negative:
\[
\frac{\partial b}{\partial e} \frac{\partial e}{\partial \tau^a} + \frac{\partial b}{\partial \tau^a}
\]
\end{frame}
\begin{frame}{Foreign's Trade Agreement Tariff}
Assuming trading partner does not retaliate
\pause
\begin{itemize}
\item No difference in foreign tariff under AD duty and $\tau^a$. So no effect on $G$'s incentives (either direct or indirect)
\end{itemize}
\vskip.25in
\pause
\begin{beamerboxesrounded}[upper=palette tertiary, shadow=true]{Result 2}
The total probability that $G$ renews AD duties is unaffected by foreign's trade agreement tariff $\tau^a$.
\end{beamerboxesrounded}
\end{frame}
\begin{frame}{Profitability of Import-Competing Sector}
\pause
\textit{NOTE: this is not quite right, but some version of it will be} \\
Assume $\pi(\cdot)$ shifts up uniformly for all $\tau$.
\pause
\begin{itemize}[<+->]
\item Convexity of profits $\Rightarrow$ $G$'s marginal benefit of providing protection goes up
\item Convexity of profits $\Rightarrow$ return from lobbying increases
\end{itemize}
\vskip.25in
\pause
\begin{beamerboxesrounded}[upper=palette tertiary, shadow=true]{Result 3}
The total probability that $G$ renews AD duties is increasing in the profitability of the import-competing sector.
\end{beamerboxesrounded}
\end{frame}
\begin{frame}{Exogenous Shifts in $\ga(e,\ta)$}
\pause
Assume $\ga(\cdot,\cdot)$ shifts up uniformly for all $(e,\ta)$ pairs.
\pause
\begin{itemize}[<+->]
\item $G$ gives more weight to firms' benefit
\item Lobbying incentives are unchanged
\end{itemize}
\vskip.25in
\pause
\begin{beamerboxesrounded}[upper=palette tertiary, shadow=true]{Result 3}
The total probability that $G$ renews AD duties increases when the weighting function shifts up exogenously and uniformly.
\end{beamerboxesrounded}
\end{frame}
\begin{frame}{Protection from AD Duties}
\pause
When $\tau^{\textit{ad}}$ increases, two effects on $G$'s incentives:
\pause
\begin{itemize}[<+->]
\item Social welfare decreases, pushes for decrease in renewal probability
\item (Over-weighted) import-competing profits increase, pushes for increase in renewal probability
\end{itemize}
\vskip.2in
\pause
The indirect effect has the same sign as the direct effect
\pause
\begin{itemize}[<+->]
\item When $\tau^{\textit{ad}}$ is close to the social optimum, the second effect dominates $\Rightarrow$ increase in renewal probability
\item Effect may be concave
\end{itemize}
\end{frame}
\section{Conclusion}
\subsection{}
\begin{frame}{Future Work}
\pause
\begin{itemize}[<+->]
\item Comparative statics
\item Empirical
\item Extend model
\end{itemize}
\end{frame}
\end{comment}
\end{document}
\section{Conclusion}
With the information gathered from our case studies, we can now attempt to answer the questions we posed ourselves in the introduction. \\
Open-source software projects generally seem to be started by a group or a single individual that is capable of performing most of the work to get an initial prototype off the ground. From there on out, thanks to the capabilities of the internet to allow cost-effective and rapid sharing, the project can be distributed to other people all over the world. Interested parties can then decide to contribute back. \\
The assurance of quality and the organisation of a project are realised by rather flat hierarchies of involved people, whose standing is decided dynamically by the community itself. Leaders emerge due to their respective contributions and interactions with the rest of the community. In particular, quality of code is enforced by a required consensus, where patches are only accepted into the mainline distribution of the project if the responsible parties agree with them. \\
However, thanks to the permission to modify and redistribute in open source licenses, even in the case of disagreements, development is not at a standstill. Individuals can create forks of a project and drive it ahead in whatever way they see fit. Since the bulk of contributions comes from people who are doing it out of their own volition, it is also practically impossible to enforce your own ideas upon others and the project, unless you do the work yourself. \\
Despite this, the lack of a unified vision for the project can also lead to an irregularity in terms of quality and presentation. As there is no controlling agency that can ensure coherence and decide on a direction, different parts of a project can be in wildly different conditions. \\
As such, open source software vastly benefits from its fluent and free model of community involvement in that it makes it difficult for any individual's effort to go to waste, and in that it can exploit the vast number of interested parties and developers in order to create a potentially much more capable driving force than a corporation could be. However, at the same time this openness can be a detriment, as, despite the loose models of hierarchy and quality-checking, the visions of the individual contributors can differ vastly, leading to a lack of unity in design and presentation overall.
%%% Local Variables:
%%% mode: latex
%%% TeX-master: "foss-governance"
%%% End:
%%% lorem.tex ---
%%
%% Filename: lorem.tex
%% Description:
%% Author: Ola Leifler
%% Maintainer:
%% Created: Wed Nov 10 09:59:23 2010 (CET)
%% Version: $Id$
%% Version:
%% Last-Updated: Wed Nov 10 09:59:47 2010 (CET)
%% By: Ola Leifler
%% Update #: 2
%% URL:
%% Keywords:
%% Compatibility:
%%
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%%
%%% Commentary:
%%
%%
%%
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%%
%%% Change log:
%% Completed Language Reading MBS
%%
%% RCS $Log$
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%%
%%% Code:
\chapter{Conclusions and Future Work}
\label{cha:conclusionsandfuturework}
\section{Conclusions}
\label{sec:conclusions}
The main goal of this thesis was to design and implement tools and methods, up to the point of proof of concept and prototype demonstrations, which would increase the efficiency and quality of model-based development of complex and multi-domain cyber-physical systems.
With respect to the objective “to ensure automatic solution of dynamic optimization problems by reusing simulation models for optimization”, we have developed a model-based dynamic optimization approach by integrating optimization into the model development process. The feasibility of our approach was demonstrated by a prototype implementation that was employed in the solution of industrially-relevant optimal control problems, including a diesel engine model. While the parameter sweep static design optimization method uses many simulation runs, the dynamic optimization approach presented in this thesis uses a direct optimization of a whole solution trajectory iteratively to obtain the optimal solution with minimum computation and time. OpenModelica coupling with CasADi has shown that it is possible to use an XML-based model exchange format for model-based dynamic optimization with state-of-the-art optimization algorithms. The approach contributes to enabling mathematical simulation models expressed in Modelica with the Optimica language extension to be used efficiently for simulation-based optimization. The use of a language-neutral model exchange format simplifies tool interoperability and allows modelers to conduct experiments with different optimization algorithms and choose the one that is best suited for their particular problem, without the need to re-encode the problem formulation. As compared to traditional optimization frameworks, which typically require modelers to encode the model, the cost function, and the constraints in an algorithm-specific manner, the approach presented in this thesis significantly increases flexibility.
With respect to the objective “to ensure reusing and combining existing simulation models formalized by different experts in different modeling languages and tools for a unified system simulation”, we have developed a general open-source graphical and textual editor, and a co-simulation framework for composite modeling and simulation of several connected subsystems using detailed models. Several tool-specific simulation sub-models can be integrated and connected by means of a composite model, represented in XML, which defines the physical interconnections between them. The approach is based on a general external interface definition that can be implemented for many different simulation tools using the TLM method. This enables de-coupling of sub-models from the full system and allows them to be independently simulated and coupled in a numerically stable way via co-simulation techniques. Currently, most simulation tools for model-based development of cyber-physical systems are bound to a specific tool vendor. An open-source modeling and co-simulation environment for composite models will change that, since it enables integration of models defined in a specific language from many different simulation tool vendors in the design process. It has been successfully implemented and tested for several simulation tools.
With respect to the objective “to support advanced simulation modeling analysis”, we have enhanced the Python interface to simulate and access EOO Modelica models using Python objects for further simulation modeling analysis. Our tool, OMPython, is developed as a library using a standard distribution of Python and targeted to the OpenModelica modeling and simulation environment. However, general concepts can be applied to any other language and tool. In order to ensure reusability, only the standard Python libraries were used. From a modeler’s perspective, the Python interface makes scripting, plotting, and analysis of results straightforward. This gives the modeler the possibility to use EOO simulation models together with the more powerful and easier to use API and Python libraries, e.g., for tasks such as control design and post processing of simulation results.
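To give a concrete flavor of this scripting workflow, the following minimal sketch drives OpenModelica from Python. It is illustrative only: it uses the present-day OMPython entry points (\texttt{OMCSessionZMQ}, \texttt{sendExpression}), which may differ from the interface version described in this thesis, and a hypothetical model file \texttt{BouncingBall.mo}.
\begin{verbatim}
from OMPython import OMCSessionZMQ   # OpenModelica's Python interface

omc = OMCSessionZMQ()                        # start/connect to an omc session
print(omc.sendExpression("getVersion()"))    # check the compiler version

# Load a (hypothetical) Modelica model and simulate it
omc.sendExpression('loadFile("BouncingBall.mo")')
result = omc.sendExpression("simulate(BouncingBall, stopTime=3.0)")
print(result["resultFile"])                  # path to the simulation result file
\end{verbatim}
From here, the result file can be read back into Python for plotting and post-processing, which is exactly the kind of analysis loop the interface is intended to support.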
We have also extended the list of simulator plugins for PySimulator by implementing a plugin for Wolfram’s SystemModeler. The integration of the Wolfram SystemModeler simulator plugin uses the simulation result analysis tools within PySimulator. Hence, comparing simulation results of the same model generated from SystemModeler with several other tools, or of different versions of the same model from the SystemModeler tool, is now possible. Comparing results of model simulations is very important for model portability and model evolution. This makes it possible for simulation models from SystemModeler to be safely utilized and integrated into different tools in the design process.
With respect to the objective “to ensure automatic traceability between requirements, simulation models, FMUs, and simulation results artifacts and keep track of changes and integration of product design tools with modeling and simulation tools”, we have developed a tool-supported method for multi-domain collaborative modeling and traceability support throughout the developments in CPSs. A design and implementation for seamless tracing and interoperability of lifecycle artifacts in OpenModelica, integrated with the INTO-CPS tool-chain of CPS design, has been developed based on a linked data approach. A tool interoperability approach based on the Linked data method for traceability improves the reusability of simulation models between tools in distributed collaborative development flows. Hence, system designers and analysts with expertise in different domains can effectively collaborate on the design of complex systems. The approach presented in this thesis contributes to an important step in the integration of different modeling tools that are used in the whole tool-chain of CPS design, from systems modeling down to co-simulation and test automation. This can be used to support several activities such as impact analysis, component reuse, verification, and validation.
The message format and schema for the traceability information has been standardized in order to ensure that all tools use the same format for sending their trace data. This schema, together with the use of standardized specifications and formats, allows other tool vendors to easily integrate their tools into the INTO-CPS tool-chain traceability environment. Currently, the traceability data is stored in a graph database that can be queried in order to generate various reports, such as impact analysis. Furthermore, users can easily query this database to retrieve specific information about the links between different entities, such as requirements, users, test results or models (FMUs).
\section{Future Work}
\label{sec:futurework}
Seamless traceability throughout the development life cycle of CPSs, such as from the design model to simulation results, is an active research area.
Our work on traceability came about through integration with the INTO-CPS tool-chain of CPS design.
It is our intention to explore seamless tracing of the requirements and associating them with the models and the simulation results
in the same language and with a single tool. This reduces the semantic gap in the terminology used between the requirement verification engineers and
the system modelers, which in turn simplifies the modeling effort and allows for automated combination between the requirement models.
For impact analysis, we would like to develop queries over the traceability data starting from requirement verification reports.
Ultimately, we plan to conduct a larger industrial use case to assess the extent to which system modelers benefit from our framework.
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%%% lorem.tex ends here
%%% Local Variables:
%%% mode: latex
%%% TeX-master: "demothesis"
%%% End:
\section{Techniques}
\begin{breakbox}
\boxtitle{Overview} \\
\includegraphics[width=.25\textwidth]{figures/ucdIssuesTechniques.png}
\end{breakbox}
\begin{breakbox}
\boxtitle{Process}
\begin{itemize}
\item
Going in direction \textbf{from Down to Up}
\item
Which strategy do you want to go for? Which personas are your target?
\item
Which features do you want to support? Explain it with future
scenarios.
\item
Once you know the target group and the features, you can think about
the structure of the app. How do I get to which feature?
\item
After the structure, we need to think about different screens and
their layout.
\item
And finally color, fonts and animations need to be defined.
\end{itemize}
\end{breakbox}
\subsection{Techniques for a good structure}
\begin{breakbox}
\boxtitle{Card Sort}
Card Sort is a useful technique to determine navigation hierarchies and
naming of menu items.
\textbf{Open Card Sort}: Start with content cards. Let future users
create groups and name them (5+ users).\\
\textbf{Closed Card Sort}: Start with content cards and group labels.
Let future users match content cards to group labels.
\end{breakbox}
\columnbreak
\begin{breakbox}
\boxtitle{Screen Map}
The ``Screen Map'' of an App lists all screens of the App, groupings and
major navigation links.
\includegraphics[width=.25\textwidth]{figures/screenMap.png}
A Screen Map is not the same as a Screen Flow. You can see the
Screen Map as a class diagram in UML, while the Screen Flow would be an
object diagram. In the Screen Flow, all instances of each view are
visible.
\end{breakbox}
\begin{breakbox}
\boxtitle{Prototyping / Usability Testing}
\begin{itemize}
\tightlist
\item
Defining good Scenarios with plausible needs, goals, context, trigger,
persona (skill profile)
\item
Creating inexpensive/quickly the needed screen flows for testing
(paper or interactive)
\item
Creating matching task-descriptions that communicate needs, goals,
context, trigger, but not steps
\item
Inviting the right test-persons
\item
Making test persons understand that the system/concept is tested
\item
Make test persons ''Think Aloud''
\end{itemize}
\end{breakbox}
\begin{breakbox}
\boxtitle{Usability Testing - Test Tasks}
\begin{itemize}
\item Scenarios are the basis for creating screen flows and description of the test tasks.
\item Test tasks specify the users context, need, goal and trigger
\item Tasks do NOT specify specific steps that should be taken
\end{itemize}
\end{breakbox}
\begin{breakbox}
\boxtitle{Usability Testing - Test Tasks Example}
\textbf{Invited Test Person:} \\
Should be bread-lovers ... \\
\textbf{Task 1 Description (includes more, but also)} \\
This morning you finished the last piece of bread. You made a mental
note that you should remember buying a fresh loaf of your favorite
St.~Galler bread in the evening. Assume that you have been using the
Breadwinner App already for a while. The app is tracking your buying
patterns. Assume that it is lunch time and before starting your
afternoon's work you check your phone. Your home screen shows the
following message. Proceed to preorder (???) your bread for the
evening. \\
\textbf{Task 1 Step 2:} \\
Assume that it is 6~PM and you just exited the train to return home --
please check your phone. [Do you understand why it was sent?] \\
\textbf{Task 1 Step 3:} \\
You just arrived at the bakery. Please pick up (???) your preordered
(???) bread. Please also buy and pay for the pastry that is offered as
``special offer'' to regular customers.
\end{breakbox}
\begin{breakbox}
\boxtitle{Usability Testing - Mistakes}
\begin{itemize}
\tightlist
\item
Recruiting unsuitable participants
\item
Not testing early and often during the project lifecycle
\item
Following too rigid a test plan
\item
Not rehearsing your setup
\item
Using a one-way mirror
\item
Not meeting participants in reception
\item
Asking leading questions
\item
Undertaking two roles in a testing session
\item
Not considering external influences
\end{itemize}
\end{breakbox}
\subsection{Surface \& Skeleton}
\begin{breakbox}
\boxtitle{Designing for the human body / Physiology}
The screen size is getting bigger, but your finger is still the same.
The region you don't reach is getting bigger, so you need to rearrange
the things according to this fact (e.g.~menu at the bottom).
\end{breakbox}
\begin{breakbox}
\boxtitle{Designing for the human mind}
Place things where a user expects to see them (e.g.~an error
message at a reasonable place).
\end{breakbox}
\begin{breakbox}
\boxtitle{Mobile design patterns}
\begin{itemize}
\tightlist
\item
Using empty screens for the introduction into the app
\item
Coach marks to show the usable gestures or actions
\item
Try to replace dropdowns by other menus, like a switch or a slider
\item
Using skeletons to make the app feel faster?
\end{itemize}
\end{breakbox}
\hypertarget{platform-guidelines-for-android}{%
\subsection{Platform Guidelines for
Android}\label{platform-guidelines-for-android}}
\begin{breakbox}
\boxtitle{Material Design}
Material Design stands for a paper-like form, i.e.~how the interface
should look.
\end{breakbox}
\columnbreak
\begin{breakbox}
\boxtitle{Elevation hierarchy}
Elevation hierarchy means that components are stacked physically: some elements sit above other elements and therefore, for instance, cast shadows.
\includegraphics[width=0.2\textwidth]{figures/elevationHirarchy.png}
\end{breakbox}
\begin{breakbox}
\boxtitle{Motions}
Motions are described as well: how should the screen move or
behave?\\
\textbf{Bottom navigation} is the new way to go.\\
More things which are described:
\begin{itemize}
\item Permissions
\item Widgets
\item About sections
\end{itemize}
\end{breakbox}
\begin{breakbox}
\boxtitle{Android Anti-Patterns}
\begin{itemize}
\tightlist
\item
The splash screen\\
better: use image placeholders
\item
The tutorial screen\\
better: explain just in time, in context
\item
The (pre-operation) confirmation window\\
better: provide undo (notification)
\item
On-screen Back button\\
If necessary provide on-screen up button
\item
Menu button (outdated)
\item
Hiding the status bar
\item
Swipe overlay quick actions
\item
Using non-Android designs
\end{itemize}
\end{breakbox}
\chapter*{Abstract}
\addcontentsline{toc}{chapter}{Abstract}
With the rollout of robots in pedestrian-rich environments, the capability for robots to navigate among them safely, anticipatively and efficiently becomes increasingly important. However, while intuitive to humans, socially-aware navigation still poses a difficult problem, mainly due to the stochastic and highly dynamic nature of human movement. Despite the rise of the predictive capabilities of state-of-the-art, deep generative, multi-modal and probabilistic pedestrian prediction models, existing works mostly either use strong simplifying assumptions for human movement or non-interpretable methods for robot decision-making. This work combines the predictive power of deep-learned prediction models and the certifiability of constrained trajectory optimization methods. To this end, it proposes an optimization formulation to efficiently leverage generative models for trajectory optimization beyond their outputs, while guaranteeing safe robot-human interaction. The proposed algorithm is shown to enable anticipative, safe, but not overly conservative movements among many pedestrians.
\vspace{1cm}
\textbf{Keywords}: Human-Robot Interaction, Crowd-Navigation
\section{Conclusions}
\label{sec:conclusions}
In the following table we collect congruences proved in previous sections,
with respect to $\equiv_{2}$, over coefficients $c_{n,k}=\frac{k+1}{n+1}{{2n-k}\choose{n-k}}\in\mathcal{C}$:
\begin{displaymath}
\begin{split}
& \frac{1}{n+1}{{2n}\choose{n}} \equiv_{2} 1 \leftrightarrow \exists \alpha\in\mathbb{N}: n=2^{\alpha}-1 \\
& \forall\alpha\in\mathbb{N}:\frac{k+1}{2^{\alpha}}{{2^{\alpha+1}-2-k}\choose{2^{\alpha}-1-k}} \equiv_{2} 1 \\
& \forall\alpha\in\mathbb{N}:\frac{k+1}{2^{\alpha}+1}{{2^{\alpha+1}-k}\choose{2^{\alpha}-k}} \equiv_{2} 0 \leftrightarrow \exists j\in\mathbb{N}: k=2j+1 \\
& \forall\alpha\in\mathbb{N}:\frac{2^{\alpha}}{s+1}{{2s-2^{\alpha}+1}\choose{s-2^{\alpha}+1}} \equiv_{2} 0 \leftrightarrow
s\in\lbrace 2^{\alpha}, \ldots, 2^{\alpha+1}-2\rbrace \\
& \forall\alpha\in\mathbb{N}:\frac{s-2^{\alpha}+2}{s+1}{{s+2^{\alpha}-1}\choose{2^{\alpha}-1}} \equiv_{2}
\frac{2^{\alpha}}{s+1}{{2s-2^{\alpha}+1}\choose{s-2^{\alpha}+1}} \leftrightarrow
s\in\lbrace 2^{\alpha}, \ldots, 2^{\alpha+1}-2\rbrace \\
& \frac{k+1}{n+1}{{2n-k}\choose{n-k}} \equiv_{2} 0 \leftrightarrow
n \in\lbrace 2^{\alpha}, \ldots, 2^{\alpha+1} - 2\rbrace \wedge k \in \lbrace 0, \ldots, n - 2^{\alpha}\rbrace \\
\end{split}
\end{displaymath}
The following two congruences hold if and only if $\alpha\in\mathbb{N} \wedge e\in\lbrace1,\ldots,s-2^{{\alpha}}\rbrace$:
\begin{displaymath}
\begin{split}
& {{2s-e-2^{{\alpha}}+1}\choose{s-e}} - {{2s-e-2^{{\alpha}}+1}\choose{s-e+1}} \equiv_{2}
{{2s-2^{{\alpha}}+1-e}\choose{s}} - {{2s-2^{{\alpha}}+1-e}\choose{s+1}}\\
& {{2s-2^{{\alpha}}+1-e}\choose{s}} - {{2s-2^{{\alpha}}+1-e}\choose{s+1}} \equiv_{2}
{{2s-2^{{\alpha}+1}-e+1}\choose{s-2^{{\alpha}}}} - {{2s-2^{{\alpha}+1}-e+1}\choose{s-2^{{\alpha}}+1}}\\
\end{split}
\end{displaymath}
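These congruences can also be inspected numerically. The following minimal sketch is illustrative only and is not part of the formal development: it tabulates, with exact integer arithmetic, the parities of the first rows of $\mathcal{C}$ directly from the defining formula $c_{n,k}=\frac{k+1}{n+1}{{2n-k}\choose{n-k}}$.
\begin{verbatim}
from math import comb

def c(n, k):
    # ballot-number coefficient c_{n,k} = (k+1)/(n+1) * binom(2n-k, n-k);
    # the quotient is an integer, so floor division is exact
    return (k + 1) * comb(2 * n - k, n - k) // (n + 1)

for n in range(8):    # parities of the first rows of the array C
    print(' '.join(str(c(n, k) % 2) for k in range(n + 1)))
\end{verbatim}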
To summarize our work, we provide a study of the congruence
$\equiv_{2}$ mapped to the Catalan array $\mathcal{C}$; both a formal
approach to characterize different regions and implementations are
given. Even though the array $\mathcal{P}_{\equiv_{2}}$ is very
interesting and deeply studied, it is our opinion that
$\mathcal{C}_{\equiv_{2}}$ deserves interest as well because of
its recursive flavor and its connection to the Catalan numbers, which
count a wealth of combinatorial objects.
We finish by pointing out directions for further study: we would like
to understand how a generalization to the congruence $\equiv_{p}$, for $p$
prime, can be proved, using either a closed formula for the generic
coefficient or algebraic manipulations of generating functions; finally,
we would like to find a modular characterization of $\mathcal{C}_{\equiv_{2}}^{-1}$,
the inverse group element of $\mathcal{C}_{\equiv_{2}}$, depicted in
\autoref{fig:catalan-traditional-inverse-ignore-negatives-centered-colouring-127-rows-mod2-partitioning-triangle}.
\input{catalan/catalan-traditional-inverse-ignore-negatives-centered-colouring-127-rows-mod2-partitioning-include-figure.tex}
% This is part of the TFTB Reference Manual.
% Copyright (C) 1996 CNRS (France) and Rice University (US).
% See the file refguide.tex for copying conditions.
\markright{tfrscalo}
\section*{\hspace*{-1.6cm} tfrscalo}
\vspace*{-.4cm}
\hspace*{-1.6cm}\rule[0in]{16.5cm}{.02cm}
\vspace*{.2cm}
{\bf \large \sf Purpose}\\
\hspace*{1.5cm}
\begin{minipage}[t]{13.5cm}
Scalogram, for Morlet or Mexican hat wavelet.
\end{minipage}
\vspace*{.5cm}
{\bf \large \sf Synopsis}\\
\hspace*{1.5cm}
\begin{minipage}[t]{13.5cm}
\begin{verbatim}
[tfr,t,f,wt] = tfrscalo(x)
[tfr,t,f,wt] = tfrscalo(x,t)
[tfr,t,f,wt] = tfrscalo(x,t,wave)
[tfr,t,f,wt] = tfrscalo(x,t,wave,fmin,fmax)
[tfr,t,f,wt] = tfrscalo(x,t,wave,fmin,fmax,N)
[tfr,t,f,wt] = tfrscalo(x,t,wave,fmin,fmax,N,trace)
\end{verbatim}
\end{minipage}
\vspace*{.5cm}
{\bf \large \sf Description}\\
\hspace*{1.5cm}
\begin{minipage}[t]{13.5cm}
{\ty tfrscalo} computes the scalogram (squared magnitude of a
continuous wavelet transform). Its expression is the following\,:
\[SC_x(t,a;h)=\left|T_x(t,a;h)\right|^2=\frac{1}{|a|}\
\left|\int_{-\infty}^{+\infty} x(s)\ h^*\left(\dfrac{s-t}{a}\right)\
ds\right|^2.\] This time-scale expression has an equivalent time-frequency
expression, obtained using the formal identification $a=\dfrac{\nu_0}{\nu}$,
where $\nu_0$ is the central frequency of the mother wavelet $h(t)$.\\
\hspace*{-.5cm}\begin{tabular*}{14cm}{p{1.5cm} p{8.5cm} c}
Name & Description & Default value\\
\hline
{\ty x} & signal to be analyzed ({\ty Nx=length(x)}). Its
analytic version is used ({\ty z=hilbert(real(x))})\\
{\ty t} & time instant(s) on which the {\ty tfr} is evaluated & {\ty (1:Nx)}\\
{\ty wave} & half length of the Morlet analyzing wavelet at coarsest
scale. If {\ty wave=0}, the Mexican hat is used
& {\ty sqrt(Nx)}\\
{\ty fmin, fmax} & respectively lower and upper frequency bounds of
the analyzed signal. These parameters fix the equivalent
frequency bandwidth (expressed in Hz). When unspecified, you
have to enter them at the command line from the plot of the
spectrum. {\ty fmin} and {\ty fmax} must be $>${\ty 0} and $\leq${\ty 0.5}\\
{\ty N} & number of analyzed voices & auto\footnote{This value,
determined from {\ty fmin} and {\ty fmax}, is the
next-power-of-two of the minimum value checking the non-overlapping
condition in the fast Mellin transform.}\\
\hline\end{tabular*}\end{minipage}
\hspace*{1.5cm}\begin{minipage}[t]{13.5cm}
\hspace*{-.5cm}\begin{tabular*}{14cm}{p{1.5cm} p{8.5cm} c}
Name & Description & Default value\\\hline
{\ty trace} & if nonzero, the progression of the algorithm is shown
& {\ty 0}\\
\hline {\ty tfr} & time-frequency matrix containing the coefficients of the
decomposition (abscissa correspond to uniformly sampled time,
and ordinates correspond to a geometrically sampled
frequency). First row of {\ty tfr} corresponds to the lowest
frequency. \\
{\ty f} & vector of normalized frequencies (geometrically sampled
from {\ty fmin} to {\ty fmax})\\
{\ty wt} & Complex matrix containing the corresponding wavelet
transform. The scalogram {\ty tfr} is the squared modulus of {\ty wt}\\
\hline
\end{tabular*}
\vspace*{.2cm}
When called without output arguments, {\ty tfrscalo} runs {\ty tfrqview}.
\end{minipage}
\vspace*{1cm}
{\bf \large \sf Example}
\begin{verbatim}
sig=altes(64,0.1,0.45);
tfrscalo(sig);
\end{verbatim}
\vspace*{.5cm}
{\bf \large \sf See Also}\\
\hspace*{1.5cm}
\begin{minipage}[t]{13.5cm}
all the {\ty tfr*} functions.
\end{minipage}
\vspace*{.5cm}
{\bf \large \sf Reference}\\
\hspace*{1.5cm}
\begin{minipage}[t]{13.5cm}
[1] O. Rioul, P. Flandrin ``Time-Scale Distributions : A General Class
Extending Wavelet Transforms'', IEEE Transactions on Signal Processing,
Vol. 40, No. 7, pp. 1746-57, July 1992.
\end{minipage}
%%%% Time-stamp: <2014-03-23 13:40:59 vk>
%%%% === Disclaimer: =======================================================
%% created by
%%
%% Karl Voit
%%
%% using GNU/Linux, GNU Emacs & LaTeX 2e
%%
%doc%
%doc% \section{\texttt{pdf\_settings.tex} --- Settings related to PDF output}
%doc% \label{sec:pdf}
%doc%
%doc% The file \verb#template/pdf_settings.tex# basically contains the definitions for
%doc% the \href{http://tug.org/applications/hyperref/}{\texttt{hyperref} package}
%doc% including the
%doc% \href{http://www.ctan.org/tex-archive/macros/latex/required/graphics/}{\texttt{graphicx}
%doc% package}. Since these settings should be the last things of any \LaTeX{}
%doc% preamble, they got their own \TeX{} file which is included in \texttt{main.tex}.
%doc%
%doc% \paragraph{What should I do with this file?} The settings in this file are
%doc% important for \myacro{PDF} output and including graphics. Do not exclude the
%doc% related \texttt{input} command in \texttt{main.tex}. But you might want to
%doc% modify some settings after you read the
%doc% \href{http://tug.org/applications/hyperref/}{documentation of the \texttt{hyperref} package}.
%doc%
%% Fix positioning of images in PDF viewers. (disabled by
%% default; see https://github.com/novoid/LaTeX-KOMA-template/issues/4
%% for more information)
%% I do not have time to read about possible side-effect of this
%% package for now.
% \usepackage[hypcap]{caption}
%% Declarations of hyperref should be the last definitions of the preamble:
%% FIXXME: black-and-white-version for printing!
\pdfcompresslevel=9
\usepackage[%
unicode=true, % loads with unicode support
%a4paper=true, %
pdftex, %
backref, %
pagebackref=false, % creates backward references too
bookmarks=false, %
bookmarksopen=false, % when starting with AcrobatReader, the Bookmarkcolumn is opened
pdfpagemode=UseNone,% UseNone, UseOutlines, UseThumbs, FullScreen
plainpages=false, % correct, if pdflatex complains: ``destination with same identifier already exists''
%% colors: https://secure.wikimedia.org/wikibooks/en/wiki/LaTeX/Colors
urlcolor=DispositionColor, %%
linkcolor=DispositionColor, %%
%pagecolor=DispositionColor, %%
citecolor=DispositionColor, %%
anchorcolor=DispositionColor, %%
colorlinks=\mycolorlinks, % turn on/off colored links (on: better for
% on-screen reading; off: better for printout versions)
]{hyperref}
%% all strings need to be loaded after hyperref was loaded with unicode support
%% if not the field is garbled in the output for characters like ČŽĆŠĐ
\hypersetup{
pdftitle={\mytitle}, %
pdfauthor={\myauthor}, %
pdfsubject={\mysubject}, %
pdfcreator={Accomplished with: pdfLaTeX, biber, and hyperref-package. No animals, MS-EULA or BSA-rules were harmed.},
pdfproducer={\myauthor},
pdfkeywords={\mykeywords}
}
%\DeclareGraphicsExtensions{.pdf}
%%%% END
%%% Local Variables:
%%% TeX-master: "../main"
%%% mode: latex
%%% mode: auto-fill
%%% mode: flyspell
%%% eval: (ispell-change-dictionary "en_US")
%%% End:
%% vim:foldmethod=expr
%% vim:fde=getline(v\:lnum)=~'^%%%%'?0\:getline(v\:lnum)=~'^%doc.*\ .\\%(sub\\)\\?section{.\\+'?'>1'\:'1':
% This file contains the content for a main section
\regularsectionformat % Change formatting to that of "Introduction" section
%% Modify below this line %%
\chapter{Specification}
\section{Naming conventions}
The encoding of ACES specified in \autoref{sec:ACEScg} shall be known as ACEScg.
\section{Color component value encoding}
ACEScg shall be stored as either 16-bit (IEEE binary16) or 32-bit (IEEE binary32) floating point values.
\section{Color component value range}
The value range for ACEScg color component values is [-65504.0, +65504.0].
The chromaticity coordinates of the defined ACEScg RGB primaries (AP1) form a triangle on the CIE chromaticity diagram. ACEScg RGB values which express visible colors are represented by points within this triangle that also lie within the visual gamut.
The set of valid ACEScg RGB values also includes members whose projection onto the CIE chromaticity diagram falls outside the region of the AP1 primaries. These ACEScg RGB values include those with one or more negative ACEScg color component values. Ideally, these values would be preserved through any compositing operations done in ACEScg space, but it is recognized that keeping negative values is not always practical, in which case it will be acceptable to replace negative values with zero.
Values well above 1.0 are expected and should not be clamped except as part of the color correction needed to produce a desired artistic intent.
\section{Color component transfer function}
The color component transfer function directly encodes relative exposure values and is defined as
\begin{center}
$R = E_r, \quad G = E_g, \quad B = E_b$
\end{center}
where $E_r$, $E_g$ and $E_b$ represent relative exposure values that would be captured from the scene by the ACES Reference Image Capture Device (RICD) and $R$, $G$ and $B$ are the resulting ACES color component values transformed to ACEScg using the methods specified in section 4.1.6.
\section{Color space chromaticities}
\label{sec:colorspace}
ACEScg uses a different set of primaries than ACES RGB primaries defined in SMPTE ST 2065-1. The CIE 1931 colorimetry of the ACEScg RGB primaries and white are specified below.
\subsection{Color primaries}
The RGB primaries chromaticity values, known as AP1, shall be those found in \autoref{table:AP1rgb}.
\begin{center}
\begin{tabularx}{4.5in}{XlllXll}
& R & G & B & & CIE x & CIE y \\ \hline
Red & 1.00000 & 0.00000 & 0.00000 & & 0.713 & 0.293 \\
Green & 0.00000 & 1.00000 & 0.00000 & & 0.165 & 0.830 \\
Blue & 0.00000 & 0.00000 & 1.00000 & & 0.128 & 0.044 \\
\end{tabularx}
\captionof{table}{ACEScg RGB primaries chromaticity values}
\label{table:AP1rgb}
\end{center}
\subsection{White Point}
The white point shall be that found in \autoref{table:AP1w}.
\begin{center}
\begin{tabularx}{4.5in}{XlllXll}
& R & G & B & & CIE x & CIE y \\ \hline
White & 1.00000 & 1.00000 & 1.00000 & & 0.32168 & 0.33767 \\
\end{tabularx}
\captionof{table}{ACES RGB white point chromaticity values}
\label{table:AP1w}
\end{center}
\note{The ACEScg white point is the same as the white point of ACES 2065-1.}
\section{ACEScg}
\label{sec:ACEScg}
The following functions shall be used to convert between ACES values, encoded according to SMPTE ST 2065-1, and ACEScg.
\subsection{Converting ACES2065-1 RGB values to ACEScg RGB values}
\label{sec:aces2acescg}
ACES $R$, $G$, and $B$ values shall be converted to ACEScg $R$, $G$, and $B$ values using the transformation matrix ($TRA$) calculated and applied using the methods provided in Section 4 of SMPTE RP 177:1993.
\note{Equation \ref{eq:aces2acescg} shows the relationship between ACES $R$, $G$, and $B$ values and ACEScg $R$, $G$, and $B$ values. $TRA_{1}$, rounded to 10 significant digits, is derived from the product of $NPM_{AP1}$ inverse and $NPM_{AP0}$ calculated using methods provided in Section 3.3 of SMPTE RP 177:1993. AP0 are the primaries of ACES specified in SMPTE ST 2065-1:2012. AP1 are the primaries of ACEScg specified in \autoref{sec:colorspace}.}
\begin{floatequ}
\begin{gather}
\begin{bmatrix}
R_{ACEScg}\\
G_{ACEScg}\\
B_{ACEScg}
\end{bmatrix}
=
TRA_{1}
\cdot
\begin{bmatrix}
R_{ACES}\\
G_{ACES}\\
B_{ACES}
\end{bmatrix} \\
\\
TRA_{1} =
\begin{bmatrix*}[r]
1.4514393161 & -0.2365107469 & -0.2149285693 \\
-0.0765537734 & 1.1762296998 & -0.0996759264 \\
0.0083161484 & -0.0060324498 & 0.9977163014 \\
\end{bmatrix*} \\
\\
TRA_{1} = NPM^{-1}_{AP1} \cdot NPM_{AP0}
\end{gather}
\caption{ACES2065-1 to ACEScg}
\label{eq:aces2acescg}
\end{floatequ}
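The following sketch is informative only and not part of this specification. It applies $TRA_{1}$ from Equation \ref{eq:aces2acescg} to an ACES2065-1 triple; because ACEScg shares the ACES2065-1 white point, equal ACES2065-1 color component values map to (nearly) equal ACEScg values, up to the rounding of the published coefficients.
\begin{verbatim}
import numpy as np

# TRA_1 (ACES2065-1 -> ACEScg), rounded to 10 significant digits
TRA_1 = np.array([
    [ 1.4514393161, -0.2365107469, -0.2149285693],
    [-0.0765537734,  1.1762296998, -0.0996759264],
    [ 0.0083161484, -0.0060324498,  0.9977163014],
])

def aces2065_1_to_acescg(rgb):
    """Convert an ACES2065-1 RGB triple to ACEScg."""
    return TRA_1 @ np.asarray(rgb, dtype=np.float64)

print(aces2065_1_to_acescg([1.0, 1.0, 1.0]))   # ~ [1. 1. 1.]
\end{verbatim}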
\subsection{Converting ACEScg RGB values to ACES2065-1 RGB values}
ACEScg $R$, $G$, and $B$ values shall be converted to ACES2065-1 $R$, $G$ and $B$ using the transformation matrix ($TRA$) calculated and applied using the methods provided in Section 4 of SMPTE RP 177:1993.
\note{Equation \ref{eq:acescg2aces} shows the relationship between ACES $R$, $G$, and $B$ values and ACEScg $R$, $G$, and $B$ values. $TRA_{2}$, rounded to 10 significant digits, is derived from the product of $NPM_{AP0}$ inverse and $NPM_{AP1}$ calculated using methods provided in Section 3.3 of SMPTE RP 177:1993. AP0 are the primaries of ACES specified in SMPTE ST 2065-1:2012. AP1 are the primaries of ACEScg specified in \autoref{sec:colorspace}.}
\begin{floatequ}
\begin{gather}
\begin{bmatrix}
R_{ACES}\\
G_{ACES}\\
B_{ACES}
\end{bmatrix}
=
TRA_{2}
\cdot
\begin{bmatrix}
R_{ACEScg}\\
G_{ACEScg}\\
B_{ACEScg}
\end{bmatrix} \\
\\
TRA_{2} =
\begin{bmatrix*}[r]
0.6954522414 & 0.1406786965 & 0.1638690622 \\
0.0447945634 & 0.8596711185 & 0.0955343182 \\
-0.0055258826 & 0.0040252103 & 1.0015006723 \\
\end{bmatrix*} \\
\\
TRA_{2} = NPM^{-1}_{AP0} \cdot NPM_{AP1}
\end{gather}
\caption{ACEScg to ACES2065-1}
\label{eq:acescg2aces}
\end{floatequ}
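As a further informative check, again not part of this specification, the two published matrices should be inverses of one another up to coefficient rounding, since $TRA_{2} = NPM^{-1}_{AP0} \cdot NPM_{AP1} = TRA^{-1}_{1}$:
\begin{verbatim}
import numpy as np

# TRA_1 (ACES2065-1 -> ACEScg) and TRA_2 (ACEScg -> ACES2065-1),
# both rounded to 10 significant digits as published above
TRA_1 = np.array([
    [ 1.4514393161, -0.2365107469, -0.2149285693],
    [-0.0765537734,  1.1762296998, -0.0996759264],
    [ 0.0083161484, -0.0060324498,  0.9977163014],
])
TRA_2 = np.array([
    [ 0.6954522414,  0.1406786965,  0.1638690622],
    [ 0.0447945634,  0.8596711185,  0.0955343182],
    [-0.0055258826,  0.0040252103,  1.0015006723],
])

# Deviation of TRA_2 @ TRA_1 from the identity: rounding error only
print(np.abs(TRA_2 @ TRA_1 - np.eye(3)).max())
\end{verbatim}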
%!TEX root = ../main.tex
\subsection{Derivatives of Exponentials}
\objective{Describe and use the special status of $e^x$ amongst exponential equations}
As we move past power functions, derivatives become a lot harder to compute. If we
begin with the premise that there is some exponential function which is its own
derivative --- that the height of the function at every point is the same as its slope
--- then we should be able to find the constant that serves as its base. Empirically, it is easy to see that
such a number is between 2 and 3, but how can we be more precise? We
begin with the definition
$$
f'(x) = \lim_{h\rightarrow0}\frac{f(x+h)-f(x)}{h}
$$
We will use the letter $e$ next, but assume we do not know its exact value. In
the problem set, you saw that its definition is
$$
e = \lim_{h\rightarrow\infty}\left(1+\frac{1}{h}\right)^h
$$
and so right away we see we are dealing with opposite limits. We can convert
from a limit at infinity to a limit at zero by taking the reciprocal of the variable
at every instance. This leads to a modified definition of $e$:
$$
e = \lim_{h\rightarrow0}\left(1+\cfrac{1}{\frac{1}{h}}\right)^{\frac{1}{h}}
$$
Armed with compatible limits, let us return to the definition of a derivative.
$$
(e^x)' = \lim_{h\rightarrow0}\frac{e^{x+h}-e^x}{h}
$$
By the properties of exponents, a sum in the exponent must come
from a multiplication of the bases (i.e. $e^{x+h}=e^x\cdot{}e^h$).
Factoring out $e^x$, we get
$$
(e^x)' = e^x \cdot{}\lim_{h\rightarrow0}\frac{e^h-1}{h}
$$
Will our definition of $e$ work here? Substituting it in is very messy, but cleans up
perfectly. Just evaluating the limit,
\begin{align*}
\lim_{h\rightarrow0} & \frac{\left[\left(1+\cfrac{1}{\frac{1}{h}}\right)^{\frac{1}{h}}\right]^{h}-1}{h} &\\
& \frac{\left(1+\cfrac{1}{\frac{1}{h}}\right) - 1}{h} &\\
& \frac{\cfrac{1}{\frac{1}{h}}}{h} & \Rightarrow \frac{h}{h} \\
& = 1
\end{align*}
\begin{derivation}{Derivative of $e^x$}
$$
(e^x)' = e^x
$$
\end{derivation}
\personfeature[0in]{\chapdir/pics/Charles_Hermite_circa_1901_edit}{Charles Hermite}{1822
- 1901}{was a French mathematician who did research on number theory,
quadratic forms, invariant theory, orthogonal polynomials, elliptic functions, and algebra.
He was the first to prove that $e$, the base of natural logarithms, is a transcendental number.
His methods were later used by Ferdinand von Lindemann to prove that $\pi$ is transcendental.
\href{https://en.wikipedia.org/wiki/Charles_Hermite}{Wikipedia}}
\subsection{Implications}
How does this explain the behavior of $2^x, 3^x$ or any other base? The limit portion of
the work shown above was equal to 1, but if we had substituted any other number in,
we would have obtained some constant. We can extend the definition of exponential
derivative like so:
\begin{derivation}{Derivative of $b^x$}
$$
(b^x)' = b^x \cdot{} \ln{b}
$$
\end{derivation}
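For example, with bases 2 and 3,
$$
(2^x)' = 2^x \cdot{} \ln{2} \approx 0.69\cdot{}2^x
\qquad
(3^x)' = 3^x \cdot{} \ln{3} \approx 1.10\cdot{}3^x
$$
so the slope of $2^x$ is a little less than its height and the slope of $3^x$ a little more, which is exactly why the base that is its own derivative lies between 2 and 3.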
Often we will need to apply the Chain Rule, since the exponent is rarely just $x$
$$
\left(e^{f(x)}\right)' = e^{f(x)} \cdot f'(x)
$$
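For example, if the exponent is $f(x)=3x^{2}$, then
$$
\left(e^{3x^{2}}\right)' = e^{3x^{2}} \cdot{} 6x
$$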
Lastly, if $e^x$ is its own derivative, then it is its own anti-derivative as well:
$$
\int e^xdx = e^x + C
$$
The TI-8* has an $e^x$ function (2nd-LN) and most computer programs (e.g. MS Excel)
have a function \texttt{exp()}, which is the same thing.
\begin{figure}
\begin{centering}
\includegraphics[width=\textwidth]{\chapdir/pics/exponentialderivatives}
\caption[Exponential tangent lines at (0,1)]{A set of exponential equations tangent lines at (0,1), with original function dotted. $1.4^x$ in green, $e^x$ in blue, $20^x$ in red.}
\end{centering}
\end{figure}
\hypertarget{class_language}{}\section{Language Class Reference}
\label{class_language}\index{Language@{Language}}
{\ttfamily \#include $<$language.\+h$>$}
\subsection*{Public Slots}
\begin{DoxyCompactItemize}
\item
static void \hyperlink{class_language_a1be8a8900bc2e8511f68cdadb460fa65}{set\+Language} ()
\end{DoxyCompactItemize}
\subsection*{Static Public Member Functions}
\begin{DoxyCompactItemize}
\item
static void \hyperlink{class_language_aeebc39360aef6d5fd053fd60b6ddc99a}{load\+Translations} ()
\begin{DoxyCompactList}\small\item\em \hyperlink{class_language_aeebc39360aef6d5fd053fd60b6ddc99a}{Language\+::load\+Translations} Look for available translations and load them. \end{DoxyCompactList}\item
static bool \hyperlink{class_language_a41e794bd617d77954179da57c2093412}{main\+Window\+Direction} (\hyperlink{class_q_main_window}{Q\+Main\+Window} $\ast$w)
\begin{DoxyCompactList}\small\item\em \hyperlink{class_language_a41e794bd617d77954179da57c2093412}{Language\+::main\+Window\+Direction} Sets the Layout direction to R\+TL or L\+TR. \end{DoxyCompactList}\item
static Q\+String \hyperlink{class_language_a166c93720b89d805481cd70f6fe333a2}{get\+Config\+Language} ()
\begin{DoxyCompactList}\small\item\em \hyperlink{class_language_a166c93720b89d805481cd70f6fe333a2}{Language\+::get\+Config\+Language} Get the language ID from the Config file. \end{DoxyCompactList}\item
static Q\+String \hyperlink{class_language_a8b7392bc644966ab95e5e18f3ef01c68}{get\+Current\+Language} ()
\begin{DoxyCompactList}\small\item\em \hyperlink{class_language_a8b7392bc644966ab95e5e18f3ef01c68}{Language\+::get\+Current\+Language} Get the current language ID. \end{DoxyCompactList}\item
static void \hyperlink{class_language_a95f2608b1e56df6a792b24120a800399}{set\+Config\+Language} (Q\+String lang\+ID)
\begin{DoxyCompactList}\small\item\em \hyperlink{class_language_a95f2608b1e56df6a792b24120a800399}{Language\+::set\+Config\+Language} Set the language ID to the Config file. \end{DoxyCompactList}\item
static Q\+Hash$<$ Q\+String, Q\+String $>$ \hyperlink{class_language_ac56915d59897ed05717af793b5aae7fb}{get\+Languages\+Info} ()
\begin{DoxyCompactList}\small\item\em \hyperlink{class_language_ac56915d59897ed05717af793b5aae7fb}{Language\+::get\+Languages\+Info} Get ID\textquotesingle{}s and names of available languages. \end{DoxyCompactList}\end{DoxyCompactItemize}
\subsection{Detailed Description}
This class is used to manage the localization
\subsection{Member Function Documentation}
\index{Language@{Language}!get\+Config\+Language@{get\+Config\+Language}}
\index{get\+Config\+Language@{get\+Config\+Language}!Language@{Language}}
\subsubsection[{\texorpdfstring{get\+Config\+Language()}{getConfigLanguage()}}]{\setlength{\rightskip}{0pt plus 5cm}Q\+String Language\+::get\+Config\+Language (
\begin{DoxyParamCaption}
{}
\end{DoxyParamCaption}
)\hspace{0.3cm}{\ttfamily [static]}}\hypertarget{class_language_a166c93720b89d805481cd70f6fe333a2}{}\label{class_language_a166c93720b89d805481cd70f6fe333a2}
\hyperlink{class_language_a166c93720b89d805481cd70f6fe333a2}{Language\+::get\+Config\+Language} Get the language ID from the Config file.
\begin{DoxyReturn}{Returns}
The ID of the specified language ~\newline
If the language ID is not available, it returns \char`\"{}??\char`\"{}
\end{DoxyReturn}
\index{Language@{Language}!get\+Current\+Language@{get\+Current\+Language}}
\index{get\+Current\+Language@{get\+Current\+Language}!Language@{Language}}
\subsubsection[{\texorpdfstring{get\+Current\+Language()}{getCurrentLanguage()}}]{\setlength{\rightskip}{0pt plus 5cm}Q\+String Language\+::get\+Current\+Language (
\begin{DoxyParamCaption}
{}
\end{DoxyParamCaption}
)\hspace{0.3cm}{\ttfamily [static]}}\hypertarget{class_language_a8b7392bc644966ab95e5e18f3ef01c68}{}\label{class_language_a8b7392bc644966ab95e5e18f3ef01c68}
\hyperlink{class_language_a8b7392bc644966ab95e5e18f3ef01c68}{Language\+::get\+Current\+Language} Get the current language ID.
\begin{DoxyReturn}{Returns}
The ID of the language used currently
\end{DoxyReturn}
\index{Language@{Language}!get\+Languages\+Info@{get\+Languages\+Info}}
\index{get\+Languages\+Info@{get\+Languages\+Info}!Language@{Language}}
\subsubsection[{\texorpdfstring{get\+Languages\+Info()}{getLanguagesInfo()}}]{\setlength{\rightskip}{0pt plus 5cm}Q\+Hash$<$ Q\+String, Q\+String $>$ Language\+::get\+Languages\+Info (
\begin{DoxyParamCaption}
{}
\end{DoxyParamCaption}
)\hspace{0.3cm}{\ttfamily [static]}}\hypertarget{class_language_ac56915d59897ed05717af793b5aae7fb}{}\label{class_language_ac56915d59897ed05717af793b5aae7fb}
\hyperlink{class_language_ac56915d59897ed05717af793b5aae7fb}{Language\+::get\+Languages\+Info} Get ID\textquotesingle{}s and names of available languages.
\begin{DoxyReturn}{Returns}
A Q\+Hash of language-\/\+ID as key, and language-\/name as value. ~\newline
Example\+: $<$en, English$>$, $<$ar, Arabic$>$, etc.
\end{DoxyReturn}
\index{Language@{Language}!load\+Translations@{load\+Translations}}
\index{load\+Translations@{load\+Translations}!Language@{Language}}
\subsubsection[{\texorpdfstring{load\+Translations()}{loadTranslations()}}]{\setlength{\rightskip}{0pt plus 5cm}void Language\+::load\+Translations (
\begin{DoxyParamCaption}
{}
\end{DoxyParamCaption}
)\hspace{0.3cm}{\ttfamily [static]}}\hypertarget{class_language_aeebc39360aef6d5fd053fd60b6ddc99a}{}\label{class_language_aeebc39360aef6d5fd053fd60b6ddc99a}
\hyperlink{class_language_aeebc39360aef6d5fd053fd60b6ddc99a}{Language\+::load\+Translations} Look for available translations and load them.
It looks for available languages in an X\+ML file containing id, name, and location. Then, it loads the languages (translations) from their locations. It also creates the Q\+Hash$<$\+Q\+String, Q\+String$>$ languages\+Info that can be returned using the function \hyperlink{class_language_ac56915d59897ed05717af793b5aae7fb}{Language\+::get\+Languages\+Info}. \index{Language@{Language}!main\+Window\+Direction@{main\+Window\+Direction}}
\index{main\+Window\+Direction@{main\+Window\+Direction}!Language@{Language}}
\subsubsection[{\texorpdfstring{main\+Window\+Direction(\+Q\+Main\+Window $\ast$w)}{mainWindowDirection(QMainWindow *w)}}]{\setlength{\rightskip}{0pt plus 5cm}bool Language\+::main\+Window\+Direction (
\begin{DoxyParamCaption}
\item[{{\bf Q\+Main\+Window} $\ast$}]{w}
\end{DoxyParamCaption}
)\hspace{0.3cm}{\ttfamily [static]}}\hypertarget{class_language_a41e794bd617d77954179da57c2093412}{}\label{class_language_a41e794bd617d77954179da57c2093412}
\hyperlink{class_language_a41e794bd617d77954179da57c2093412}{Language\+::main\+Window\+Direction} Sets the Layout direction to R\+TL or L\+TR.
Sets the direction of a given \hyperlink{class_q_main_window}{Q\+Main\+Window} to R\+TL or L\+TR according to the current locale:
\begin{DoxyItemize}
\item R\+TL\+: If the first two characters of the locale are in\+: ar, dv, ha, he, fa, ps, ur, yi
\item L\+TR\+: Otherwise
\begin{DoxyParams}{Parameters}
{\em w} & A \hyperlink{class_q_main_window}{Q\+Main\+Window} which we want to set the direction. \\
\hline
\end{DoxyParams}
\begin{DoxyReturn}{Returns}
true if R\+TL, false if L\+TR
\end{DoxyReturn}
\end{DoxyItemize}\index{Language@{Language}!set\+Config\+Language@{set\+Config\+Language}}
\index{set\+Config\+Language@{set\+Config\+Language}!Language@{Language}}
\subsubsection[{\texorpdfstring{set\+Config\+Language(\+Q\+String lang\+I\+D)}{setConfigLanguage(QString langID)}}]{\setlength{\rightskip}{0pt plus 5cm}void Language\+::set\+Config\+Language (
\begin{DoxyParamCaption}
\item[{Q\+String}]{lang\+ID}
\end{DoxyParamCaption}
)\hspace{0.3cm}{\ttfamily [static]}}\hypertarget{class_language_a95f2608b1e56df6a792b24120a800399}{}\label{class_language_a95f2608b1e56df6a792b24120a800399}
\hyperlink{class_language_a95f2608b1e56df6a792b24120a800399}{Language\+::set\+Config\+Language} Set the language ID to the Config file.
\begin{DoxyParams}{Parameters}
{\em lang\+ID} & \\
\hline
\end{DoxyParams}
\index{Language@{Language}!set\+Language@{set\+Language}}
\index{set\+Language@{set\+Language}!Language@{Language}}
\subsubsection[{\texorpdfstring{set\+Language}{setLanguage}}]{\setlength{\rightskip}{0pt plus 5cm}static void Language\+::set\+Language (
\begin{DoxyParamCaption}
{}
\end{DoxyParamCaption}
)\hspace{0.3cm}{\ttfamily [static]}, {\ttfamily [slot]}}\hypertarget{class_language_a1be8a8900bc2e8511f68cdadb460fa65}{}\label{class_language_a1be8a8900bc2e8511f68cdadb460fa65}
The documentation for this class was generated from the following files\+:\begin{DoxyCompactItemize}
\item
jpconj/\+Ui\+Helper/\hyperlink{language_8h}{language.\+h}\item
jpconj/\+Ui\+Helper/\hyperlink{language_8cpp}{language.\+cpp}\end{DoxyCompactItemize}
% Created 2014-08-28 Thu 17:51
\documentclass[presentation]{beamer}
\usepackage[utf8]{inputenc}
\usepackage[T1]{fontenc}
\usepackage{fixltx2e}
\usepackage{graphicx}
\usepackage{longtable}
\usepackage{float}
\usepackage{wrapfig}
\usepackage{rotating}
\usepackage[normalem]{ulem}
\usepackage{amsmath}
\usepackage{textcomp}
\usepackage{marvosym}
\usepackage{wasysym}
\usepackage{amssymb}
\usepackage{hyperref}
\tolerance=1000
\hypersetup{pdfauthor="Vasilij Schneidermann", pdftitle="Emacs Lisp or Why Emacs' Extension Language Is Worth Another Look", colorlinks, linkcolor=black, urlcolor=blue}
\usetheme{Rochester}
\usecolortheme[RGB={87,83,170}]{structure}
\author{Vasilij Schneidermann}
\date{August 24, 2014}
\title{Emacs Lisp or Why Emacs' Extension Language Is Worth Another Look}
\hypersetup{
pdfkeywords={},
pdfsubject={},
pdfcreator={Emacs 24.3.1 (Org mode 8.2.7c)}}
\begin{document}
\maketitle
\begin{frame}{Outline}
\tableofcontents
\end{frame}
\AtBeginSection{\frame{\sectionpage}}
\section{Introduction}
\label{sec-1}
\begin{frame}[label=sec-1-1]{Speaker}
\begin{itemize}
\item Vasilij Schneidermann, 22
\item Information systems student
\item Working at bevuta IT, Cologne
\item \texttt{[email protected]}
\item \url{https://github.com/wasamasa}
\end{itemize}
\end{frame}
\begin{frame}[label=sec-1-2]{Preliminary notes}
\begin{itemize}
\item Pretty subjective at times
\item Prepare for dogfooding
\end{itemize}
\end{frame}
\begin{frame}[label=sec-1-3]{What this talk will be about}
\begin{itemize}
\item Emacs features
\item Demonstrations of what Emacs can do
\item The community
\end{itemize}
\end{frame}
\begin{frame}[label=sec-1-4]{What this talk will not be about}
\begin{itemize}
\item Teaching you how to use Emacs
\item Editor wars
\end{itemize}
\end{frame}
\section{How I got started with Emacs and Emacs Lisp}
\label{sec-2}
\begin{frame}[label=sec-2-1]{How I got started with Emacs and Emacs Lisp}
\begin{itemize}
\item Started out with switching text editors constantly
\item Became curious, learned Vim
\item Wanted more, tried Emacs
\item Stuck with Emacs, didn't want to learn Emacs Lisp at first
\item Curiosity took over, read sources of small packages
\item Learned to prefer reading source over docs
\item Small fixes at first, wrote own packages later
\item Eventually dug in deep enough to hold a talk about it
\end{itemize}
\end{frame}
\section{Why I didn't want to learn Emacs Lisp at first}
\label{sec-3}
\begin{frame}[label=sec-3-1]{It's a Lisp, Lisps are functional languages!}
\begin{itemize}
\item Lisp doesn't mean it's a functional language
\item Emacs Lisp itself is rather procedural
\item \href{https://github.com/magnars/dash.el}{dash.el} helps if you want it to be more functional
\end{itemize}
\end{frame}
\begin{frame}[label=sec-3-2]{It's a Lisp, therefore it must be useless!}
\begin{itemize}
\item Emacs is (probably) the largest open Lisp project out there
\item There's a few thousand packages one can install
\end{itemize}
\end{frame}
\begin{frame}[label=sec-3-3]{So, there must be nothing useful left to write anymore!}
\begin{itemize}
\item There's more than enough things lacking
\item Add your own ideas and you'll have something useful to write
\end{itemize}
\end{frame}
\begin{frame}[label=sec-3-4]{I want to learn a real Lisp first!}
\begin{itemize}
\item It is a real Lisp and a good starting point
\item If you can't decide which one to go for, learn it first, then
proceed depending on how much you like it
\end{itemize}
\end{frame}
\begin{frame}[label=sec-3-5]{I don't want to learn a completely different language just to customize a text editor!}
\begin{itemize}
\item Starting out is very simple
\item Transition to more complex code is gradual
\end{itemize}
\end{frame}
\begin{frame}[label=sec-3-6]{The existing tutorials and the manual are too intimidating, I want something more approachable!}
\begin{itemize}
\item Introduction to reading code and customization:
\url{http://sachachua.com/blog/series/read-lisp-tweak-emacs/}
\item Minimal tutorial, REPL-centric:
\url{http://bzg.fr/learn-emacs-lisp-in-15-minutes.html}
\item More traditional introduction to concepts:
\url{http://harryrschwartz.com/2014/04/08/an-introduction-to-emacs-lisp.html}
\item Exactly what it says on the tin:
\url{http://steve-yegge.blogspot.com/2008/01/emergency-elisp.html}
\end{itemize}
\end{frame}
\section{History}
\label{sec-4}
\begin{frame}[label=sec-4-1]{History}
\begin{itemize}
\item RMS disliked Unix, had the idea to create a completely free OS
\item He started writing his own compiler, didn't like Vi
\item He started writing an extensible editor that was able to do more than a
mere text editor would
\item He chose Lisp as the extension language in which everything apart
  from the fundamentals would be implemented
\item He also made it free to distribute and added a clause that people
had to contribute improvements back, way before they were using DVCS
\item Later development moved from the cathedral to the bazaar style
\end{itemize}
\end{frame}
\section{Strengths}
\label{sec-5}
\begin{frame}[label=sec-5-1]{Rich runtime}
\begin{itemize}
\item Lots of Emacs Lisp tooling
\item Serialization/Unserialization of XML, HTML, JSON
\item Datetime/Calendar, Color, Unmarshaling
\item File handling, recoding
\item Numerical analysis, graphing
\item Parsers, DBus, Terminal Emulation
\item Wrappers for Mail, IRC, Printing, VCS, GPG, \ldots{}
\item Network processes and access/requests
\item Process control
\item \ldots{}
\end{itemize}
\end{frame}
\begin{frame}[label=sec-5-2]{Event-driven}
\begin{itemize}
\item Color selection with mouse (vivid-rodent.el)
\end{itemize}
\end{frame}
\begin{frame}[label=sec-5-3]{Event loop}
\begin{itemize}
\item Play back frames with timeout, control playback (flipbook.el)
\end{itemize}
\end{frame}
\begin{frame}[label=sec-5-4]{Buffers are powerful}
\begin{itemize}
\item State visualization (svg-2048.el, svg-2048-animation-demo.el)
\end{itemize}
\end{frame}
\begin{frame}[label=sec-5-5]{Complex UI is possible}
\begin{itemize}
\item Trigger evaluation in different buffer with keyboard input (dial.el)
\item Magit and \href{https://github.com/mickeynp/makey}{makey}, org-export UI
\end{itemize}
\end{frame}
\begin{frame}[label=sec-5-6]{More productivity}
\begin{itemize}
\item Access often used functionality in a simpler way (helm-fkeys.el)
\end{itemize}
\end{frame}
\begin{frame}[label=sec-5-7]{Better workflow}
\begin{itemize}
\item Switch window configurations in a simpler way (eyebrowse)
\end{itemize}
\end{frame}
\begin{frame}[label=sec-5-8]{Immediate feedback loop}
\begin{itemize}
\item \emph{commence fixing/writing code to make a more practical point}
(svg-2048.el)
\end{itemize}
\end{frame}
\section{Weaknesses}
\label{sec-6}
\begin{frame}[label=sec-6-1]{No APIs / Crufty APIs}
\begin{itemize}
\item Very little or weird abstraction
\end{itemize}
\end{frame}
\begin{frame}[label=sec-6-2]{Speed}
\begin{itemize}
\item Need to escape to external processes / FFI
\item Byte-compilation helps a bit (with macros)
\end{itemize}
\end{frame}
\begin{frame}[label=sec-6-3]{Historical mistakes}
\begin{itemize}
\item The C codebase is scary
\item Complexity of the display engine
\item No namespaces
\item BZR
\item Weird naming conventions
\end{itemize}
\end{frame}
\begin{frame}[label=sec-6-4]{There's still a lot to be fixed}
\includegraphics[width=.9\linewidth]{./images/fixed.jpg}
\end{frame}
\section{What do?}
\label{sec-7}
\begin{frame}[label=sec-7-1]{Programmers}
\begin{itemize}
\item Join the Mailing List, hang out on \emph{\#emacs} at Freenode
\item Improve your Emacs Lisp skills
\item Understand existing code, discuss and question it
\item Write demos to find better approaches to a problem
\end{itemize}
\end{frame}
\begin{frame}[label=sec-7-2]{Designers \& Writers}
“Design is about pulling things apart.” - Rich Hickey
\begin{itemize}
\item \href{https://github.com/chrisdone/structured-haskell-mode}{Gifcasts}
\item Clearer documentation
\item Suggest (UI) ideas, discuss them
\item Devise APIs and better abstractions
\end{itemize}
\end{frame}
\begin{frame}[label=sec-7-3]{Rewrite proponents}
See \href{http://www.emacswiki.org/emacs/GuileEmacs}{Guile} \href{http://git.hcoop.net/?p=bpt/emacs.git}{Emacs}
\end{frame}
\begin{frame}[label=sec-7-4]{Possible stuff to hack on}
\begin{itemize}
\item A “native” torrent client
\item Guile Emacs and things using Guile bindings (graphical browser,
video player, OpenGL, \ldots{})
\item dired
\item Window management library
\item Input methods
\item helm
\item dash.el, s.el, f.el, b.el, \ldots{}
\item my stuff
\item other people's stuff (see next slide)
\end{itemize}
\end{frame}
\begin{frame}[label=sec-7-5]{Hackers to collaborate with}
\begin{itemize}
\item \href{https://github.com/Fuco1}{Fuco1}
\item \href{https://github.com/magnars}{magnars}
\item \href{https://github.com/skeeto}{skeeto}
\item \href{https://github.com/chrisdone}{chrisdone}
\item \href{https://github.com/purcell}{purcell}
\item \href{https://github.com/thierryvolpiatto}{thierryvolpiatto}
\item \href{https://github.com/bbatsov}{bbatsov}
\item \href{https://github.com/technomancy}{technomancy}
\item \href{https://github.com/dgutov}{dgutov}
\item \ldots{}
\end{itemize}
\end{frame}
\begin{frame}[label=sec-7-6]{Conclusion}
\begin{itemize}
\item Emacs is pretty cool
\item You should totally learn to mold it to your likings
\item If you do, help out while you're at it
\item There's more than enough to be fixed
\end{itemize}
\end{frame}
\begin{frame}[label=sec-7-7]{Questions?}
“<technomancy> not making sense never stopped an intrepid elisper!”
\end{frame}
% Emacs 24.3.1 (Org mode 8.2.7c)
\end{document}
"alphanum_fraction": 0.7591255929,
"avg_line_length": 30.1180124224,
"ext": "tex",
"hexsha": "0cd21339c1190676faa1c370a9f3ddd9afdad544",
"lang": "TeX",
"max_forks_count": 1,
"max_forks_repo_forks_event_max_datetime": "2020-04-19T03:31:18.000Z",
"max_forks_repo_forks_event_min_datetime": "2020-04-19T03:31:18.000Z",
"max_forks_repo_head_hexsha": "fe87e348661b01e4a511e0bc8a8642121ed9cf9f",
"max_forks_repo_licenses": [
"CC-BY-4.0"
],
"max_forks_repo_name": "wasamasa/quasiconf-2014",
"max_forks_repo_path": "presentation.tex",
"max_issues_count": null,
"max_issues_repo_head_hexsha": "fe87e348661b01e4a511e0bc8a8642121ed9cf9f",
"max_issues_repo_issues_event_max_datetime": null,
"max_issues_repo_issues_event_min_datetime": null,
"max_issues_repo_licenses": [
"CC-BY-4.0"
],
"max_issues_repo_name": "wasamasa/quasiconf-2014",
"max_issues_repo_path": "presentation.tex",
"max_line_length": 168,
"max_stars_count": 8,
"max_stars_repo_head_hexsha": "fe87e348661b01e4a511e0bc8a8642121ed9cf9f",
"max_stars_repo_licenses": [
"CC-BY-4.0"
],
"max_stars_repo_name": "wasamasa/quasiconf-2014",
"max_stars_repo_path": "presentation.tex",
"max_stars_repo_stars_event_max_datetime": "2020-04-19T03:31:15.000Z",
"max_stars_repo_stars_event_min_datetime": "2015-03-06T18:25:32.000Z",
"num_tokens": 2967,
"size": 9698
} |
%
% File acl2019.tex
%
%% Based on the style files for ACL 2018, NAACL 2018/19, which were
%% Based on the style files for ACL-2015, with some improvements
%% taken from the NAACL-2016 style
%% Based on the style files for ACL-2014, which were, in turn,
%% based on ACL-2013, ACL-2012, ACL-2011, ACL-2010, ACL-IJCNLP-2009,
%% EACL-2009, IJCNLP-2008...
%% Based on the style files for EACL 2006 by
%%[email protected] or [email protected]
%% and that of ACL 08 by Joakim Nivre and Noah Smith
\documentclass[11pt,a4paper]{article}
\usepackage[hyperref]{acl2019}
\usepackage{times}
\usepackage{latexsym}
\usepackage{url}
\aclfinalcopy % Uncomment this line for the final submission
%\def\aclpaperid{***} % Enter the acl Paper ID here
%\setlength\titlebox{5cm}
% You can expand the titlebox if you need extra space
% to show all the authors. Please do not make the titlebox
% smaller than 5cm (the original size); we will check this
% in the camera-ready version and ask you to change it back.
\newcommand\BibTeX{B\textsc{ib}\TeX}
\title{Literature Review - Document Summarization}
\author{Haojun Li \\
\texttt{[email protected]}\\
\texttt{Department of Computer Science, Stanford University}
}
\date{}
\begin{document}
\maketitle
\section{Introduction/Task Definition}
The document summarization task is defined as compressing and summarizing a long article into a short paragraph of several sentences. There are currently 2 main approaches to document summarization: extractive and abstractive summarization. Extractive summarization casts the problem as binary classification, where the model predicts whether each source sentence will be part of the summary. Predicting the membership of source sentences in the summary has achieved great results, as most sentences that are part of the summary either come directly from the source document or are paraphrases of source sentences with minimal modification. The main problem with extractive methods is that they tend to produce summaries that are not coherent: since the summary sentences are restricted to be extracted directly from the source document, the coherency of the summary is limited. However, this limitation has proven to be beneficial compared to abstractive methods. Abstractive summarization aims to improve on extractive methods in that it not only selects the most relevant details from the source document, but also generates new tokens that are not present in it. Thus, abstractive methods can ``paraphrase'' sentences found in the source document and form a more coherent summary. Earlier works on abstractive summarization cast the problem as a machine translation problem, where given a source document the model should be able to ``translate'' it into a succinct summary. However, as multiple authors soon realized, this does not necessarily improve on extractive methods, since the model sometimes generates repeated paraphrases and replaces words present in the source document with related but ultimately incorrect phrases. Thus, extractive methods still achieve better results than any current state-of-the-art abstractive method.
The most common evaluation metrics are the ROUGE scores (ROUGE-1, ROUGE-2, ROUGE-L), which measure the unigram/bigram/longest-common-subsequence overlap between the gold summary and the generated summary. However, as with the BLEU score in QA and MT tasks, this metric does not capture key qualities such as coherency and grammaticality. These metrics are thus heavily biased against abstractive methods, since abstractive summaries tend to produce more ``novel'' words in a more coherent manner.
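As a rough illustration (not taken from any of the reviewed papers), ROUGE-$n$ recall can be approximated as plain n-gram overlap; the whitespace tokenization below is a simplifying assumption, and real implementations add stemming and other normalization.
\begin{verbatim}
# Minimal sketch of ROUGE-n recall as n-gram overlap (illustrative only).
from collections import Counter

def ngrams(tokens, n):
    return Counter(tuple(tokens[i:i + n]) for i in range(len(tokens) - n + 1))

def rouge_n_recall(candidate, reference, n=1):
    """Fraction of reference n-grams that also appear in the candidate."""
    cand, ref = ngrams(candidate.split(), n), ngrams(reference.split(), n)
    if not ref:
        return 0.0
    overlap = sum(min(count, cand[gram]) for gram, count in ref.items())
    return overlap / sum(ref.values())

print(rouge_n_recall("the cat sat on the mat", "the cat lay on the mat", n=2))  # 0.6
\end{verbatim}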
The datasets commonly used for this task are the CNN/DM dataset and the NYT dataset, both of which are very rich. The CNN/DM dataset consists of stories collected from CNN and Daily Mail news articles, each with several bullet points that ``highlight'' the article. The NYT dataset consists of stories collected from the New York Times with a short summary written by librarians. These datasets also have the problem that most sentences in the summary are copied from the source text with minimal modification, which again biases evaluation against abstractive methods.
\section{Summary of Articles}
In this literature review I will summarize 6 articles, 4 of which are major contributions to the abstractive text summarization task; the remaining 2 are recent works on extractive summarization. I intend to focus my project on the abstractive summarization task while borrowing ideas from extractive summarization.
\subsection{Seq2seq model}
The first major contribution to abstractive text summarization using neural methods comes from the IBM Watson team \cite{lead}. Following the advancements in neural machine translation (NMT) with sequence-to-sequence models, this paper describes a way to apply these models to the text summarization task with several improvements. The authors first used an off-the-shelf attentional encoder-decoder RNN, and it turns out that it already outperforms the then state-of-the-art systems. This serves as the baseline model that they then improve on. Details of encoder-decoder RNN models are fairly well known, so they are not described here (nor in any of the papers). They introduced the several improvements listed below.
\begin{enumerate}
\item They restricted the output vocabulary to only contain words present in the source document, in an attempt to limit the computational capacity required; it turns out this works fairly well, since most words in the summary are present somewhere in the source document.
\item They introduced several new embeddings in addition to word embeddings (for which they used word2vec). These embeddings correspond to part-of-speech tags, named-entity tags, and TF-IDF statistics of the words. They then concatenated these embeddings into a single embedding for each word before feeding it into the encoder bi-directional LSTM. The decoder uni-directional LSTM produces words one by one while attending to all source document positions. This allows the decoder to not only take into account the state of each output word conditioned on all previous words, but also to attend to all source sentences and use those hidden states to inform its decision for the next word.
\item To solve the well-known OOV (out-of-vocabulary) issue, they provided a ``switch'' mechanism that is activated when the model predicts the UNK token; the generator then calculates a ``pointer'' distribution over all source positions to pick the best word to replace the UNK token with. This allows the model to produce rare words as well as named entities such as names. To train the model, they provide the explicit pointer position whenever the model predicts an UNK, with the pointer pointing to the first occurrence of the gold word.
\item They also added a hierarchical attention mechanism, in which attention at the word level and the sentence level informs the decisions of the decoder.
\end{enumerate}
They achieved great results with this model, but it requires a lot of computational resources and the summary outputs are not always ideal. They noted that even though the generated words may have high overlap with the gold summaries, the output tends not to be factually correct and also tends to repeat itself. Several papers after this one attempt to solve these problems with more advanced techniques, but it turns out that none of them are able to beat the extractive methods in terms of ROUGE scores. Again, higher ROUGE scores do not necessarily mean a model is better.
\subsubsection{Compare and Contrast}
This is the founding article of the area, in which the authors introduced several new neural methods for the summary generation task. Most other papers use the results in this paper as a baseline. Although the results achieved here have been surpassed many times over by later papers, the methods used are still significantly influential in later work. They also achieved state-of-the-art results on both datasets.
\subsection{Pointer Generator}
A year after the previous paper, a new challenger emerged with the pointer-generator network \cite{pointer-generator}. The authors noted that existing models exhibit many undesirable behaviors, such as factually inaccurate details, inability to deal with OOV words, and repetition. They proposed a new pointer-generator model in which, at each position of the output, an additional ``generation probability'' is calculated and then used to bias the attention distribution over all source positions. This generation probability essentially serves as a soft ``switch'' at every position of the output, compared to the hard switch used only at OOV positions in the previous paper. The authors also proposed reusing the attention distribution calculated by the encoder together with the generation probability, which also improved the results.
To solve the repetition problem, the authors proposed a coverage mechanism commonly seen in machine translation systems, where they compute a ``coverage loss'' that penalizes repeated attention to the same locations. This effectively reduces repetition.
Note that their model took 3 days and 4 hours to train on a Tesla GPU.
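To make the two mechanisms concrete, here is a small numpy sketch of (i) mixing the generation and copy distributions with $p_{gen}$ and (ii) the coverage loss; array shapes and names are my own illustration, not the authors' code.
\begin{verbatim}
import numpy as np

def final_distribution(p_gen, p_vocab, attention, src_ids, vocab_size):
    # P(w) = p_gen * P_vocab(w) + (1 - p_gen) * sum of attention over the
    # source positions that hold the token w.
    p_copy = np.zeros(vocab_size)
    np.add.at(p_copy, src_ids, attention)   # scatter attention mass onto token ids
    return p_gen * p_vocab + (1.0 - p_gen) * p_copy

def coverage_loss(attentions):
    # Penalize re-attending: sum_t sum_i min(a_t[i], c_t[i]),
    # where c_t is the sum of all earlier attention vectors.
    coverage, loss = np.zeros_like(attentions[0]), 0.0
    for a in attentions:
        loss += np.minimum(a, coverage).sum()
        coverage += a
    return loss
\end{verbatim}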
\subsubsection{Compare and Contrast}
Unlike the previous paper, they do not use pre-trained embeddings and instead train their embeddings from scratch; the reason for this choice is not explained. During test time, they use beam search, another common decoding mechanism, to find the decoded sequences with the highest likelihood. This method alone could have improved previous systems, but they did not provide ablation studies on how much beam search improves results. The results of this model are significantly higher than those presented in the previous paper, but they are still unable to beat the metrics of extractive methods. They attribute this to the inflexibility of ROUGE scores, which, as described before, favor extractive methods due to the high overlap between summary sentences and source sentences.
The authors also noted that since the model can copy from the source text at any position of the output, thanks to the soft switch mechanism, their system has a low degree of abstraction. In particular, their system copies whole article sentences 35\% of the time, while reference summaries do so only 1.3\% of the time. They realized that their model learned to copy instead of generate because copying reduces the loss much more easily than generating new words does.
A new addition in this paper is the unanonymized version of the dataset. Previous papers operating on the CNN/DM dataset first used entity extractors to remove all entities from the document and replace them with tags. The authors of this paper believe that learning to recognize entities is also an important objective of the task, so they propose working on the unanonymized dataset. They achieved state-of-the-art scores on both datasets.
\subsection{Bottom-Up Attention}
A year after the previous paper, several students from Harvard \cite{bottom-up} made significant improvements on the pointer-generator model presented above with a simple content selector. They frame the selection problem as a sequence-tagging problem: identifying the tokens from a document that are part of its summary. They then experimented with either using a hard mask to mask out sentences and words that the content selector deems unimportant according to some tunable threshold, or multiplying that probability into the attention distribution to effectively ``bias'' the distribution of the pointer-generator network towards words the content selector deems worthy. This approach can be seen as one of the more successful attempts to combine extractive and abstractive methods: a content selector essentially ``extracts'' words that seem important and should be present in the summary, while the pointer-generator network ultimately decides whether each word is included in the summary.
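A tiny sketch of the two variants described above (hard masking vs. soft re-weighting of the copy attention); the function name, arguments, and the renormalization constant are my own assumptions, not the authors' code.
\begin{verbatim}
import numpy as np

def biased_attention(attention, select_prob, threshold=None, eps=1e-10):
    # Hard variant: zero out positions whose selection probability is below a
    # tunable threshold. Soft variant: multiply the selection probability in.
    if threshold is not None:
        weights = attention * (select_prob >= threshold)
    else:
        weights = attention * select_prob
    return weights / (weights.sum() + eps)   # renormalize to a distribution
\end{verbatim}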
\subsubsection{Compare and Contrast}
Unlike the previous paper, the authors propose a 2-step method instead of end-to-end training. The reasoning behind this choice is that when humans summarize text, they first decide what to include and then paraphrase the important sentences, rather than generating sentences while reading through the document. However, the authors also propose several end-to-end alternatives in which they jointly train the content selector and the pointer-generator network with a soft mask as described above.
The author of this paper also added 2 penalty terms to force the model to produce better results.
\begin{enumerate}
\item They added a length penalty term which penalizes shorter sentences and prompt the model to produce longer sentences. They additionally set a minimum length based on the training data.
\item They added a summary-specific coverage penalty that differs from the penalty term of the previous paper. Again, they do not provide ablation studies on how much this change alone benefits the model.
\end{enumerate}
As with the previous paper, they also evaluated how abstractive the model is. It turns out that the model mostly extracts sentences and only occasionally generates novel words and paraphrases. This is in line with the analysis from the previous paper, since the ROUGE score is heavily biased against abstractive methods that generate new words, given that gold summaries also copy a lot from the source text.
They achieved state-of-the-art results on these datasets.
\subsection{DCA}
In a different domain, several advances were made in deep reinforcement learning, and several groups saw it as a way to improve current summarization models. During the last 3 years, multiple papers have come out that combine MLE-based models with reinforcement learning methods to achieve better results by conditioning agents on better reward systems instead of simply minimizing a loss function. DCA is one such model \cite{dca}. The key idea is to divide the hard task of encoding a long text into several smaller tasks of summarizing paragraphs, handled by several agents. These agents then ``communicate'' with each other by sharing the hidden states of their summaries with other agents. This communication goes through multiple layers: essentially, agents summarize their own subsection of the text and ``talk'' to other agents about their own summarization. This method is similar to the hierarchical attention structure proposed by \cite{lead}. Lastly, the agents produce hidden states for the decoder to use, and the decoder attends to these agent states. Additionally, they allow the decoder to copy words from different paragraphs by calculating a generation probability similar to \cite{pointer-generator}, but with attention spread across multiple agents' hidden states.
The main contribution of this paper is that it formulates summarization as a reinforcement learning problem, which makes the objective function easier to shape. Instead of doing only MLE learning, which would reduce the model to a hierarchical attention model, they introduced reinforcement learning objectives such as the cosine distance between two consecutively generated sentences and an RL loss that balances exploration and exploitation, and combined these losses with the MLE loss during training. They also introduced intermediate rewards to encourage the agents to produce sentences that maximize the ROUGE score.
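The combination of losses can be sketched as below; the self-critical form of the reward term and the mixing weight gamma are illustrative assumptions, not the paper's exact formulation.
\begin{verbatim}
def mixed_loss(mle_loss, sampled_log_prob, sampled_reward, baseline_reward,
               gamma=0.97):
    # Policy-gradient term: push up the log-probability of sampled summaries
    # that score better than a baseline (e.g. the greedy decode), then mix
    # with the usual maximum-likelihood loss.
    rl_loss = -(sampled_reward - baseline_reward) * sampled_log_prob
    return gamma * rl_loss + (1.0 - gamma) * mle_loss
\end{verbatim}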
In their experiments, 3 agents perform best compared to 2 or 5 agents, and they achieved state-of-the-art results on the datasets. Since multiple researchers have realized that a higher ROUGE score does not necessarily mean a better model, they added human evaluations and obtained much better results from human evaluators.
\subsubsection{Compare and Contrast}
Compared to the 3 previous papers, this paper introduces the novel method of ``communicating agents'', which allows the authors to formulate summarization as an RL problem. The introduction of an RL loss and reward system allows them to produce results that are better than MLE-based methods. However, these methods are still unable to achieve better ROUGE scores than extractive summarization methods, although they produce better summaries according to human evaluations.
Another interesting point is that this method is essentially the same as the hierarchical attention structures proposed by multiple papers before it, and I believe those papers could also benefit from reinterpreting their models and casting them as RL problems. Thus, introducing these RL losses and reward systems might improve those models as well.
Like the pointer-generator model, the authors of this paper also use beam search, but with a beam depth of 5 instead of 4. This is an indication that computational power has increased over the years, allowing later authors to build better models simply because they have access to more computational resources. Thus, the achievements of the models might not be due to the models themselves, but rather to the ability to train and evaluate them efficiently.
\subsection{Hierarchical Extractive Model}
As described above, even the current state-of-the-art abstractive summarization models are unable to beat extractive summarization models, due to the fact that most sentences present in the summary are also present in the source document. Thus, it is worth summarizing papers in the extractive summarization space. The best performing model before BERT (introduced by a team at Google \cite{bert}) is the hierarchical structured self-attentive model (HSSAS) \cite{al2018hierarchical}. In this model, the authors proposed a simple hierarchical structured self-attention mechanism to create word, sentence, and document embeddings. They then cast the problem as a classification task in which the model computes the probability of each sentence's membership in the summary. This way of framing the problem is standard in extractive document summarization.
Sentence-level attention is similar to that of the abstractive methods described above, in which sentence-level hidden states from LSTM or GRU units are used to calculate an attention distribution over all sentences. These attention states can be interpreted as ``relatedness'' between sentences, and thus inform the next layer or the decoder about whether a sentence should be included in the summary. LSTMs and GRUs are notoriously bad at remembering longer-term information, so the authors introduced word-level self-attention, which allows the model to use a weighted average of all previous states as extra input to the recurrent unit; this lets the recurrent unit effectively ``access'' information from several positions away instead of having to remember it. The authors argued that this is more similar to how humans understand documents, where we tend to refer back to what we read several positions earlier in order to write the next word.
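The word-level self-attention can be pictured as a weighted average of all previous hidden states that is fed to the recurrent unit as extra input; the dot-product scoring below is my simplification, not the paper's exact parameterization.
\begin{verbatim}
import numpy as np

def attended_context(prev_states, query):
    # prev_states: (t, d) matrix of earlier hidden states; query: current (d,) state.
    scores = prev_states @ query
    weights = np.exp(scores - scores.max())
    weights /= weights.sum()
    return weights @ prev_states   # (d,) context vector, extra input to the RNN cell
\end{verbatim}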
\subsubsection{Compare and Contrast}
The model is not so different from the abstractive models we have seen so far. For example, sentence-level attention is used in all previous abstractive papers, either to calculate a coverage penalty in the pointer-generator network \cite{pointer-generator} or during content selection in bottom-up summarization \cite{bottom-up}. The main contribution of this paper is that the attended hidden output of the word-level embeddings is used both in the final classification problem and as input to a sentence-level LSTM recurrent layer, whose output is then also used in the final classification. Thus, to determine whether a sentence should be selected, the model takes into account not only the word-attended sentence embeddings themselves, but also inter-sentence relations through the sentence-level LSTM. This model achieved state-of-the-art ROUGE scores because it is able to correctly identify the sentences that should be in the summary.
One key problem with extractive summarization is that summaries written by humans are not necessarily extractive, for obvious reasons. To address this, the authors have to ``discretize'' the summary by generating a gold label for each source sentence based on the summary. The most common way of doing this is to greedily maximize the ROUGE score of the ``gold'' sentences: essentially, the authors compute the ROUGE score of every sentence in the source document against the human-written summary and select the top 3 sentences with the highest scores. The problem then becomes a simple classification task of identifying those sentences. However, there are obvious limitations to this approach, due to the inherent differences between the paraphrased sentences in the summary and the source sentences, which abstractive summarization is trying (and failing) to solve.
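The label construction can be sketched as follows; the toy unigram-recall scorer stands in for a real ROUGE implementation, and the top-3 cutoff follows the description above.
\begin{verbatim}
from collections import Counter

def unigram_recall(sentence, reference):
    s, r = Counter(sentence.split()), Counter(reference.split())
    overlap = sum(min(c, s[w]) for w, c in r.items())
    return overlap / max(sum(r.values()), 1)

def oracle_labels(source_sentences, reference_summary, k=3):
    # Score every source sentence against the human summary, label the top k
    # as "in the summary" (1) and everything else as 0.
    scores = [unigram_recall(s, reference_summary) for s in source_sentences]
    top = sorted(range(len(scores)), key=scores.__getitem__, reverse=True)[:k]
    return [1 if i in top else 0 for i in range(len(source_sentences))]
\end{verbatim}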
\subsection{Fine-tuned BERT}
Due to the recent advances with BERT \cite{bert}, a large pretrained contextual representation model built on stacked self-attention (Transformer) layers, many NLP tasks have improved dramatically, including machine translation, sentiment analysis, and text classification. It is no surprise that this monstrosity is being used for the document summarization task as well. Yang was able to modify BERT and produce much better results in extractive summarization than previous authors \cite{bert-sum}. The major contribution of this short paper is a simple modification of BERT's input setup to allow encoding multiple sentences, instead of the two-sentence default BERT uses for entailment-style tasks. The author then fine-tuned the model with a summarization layer: either a simple classifier consisting of a single linear layer with a sigmoid output for each sentence, an inter-sentence Transformer that allows sentences to attend to each other, or an additional recurrent neural network that encodes information across the sequence of sentences to produce more informed encodings before the final linear layer with sigmoid output.
The author also included several methods used by previous papers, most notably trigram blocking, a simple way of reducing redundancy by skipping sentences that share a trigram with the already-predicted summary. During inference, the author computed a score for each sentence (the sigmoid output of the model for that sentence) and picked the top 3 sentences to include in the summary. This is a valid approach since the sigmoid output can be interpreted as the probability that the sentence is included in the summary, and the 3 highest-scoring sentences maximize that likelihood. This paper currently holds the state-of-the-art result for document summarization on the CNN/DM dataset.
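Trigram blocking itself is simple enough to sketch directly; the helper below assumes the candidate sentences are already sorted by the model's score, highest first.
\begin{verbatim}
def trigrams(sentence):
    toks = sentence.split()
    return {tuple(toks[i:i + 3]) for i in range(len(toks) - 2)}

def select_summary(ranked_sentences, k=3):
    # Greedily keep the highest-scoring sentences, skipping any sentence that
    # shares a trigram with the summary selected so far.
    summary, seen = [], set()
    for sent in ranked_sentences:
        tri = trigrams(sent)
        if tri & seen:
            continue
        summary.append(sent)
        seen |= tri
        if len(summary) == k:
            break
    return summary
\end{verbatim}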
\subsubsection{Compare and Contrast}
This paper shows that with enough computational power (the author used 3 GPUs) and strong enough pretrained contextual embeddings (BERT is trained for a couple of days on several TPUs), even a basic model can outperform existing baselines. The paper also provides a very strong baseline against which to compare other models that use BERT in place of their existing contextual embeddings. Several previous papers used GloVe or word2vec embeddings, which could be swapped out for BERT embeddings to improve their results. However, doing so also requires huge computational resources and GPUs, which are expensive and time-consuming.
\section{Future Work}
The document summarization task is far from solved. Below are directions for future work that I am considering.
\begin{enumerate}
\item Since extractive methods perform so well, we could use a 2-step method that first selects sentences using the ideas proposed by \cite{al2018hierarchical}. The content selector proposed by \cite{bottom-up} could also be used here, though with weaker results. We could then treat the mapping from the selected sentences to the abstractive summary as a translation task, forming coherent summaries from the selected sentences.
\item There are several papers proposing sentence enhancements that use no neural networks at all for summarization. They mostly rely on entity extraction and co-reference resolution to replace pronouns and references with their original entities. This could be used to augment the extractive methods.
\item Since BERT is so popular, we could also modify BERT and put several of the networks summarized earlier on top of it and observe the results, but that would be less interesting, since most of the work would be engineering effort and waiting for training.
\end{enumerate}
\newpage
\bibliography{lit}
\bibliographystyle{acl_natbib}
\end{document}
% !TeX root = ../solution.tex
\hypertarget{he22.12}{%
\chapter{[HE22.12] Copy Protection Pioneers}\label{he22.12}}
\begin{marginfigure}
\includegraphics[width=49mm]{level4/challenge12.jpg}
\end{marginfigure}
\subsection{Intro}
The copy protection pioneers were really creative and lived the jet set life.
\url{http://46.101.107.117:2209}
Note: The service is restarted every hour at x:00.
\section{Solution}\label{hv22.12solution}
The website asks for the copy protection code of the old ``Jet Set
Willy'' game. A search leads to
\url{https://github.com/aycock/jsw/blob/master/jswdecode.py}, a script that prints the
codes for the grid locations and gives us the flag.
\includegraphics[width=100mm]{level4/solution12.png}
The flag is \verb+he2022{J3t-53t-W1llY-f0r3v3R}+.
%
% File naacl2019.tex
%
%% Based on the style files for ACL 2018 and NAACL 2018, which were
%% Based on the style files for ACL-2015, with some improvements
%% taken from the NAACL-2016 style
%% Based on the style files for ACL-2014, which were, in turn,
%% based on ACL-2013, ACL-2012, ACL-2011, ACL-2010, ACL-IJCNLP-2009,
%% EACL-2009, IJCNLP-2008...
%% Based on the style files for EACL 2006 by
%%[email protected] or [email protected]
%% and that of ACL 08 by Joakim Nivre and Noah Smith
\documentclass[11pt,a4paper]{article}
\usepackage[hyperref]{naaclhlt2019}
\usepackage{times}
\usepackage{latexsym}
\usepackage{url}
%\aclfinalcopy % Uncomment this line for the final submission
%\def\aclpaperid{***} % Enter the acl Paper ID here
%\setlength\titlebox{5cm}
% You can expand the titlebox if you need extra space
% to show all the authors. Please do not make the titlebox
% smaller than 5cm (the original size); we will check this
% in the camera-ready version and ask you to change it back.
\newcommand\BibTeX{B{\sc ib}\TeX}
\title{Perturbation learning for general-purpose text validation}
\author{Vadim Liventsev \\
Center for Data-Intensive Science and Engineering \\
Skolkovo Institute of Science and Technology\\
3 Nobelya st., Moscow 121205 \\
{\tt [email protected]} \\\And
Mariya Sheyanova \\
School of Linguistics \\
Higher School of Economics \\
21/4 Staraya Basmannaya Ulitsa, Moscow 105066 \\
{\tt [email protected]} \\}
\date{}
\begin{document}
\maketitle
\begin{abstract}
Language learners and generative models alike are often in need of text validation: checking how natural a certain sentence sounds within a given language or style.
In this paper, we propose an approach to training a statistical validation model on a text corpus with no supervision.
This is achieved by applying random perturbations to sentences from the corpus and training a recurrent neural network to discriminate between the original sentences and the perturbed ones.
Choosing the right perturbation model, however, is far from trivial: the resulting validation model has to generalize beyond the specific perturbation we introduced and be able to recognize previously unseen kinds of deviations from the norm it learned from the corpus.
We develop several perturbation models, demonstrate and compare their generalization ability.
\end{abstract}
\section{Background}
\label{sec:background}
Text validation is the problem of discriminating text that belongs to a certain domain (a language or a subdomain of a language, such as a certain author's style) from text that contains errors.
Common applications of text validation include software that suggests improvements and error corrections for user-written text\footnote{for instance, Grammarly, \url{https://www.grammarly.com}} and as a quality control mechanism for generative models \cite{eval-genmodels}.
One way to develop a text validator is to manually implement a rule-based checker: an algorithm for text validation based on expert knowledge of the language at hand.
Early models (like \citet{easyenglish} and \citet{english-checker} for English, or \citet{swedish-checker} for Swedish) worked like this: TODO.
Modern solutions can make use of the extensive research that has been done on \emph{constraint-based grammars}: there are frameworks like HPSG, many resource grammars like \cite{russian-grammar} for Russian, and initiatives like DELPH-IN \footnote{Deep Linguistic Processing with HPSG Initiative, \url{http://www.delph-in.net/}}.
\citet{grammar-for-stylistics} shows how formal grammar can be used for stylistic validation.
The obvious downside of rule-based grammars is that they are language-specific: for every new language, a new grammar has to be developed, if not from scratch then with significant adaptation effort.
% Language modelling
% Using mistake data
% But here's why you need perturbation learning
\section{Methodology}
\label{sec:methodology}
We hypothesise that there exists a mechanism for applying random perturbations to sentences such that a discriminator trained to separate perturbed sentences from intact ones can be used to detect mistakes more generally.
To that end, we introduce several \emph{perturbation models}.
For each of them, we train a binary classifier (\emph{validation model}), test its performance on a holdout validation dataset and then on the datasets used to train other \emph{validation models}.
Our hypothesis can be considered confirmed if a \emph{validation model} trained with one \emph{perturbation model} correctly detects sentences modified with other \emph{perturbation models}.
\subsection{Perturbation models}
\subsubsection{Word-order perturbations}
The first model we employ is \emph{random word flip}: a randomly selected word in the sentence is moved to a randomly selected location in the sentence.
All words and locations have equal probability to be selected.
\emph{Shuffle} perturbation means reordering the entire sentence according to a random permutation.
Note that neither of these models guarantees that the perturbed sentence will be ungrammatical; in fact, either can leave the sentence entirely unchanged.
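A minimal sketch of the two perturbations at the token level (function names are illustrative, not part of our released code):
\begin{verbatim}
import random

def random_flip(tokens):
    # Move one randomly chosen token to a randomly chosen position.
    tokens = list(tokens)
    if len(tokens) < 2:
        return tokens
    word = tokens.pop(random.randrange(len(tokens)))
    tokens.insert(random.randrange(len(tokens) + 1), word)
    return tokens

def shuffle(tokens):
    # Reorder the whole sentence according to a random permutation.
    tokens = list(tokens)
    random.shuffle(tokens)
    return tokens
\end{verbatim}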
\subsubsection{Word-form perturbations}
This kind of perturbation is performed using \texttt{pymorphy2} \cite{pymorphy2} and includes two types of transformations based on morphological analysis and generation; a code sketch follows the list below.
\begin{itemize}
\item During \emph{random lemmatization}, each token in a sentence is either lemmatized with some probability (we use 50\% probability) or left as it is.
\item \emph{Random inflection} is similar to \emph{random lemmatization}, but instead of replacing a token with its normal form, we take some other grammatical form of this word. For nouns, adjectives and personal pronouns, we randomly change case; for verbs, person is changed. Tokens with other parts of speech remain unchanged.
\end{itemize}
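The sketch below illustrates both transformations with \texttt{pymorphy2}; the probability value and the particular grammeme choices are illustrative.
\begin{verbatim}
import random
import pymorphy2

morph = pymorphy2.MorphAnalyzer()

def random_lemmatize(tokens, p=0.5):
    # Replace each token with its normal form with probability p.
    return [morph.parse(t)[0].normal_form if random.random() < p else t
            for t in tokens]

def random_inflect(tokens):
    # Randomly change case for nouns/adjectives/pronouns and person for verbs;
    # tokens with other parts of speech remain unchanged.
    out = []
    for t in tokens:
        parse = morph.parse(t)[0]
        if parse.tag.POS in {"NOUN", "ADJF", "NPRO"}:
            form = parse.inflect({random.choice(["nomn", "gent", "datv", "accs"])})
        elif parse.tag.POS == "VERB":
            form = parse.inflect({random.choice(["1per", "2per", "3per"])})
        else:
            form = None
        out.append(form.word if form else t)
    return out
\end{verbatim}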
\subsubsection{Markov chain perturbations}
This type of perturbation differs from the others in that, instead of making changes to an initially grammatical sentence, we train a generative n-gram language model to produce ill-formed sentences. To create the language model, we used the \texttt{markovify} \footnote{\url{https://github.com/jsvine/markovify}} implementation of Markov chains.
It is worth noting that not all of the sentences generated by the Markov chain are ungrammatical, but a significant part of them is, since the n-gram model cannot see further than n tokens into the past. In order to increase the number of ungrammatical sentences generated by the model, we suppress any generated sentence that overlaps the original text verbatim over 50\% or more of the sentence's word count.
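A minimal sketch of this setup (the corpus path is a placeholder; \texttt{max\_overlap\_ratio} is, to the best of our knowledge, markovify's built-in overlap filter, which we use here to implement the 50\% suppression):
\begin{verbatim}
import markovify

# Train a 2-gram Markov chain on the corpus (path is a placeholder).
with open("corpus.txt", encoding="utf-8") as f:
    model = markovify.Text(f.read(), state_size=2)

# Generate perturbed (often ungrammatical) sentences, suppressing outputs
# that overlap the source text over more than half of their word count.
negatives = []
while len(negatives) < 1000:
    sent = model.make_sentence(max_overlap_ratio=0.5)
    if sent is not None:
        negatives.append(sent)
\end{verbatim}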
\subsection{Validation model}
Neural network-based approaches have the additional benefit that the validation function $f(s)$ (where $s$ is a sentence) is differentiable ($\frac{df}{ds}$ can be computed easily) and can thus be used as a perceptual loss \cite{perceptualloss} to train a generative neural network that outputs natural-sounding text.
\section{Experimental setup}
\label{sec:setup}
\section{Results}
\label{sec:results}
\bibliographystyle{acl_natbib}
\bibliography{../../references/refs}
\end{document}
\documentclass[]{article}
\usepackage{lmodern}
\usepackage{amssymb,amsmath}
\usepackage{ifxetex,ifluatex}
\usepackage{fixltx2e} % provides \textsubscript
\ifnum 0\ifxetex 1\fi\ifluatex 1\fi=0 % if pdftex
\usepackage[T1]{fontenc}
\usepackage[utf8]{inputenc}
\else % if luatex or xelatex
\ifxetex
\usepackage{mathspec}
\else
\usepackage{fontspec}
\fi
\defaultfontfeatures{Ligatures=TeX,Scale=MatchLowercase}
\fi
% use upquote if available, for straight quotes in verbatim environments
\IfFileExists{upquote.sty}{\usepackage{upquote}}{}
% use microtype if available
\IfFileExists{microtype.sty}{%
\usepackage{microtype}
\UseMicrotypeSet[protrusion]{basicmath} % disable protrusion for tt fonts
}{}
\usepackage[margin=1in]{geometry}
\usepackage{hyperref}
\hypersetup{unicode=true,
pdftitle={Real-Time Targeted Vector Mosquito Monitoring},
pdfauthor={Global Mosquito Alert Consortium},
pdfborder={0 0 0},
breaklinks=true}
\urlstyle{same} % don't use monospace font for urls
\usepackage{natbib}
\bibliographystyle{apalike}
\usepackage{longtable,booktabs}
\usepackage{graphicx,grffile}
\makeatletter
\def\maxwidth{\ifdim\Gin@nat@width>\linewidth\linewidth\else\Gin@nat@width\fi}
\def\maxheight{\ifdim\Gin@nat@height>\textheight\textheight\else\Gin@nat@height\fi}
\makeatother
% Scale images if necessary, so that they will not overflow the page
% margins by default, and it is still possible to overwrite the defaults
% using explicit options in \includegraphics[width, height, ...]{}
\setkeys{Gin}{width=\maxwidth,height=\maxheight,keepaspectratio}
\IfFileExists{parskip.sty}{%
\usepackage{parskip}
}{% else
\setlength{\parindent}{0pt}
\setlength{\parskip}{6pt plus 2pt minus 1pt}
}
\setlength{\emergencystretch}{3em} % prevent overfull lines
\providecommand{\tightlist}{%
\setlength{\itemsep}{0pt}\setlength{\parskip}{0pt}}
\setcounter{secnumdepth}{5}
% Redefines (sub)paragraphs to behave more like sections
\ifx\paragraph\undefined\else
\let\oldparagraph\paragraph
\renewcommand{\paragraph}[1]{\oldparagraph{#1}\mbox{}}
\fi
\ifx\subparagraph\undefined\else
\let\oldsubparagraph\subparagraph
\renewcommand{\subparagraph}[1]{\oldsubparagraph{#1}\mbox{}}
\fi
%%% Use protect on footnotes to avoid problems with footnotes in titles
\let\rmarkdownfootnote\footnote%
\def\footnote{\protect\rmarkdownfootnote}
%%% Change title format to be more compact
\usepackage{titling}
% Create subtitle command for use in maketitle
\providecommand{\subtitle}[1]{
\posttitle{
\begin{center}\large#1\end{center}
}
}
\setlength{\droptitle}{-2em}
\title{Real-Time Targeted Vector Mosquito Monitoring}
\pretitle{\vspace{\droptitle}\centering\huge}
\posttitle{\par}
\subtitle{Best Practices Guide}
\author{Global Mosquito Alert Consortium}
\preauthor{\centering\large\emph}
\postauthor{\par}
\predate{\centering\large\emph}
\postdate{\par}
\date{2020-02-18}
\usepackage{booktabs}
\usepackage{amsthm}
\makeatletter
\def\thm@space@setup{%
\thm@preskip=8pt plus 2pt minus 4pt
\thm@postskip=\thm@preskip
}
\makeatother
\begin{document}
\maketitle
{
\setcounter{tocdepth}{2}
\tableofcontents
}
\hypertarget{section}{%
\section*{}\label{section}}
\addcontentsline{toc}{section}{}
\includegraphics{images/by-nc-sa.png}\\
The online version of this book is licensed under the \href{http://creativecommons.org/licenses/by-nc-sa/4.0/}{Creative Commons Attribution-NonCommercial-ShareAlike 4.0 International License}.
\hypertarget{intro}{%
\section{Introduction}\label{intro}}
The Global Mosquito Alert Consortium's (GMAC's) best practices guides offer experiences gained from a variety of projects that use citizen science to better understand and combat disease-vector mosquitoes. The goal is to create a growing repository of information about how best to use and customize the GMAC's citizen science toolkit for local implementation.
The present guide encompasses Pillar 1 of the GMAC toolkit: real-time targeted vector mosquito monitoring. This is a set of tools that enable citizen scientists to identify and report adult mosquitoes, and that facilitate the subsequent validation and analysis of these reports. As references, this guide draws on several existing projects: Mosquito Alert, iNaturalist, Muggen Radar, and Abuzz.
\hypertarget{objectives}{%
\section{Pillar Objectives}\label{objectives}}
The objectives of this pillar are to:
\begin{itemize}
\tightlist
\item
facilitate identification and reporting of targeted adult disease-vector mosquitoes by citizen scientists;
\item
validate and analyze this citizen science data;
\item
produce real-time, reliable scientific data and models for management and risk evaluation.
\end{itemize}
In addition to these objectives, this pillar includes the cross-cutting objectives of:
\begin{itemize}
\tightlist
\item
engaging citizen scientists in the fight against disease vector mosquitoes by helping them to better understand the problem and solutions;
\item
obtaining feedback from citizen scientists about monitoring and control strategies.
\end{itemize}
\hypertarget{citizen-scientist-roles-and-motivations}{%
\section{Citizen Scientist Roles and Motivations}\label{citizen-scientist-roles-and-motivations}}
This pillar involves citizen scientists primarily as collectors and validators of mosquito data. Citizen scientists form a massive network of sensors with the goal of expanding the geographic coverage of monitoring programs without the costs associated with traditional surveillance methods. Citizen scientists participate in data validation by reviewing others' reports, using practical identification tools and relying on both validator proficiency scores (each citizen scientist's proficiency in the validation process) and redundancy (multiple citizen scientists reviewing each report) to reduce errors. Experts are also involved in the validation stage to improve accuracy and provide a point of comparison for generating the citizen scientists' proficiency scores. The analysis is done by experts as well, but mechanisms for better involving citizen scientists as experts will be explored in the future.
Citizen scientists participating in this Pillar are likely to have a variety of motivations depending on local conditions and socio-demographic factors. There has yet to be a systematic study of participant motivations in the existing GMAC projects but a number of conclusions can be drawn from anecdotal evidence coming out of these projects.
The citizen scientists who participate in Mosquito Alert in areas of Spain with high mosquito prevalence appear to be motivated primarily by the annoyance of mosquitoes and a desire to ``do something'' in response to being bitten. Many are also motivated by concerns about the spread of mosquito-borne diseases, although this is more of an abstract concern in Spain as autochthonous transmission of such diseases has not yet been detected. It is likely that participants in areas of Spain with low mosquito prevalence are motivated more by this latter concern -- by the desire to act as sentinels against the arrival of a potentially dangerous species. In addition, it is likely that many participate due to the more traditional citizen science motivations of curiosity and interest in science.
These motivations are likely to vary in other areas of the world and to shift depending on season, local mosquito distribution and biting patterns, changing disease risks, and experiences with the toolkit or with other citizen science projects. Understanding how this variation and shifting occurs, and how different motivations may blend into one another, is important both for ensuring sufficient participation levels and for correcting sampling bias. In addition, it should be stressed that the primary motivation of annoyance in high-prevalence areas makes mosquito-focused citizen science projects somewhat unique: At least in these areas, the target of the project itself provides participants with a continuous reminder to act.
\hypertarget{ethics}{%
\section{Ethics}\label{ethics}}
Ethical considerations take on heightened relevance when citizen scientists are involved in disease-vector monitoring -- and particularly when they are in the vicinity of organisms that could potentially carry dangerous diseases. These concerns fall into two categories: health risks and privacy. In addressing these, autonomy must also be used as a guiding value.
The most obvious concern in this pillar is the risk that participants are harmed by bites from the very mosquitoes they are reporting. Although people in high-prevalence areas are likely exposed to mosquito biting regardless of participation, projects falling within this pillar must be fully transparent about the risks and must be careful not to encourage increased risk-taking. Indeed, projects should ideally help participants reduce their risks, not only by harnessing their reports to provide better control, but also by teaching them how best to avoid bites and remove mosquito breeding sites.
Privacy concerns also take on new dimensions when disease vectors are involved. Participants' locations, while inherent in reporting and important for correcting sampling bias, can reveal a wide variety of information that individuals may prefer to keep private -- including disease risk. It is critical that people be fully informed about the location information they are revealing when they consent to participation and that they have control over how much information they share. The Mosquito Alert project, for example, relies on background location tracking to correct sampling bias, but it explains this to participants when they register and it allows them to turn the feature off or on at any time. It is also important to limit the amount of information collected: The Mosquito Alert app, for instance, collects only 5 background locations per day from each participant who has not opted out, and it obscures these locations by placing them in predefined cells of approximately 10 sq. kilometers and sharing only the cell identifiers with the central server. The iNaturalist app collects no background tracking information and it gives participants the option of obscuring report locations on the public webmap.
\hypertarget{data-collection}{%
\section{Data Collection}\label{data-collection}}
Data collection for this pillar involves, at a minimum, citizen scientists reporting adult mosquitoes as they observe them. This may be as simple as providing information about the date, time, and location of observations. Of greater use, however, are reports that (1) are limited to targeted species, (2) include the observer's taxonomic identification, (3) include the observer's description, through a brief questionnaire, of key features necessary for identification, and (4) are accompanied by a photograph of the mosquito. The last two pieces of information are particularly important, as they allow for subsequent data validation by other citizen scientists or experts. At the same time, not every report needs to include all this information to have value: As long as complete information is included in some reports, they can be used to estimate individual participants' proficiency, which can then serve as a basis for assessing the reliability of their other reports.
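To make the structure of such a report concrete, the sketch below shows one possible way of representing it in Python. It is purely illustrative: the field names are hypothetical and do not correspond to any particular project's schema.
\begin{verbatim}
from dataclasses import dataclass, field
from datetime import datetime
from typing import List, Optional

@dataclass
class MosquitoReport:
    # Minimum information: when and where the mosquito was observed.
    observed_at: datetime
    latitude: float
    longitude: float
    # Optional information that makes the report more useful.
    species_guess: Optional[str] = None                    # observer's identification
    questionnaire: dict = field(default_factory=dict)      # key features described
    photo_paths: List[str] = field(default_factory=list)   # photos of the specimen

    def is_validatable(self) -> bool:
        # Reports with a questionnaire or at least one photo can later be
        # reviewed by other citizen scientists or by experts.
        return bool(self.questionnaire) or bool(self.photo_paths)
\end{verbatim}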
The most efficient mechanism for transmitting reports is currently through smartphones or other mobile devices. This is the approach taken by all of the existing projects in this pillar and it allows for near real-time collection and analysis of data. The applications used for transmitting reports also serve as tools to help participants identify mosquitoes and take protective measures, to validate others' reports, and to view global project information and otherwise interact with project managers. At the same time, other reporting mechanisms, including SMS or even simply paper and regular mail, are clearly possible and might be explored for use in areas with low smartphone penetration.
\hypertarget{data-processing-and-validation}{%
\section{Data Processing and Validation}\label{data-processing-and-validation}}
Data collected by participants must be processed and validated before it can be usefully analyzed. Processing entails, at a minimum, combining reports into a centralized database; ideally it should also include some mechanism for validation and error-correction. Mosquito Alert relies on a central server with a set of Django/Python-based web applications that handle this process and provide front-end portals for various types of participants (general public, expert validators, etc.).
\hypertarget{participant-error-checking-and-revision}{%
\subsection{Participant Error-Checking and Revision}\label{participant-error-checking-and-revision}}
One component related to error-checking is ensuring that participants have the ability to change or even delete reports after sending them. Participants may at times accidentally mark the wrong location or enter some other erroneous information and the system should enable them to make changes. Mosquito Alert does this by allowing multiple versions of each report; all are stored for future reference but only the most recent is used in analysis and dissemination.
\hypertarget{sampling-effort-collection}{%
\subsection{Sampling Effort Collection}\label{sampling-effort-collection}}
Another component is the collection and analysis of information about participants' sampling effort. This information can be used to correct biases resulting from the uneven distribution of sampling activity across space and time. It is important to be able to determine, for example, whether no reports have been received from a particular town because there are no target species there or because there are no participants looking there; conversely, one must determine if a town that has lots of reports has elevated mosquito presence or simply many participants.
In Mosquito Alert, this is done by collecting a small amount of location information from participants. Unless they opt out of this feature, the Mosquito Alert application uses the network and satellite location services on participants' mobile devices to detect their locations 5 times per day. The times are randomly selected independently by each device each day during the hours when targeted species are most likely to be biting. As noted above in the ethics section, the device does not share the detected location itself with the central server, but instead shares only the identifier of the pre-defined sampling cell into which it falls. For computational efficiency (to reduce battery drain on the device), the sampling cell grid is defined simply by evenly spaced latitude and longitude divisions (initially 0.05 degrees each; currently 0.025 degrees each).
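The grid logic itself is straightforward. The following Python sketch illustrates how a device could map a detected location to a cell identifier at the current 0.025-degree resolution; it is an illustration of the idea, not the project's actual code.
\begin{verbatim}
CELL_SIZE_DEG = 0.025  # grid spacing in degrees of latitude/longitude

def sampling_cell_id(lat: float, lon: float,
                     cell_size: float = CELL_SIZE_DEG) -> str:
    """Identifier of the grid cell containing (lat, lon).

    Only this identifier is shared with the central server; the raw
    coordinates never leave the device.
    """
    row = int(lat // cell_size)   # cell row index on the latitude axis
    col = int(lon // cell_size)   # cell column index on the longitude axis
    return "{}_{}".format(row, col)

print(sampling_cell_id(41.3874, 2.1686))  # -> "1655_86"
\end{verbatim}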
One lesson learned by the Mosquito Alert project is that participant location provides only part of the picture in terms of sampling effort. It is also important to know when participants are actually in a position to observe and report targeted mosquitoes. Our experience is that most people install the application on their device and use it briefly but stop interacting with it after a relatively short period of time. There is also large variation among participants in the amount of time they spend using the application. We therefore model what we call ``reporting propensity'' as a function of time elapsed since installation of the application and intrinsic motivation. We then adjust sampling effort based on the reporting propensity of each participant. The process is complicated by the fact that we do not link background tracking information with reporting information (for privacy reasons). The results, however, have proven to be effective.
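The actual propensity model is fitted to project data and is not reproduced here, but the general shape described above can be illustrated with a toy example. Everything in the sketch below (the exponential-decay form, the parameter values) is an assumption made purely for illustration.
\begin{verbatim}
import math

def reporting_propensity(days_since_install: float,
                         baseline: float = 1.0,
                         decay_days: float = 30.0) -> float:
    # Toy model: willingness to report decays with time since the app
    # was installed; "baseline" stands in for intrinsic motivation.
    return baseline * math.exp(-days_since_install / decay_days)

def adjusted_sampling_effort(raw_effort: float,
                             days_since_install: float) -> float:
    # Down-weight the raw, location-based sampling effort by how likely
    # the participant is to actually report at this point in time.
    return raw_effort * reporting_propensity(days_since_install)
\end{verbatim}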
\hypertarget{report-validation}{%
\subsection{Report Validation}\label{report-validation}}
The validation stage is important as a way to directly check that reported mosquitoes are targeted species and to provide a basis for assessing participants' proficiency. The latter improves the ability to make accurate inferences about the reliability of those participants' reports that cannot be validated: many participants will send some reports that include a photograph usable for validation and others that do not (either no photograph or none in which the specimen can be clearly seen). Expert validation of the first category of report facilitates inferences about the second.
There are a number of different ways of carrying out validation, and the choice among them will depend on local circumstances as well as evolving research on what works best. The primary approach of Mosquito Alert is to rely on a team of 9 entomologists who review reports from citizen scientists through a special expert validation portal. Each report is reviewed independently by 3 of these entomologists. In addition to selecting a category indicating their level of confidence in the report being of a targeted species, the entomologists are also able to write internal notes and notes to the citizen scientist. They are also able to flag the report for review by the entomology team's leader, who can override any final decision.
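How the three independent opinions are combined is a project-level design decision. The sketch below shows one simple possibility in Python, a majority vote with a median fallback on an ordered confidence scale; the category names and the rule itself are hypothetical and are not the scoring scheme actually used by Mosquito Alert.
\begin{verbatim}
from collections import Counter
from typing import List

# Hypothetical confidence categories, ordered from "definitely not the
# targeted species" to "definitely the targeted species".
CATEGORIES = ["definitely_not", "probably_not", "unsure",
              "probably", "definitely"]

def aggregate_expert_labels(labels: List[str]) -> str:
    counts = Counter(labels)
    top, top_count = counts.most_common(1)[0]
    if top_count > len(labels) / 2:
        return top               # clear majority among the reviewers
    # No majority: fall back to the median category on the ordered scale.
    ranks = sorted(CATEGORIES.index(label) for label in labels)
    return CATEGORIES[ranks[len(ranks) // 2]]

print(aggregate_expert_labels(["probably", "definitely", "unsure"]))  # "probably"
\end{verbatim}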
Another approach to validation is to rely on other citizen scientists to review photographs. Mosquito Alert also uses this approach, sending each photograph to 30 different citizen scientists using the Crowd Crafting platform (which is accessed through the application directly or through a web browser).
Another project that has had success with this type of crowd-based validation approach is iNaturalist. That platform also allows much more interaction between citizen-science validators and the person making the original report (known as an observation), through a threaded conversation at the record level. Validated records arise from agreement among `identifiers', which leads the record to gain a data quality classification. The platform also provides an effective mechanism for cultivating citizen scientists' expertise in identifying certain species and thus improving their validation proficiency.
\hypertarget{data-presentation-and-use}{%
\section{Data Presentation and Use}\label{data-presentation-and-use}}
\hypertarget{end-users-and-requirements}{%
\subsection{End-users and requirements}\label{end-users-and-requirements}}
The pillar and the data generated can be of interest to 4 main types of end-users: the general public, public health managers, educators, and academics. Each targeted audience requires a different mode of data presentation and use.
\hypertarget{data-presentation-to-public-and-specific-end-users}{%
\subsection{Data presentation to public and specific end users}\label{data-presentation-to-public-and-specific-end-users}}
\hypertarget{general-public}{%
\subsubsection{General Public:}\label{general-public}}
\begin{itemize}
\tightlist
\item
General informative contents on main web and social networks, and also through app notifications (entomology, public health, distribution, prevention measures, etc.). Content should be distributed in a way that maximizes potential reuse while also protecting participants' privacy and respecting their preferences. Where possible, this should be done by placing it in the public domain with the Creative Commons ``no rights reserved'' mark (CC0). Where attribution is required, the Creative Commons Attribution License (CC BY) is recommended. This can be combined with privacy protections by listing the author as an ``anonymous'' citizen scientist, as is done for participant photos shared in Mosquito Alert. Another option is to give participants the choice of multiple licenses and let them choose how to list themselves, as is done in iNaturalist.
\item
Annual reports (freely downloadable from the web in PDF): a summary of the project's current results and achievements within the year, encompassing science, mosquito management, education and communication.
\item
Interactive Map: A public map with all the reports and validations, with filters (temporal and spatial) and from which anyone can directly download data in CSV, KML, and other formats. This can be done directly on project websites and/or by publishing the data on GBIF.org, which provides a web map, download options, dataset metrics, a download DOI, and statistics on downloads and citations. As explained in the first bullet-point above, content should be distributed in a way that maximizes potential reuse while also protecting participants' privacy and respecting their preferences.
\item
Direct access to data download may also be provided to the public separately from the map (for example, as is done in iNaturalist and Mosquito Alert). As explained in the first bullet-point above, content should be distributed in a way that maximizes potential reuse while also protecting participants' privacy and respecting their preferences.
\end{itemize}
\hypertarget{public-health-managers}{%
\subsubsection{Public Health Managers:}\label{public-health-managers}}
\begin{itemize}
\tightlist
\item
``Enrollment Kit'' for Managers: An open, downloadable document with all the information that stakeholders who survey and control mosquito populations need in order to use the Mosquito Alert platform for their own benefit, from the simplest uses of the data to more complex and committed ones.
\item
Private Portal: A digital platform with a private account where managers can see real-time information reported by citizens (even before expert validation). Data classification is more exhaustive than in the public map. The interface includes a set of temporal and spatial filters to manipulate data, a system to write notes to single or groups of participants, and the possibility to incorporate private geo-located management information (water drain cartography, risk areas, epidemiologically relevant information, etc.).
\item
Interactive Map: A public map with all the reports and validations, with filters (temporal and spatial) and from which anyone can directly download data in CSV, KML and other formats. As explained above in the first bullet point under General Public, content should be distributed in a way that maximizes potential reuse while also protecting participants' privacy and respecting their preferences.
\end{itemize}
\hypertarget{educators}{%
\subsubsection{Educators:}\label{educators}}
\begin{itemize}
\tightlist
\item
Educational contents in the form of Webquests or other digital formats, for teachers to use in the classroom.
\item
Leaflets, PowerPoint presentations, and other graphically designed materials for teachers to use in the classroom.
\item
Interactive Map: A public map with all the reports and validations, with filters (temporal and spatial) and from which anyone can directly download data in CSV, KML and other formats. As explained above in the first bullet point under General Public, content should be distributed in a way that maximizes potential reuse while also protecting participants' privacy and respecting their preferences.
\end{itemize}
\hypertarget{academics}{%
\subsubsection{Academics:}\label{academics}}
\begin{itemize}
\tightlist
\item
Interactive Map: A public map with all the reports and validations, with filters (temporal and spatial) and from which anyone can directly download data in CSV, KML and other formats. As explained above in the first bullet point under General Public, content should be distributed in a way that maximizes potential reuse while also protecting participants' privacy and respecting their preferences.
\item
Daily data transfers to an open-access repository such as Zenodo.
\item
Make code freely available in a public repository like GitHub under an open source license like the GNU General Public License (GPL) or the MIT license.
\item
Data sharing through GBIF. (To get started with this, contact your national GBIF node or, in the absence of one, register as a GBIF publisher and find a partner.)
\end{itemize}
\hypertarget{data-use-for-vector-management-and-research}{%
\subsection{Data use for vector management and research}\label{data-use-for-vector-management-and-research}}
There are a variety of options for facilitating data use by vector managers and researchers apart from the mechanisms for presentation and distribution described above. For example, iNaturalist allows anyone to create a project to aggregate any data at any level (for example, some set of vector species at a selected geographic location). Mosquito Alert cooperates directly with vector managers and other public stakeholders through various mechanisms. We draw on these in proposing the following models of cooperation.
\emph{Cooperation type 1}: \emph{I want to use Global Mosquito Alert's data for monitoring and control purposes.}
We offer the following options:
\begin{itemize}
\tightlist
\item
View data: view sightings of targeted mosquitoes and their breeding sites in your territory, filtering by sighting type and date (months and years).
\item
Share selected data: have you found one or more sightings of interest in your territory? A breeding site you were previously unaware of, for example? Share the information with those involved in monitoring and control activities in your territory (municipal personnel, pest control companies, town or city councils, etc.).
\item
Export data in a report: export sightings from a map view, with all their details (photo, coordinates, etc.), in the form of a report. Share it with those involved in monitoring and control activities in your territory (municipal personnel, pest control companies, town or city councils, etc.).
\item
Communicate with citizens via the app: tell us about your territory's monitoring and control activities. We can send app users in your town or city messages of your choosing via the notification system.
\item
Generate your own maps: create a hashtag (e.g.~\#LocalCouncil) for your territory's citizens and municipal personnel to include in the ``Notes'' section when they report sightings. It is possible to use a filter to export all the data corresponding to a hashtag from the public online map in the form of a list.
\end{itemize}
\emph{Cooperation type 2}: \emph{I want to carry out informative, educational or prevention activities or campaigns to combat targeted mosquitoes, (a) using Global Mosquito Alert's free resources or (b) arranging a face-to-face activity (education, awareness-raising actions).}
We offer you the tools and resources listed below, which will be available on Global Mosquito Alert's website, free of charge, for your own informative, educational or prevention activities or campaigns to combat targeted mosquitoes:
\begin{itemize}
\tightlist
\item
Informative leaflet in different languages, to distribute online or print. You can add your institution's logo using the version in .ppt format. If you need to adapt the leaflet further, please contact us.
\item
Global Mosquito Alert poster in different languages, to distribute online or print.
\item
Images and drawings of targeted mosquitoes and their breeding sites (see distribution licence in each case). Global Mosquito Alert images and logos.
\item
Extensive informative content, all up to date and revised by experts in scientific communication, covering entomology, public health, distribution, prevention measures, etc. Unless otherwise indicated, these may be reused under a Creative Commons licence (CC BY).
\item
Global Mosquito Alert's communication channels are available to you to give your campaign a boost online. Contact us to discuss ways of doing so.
\item
Web blog for brief communiqués or news items.
\item
Social media accounts (Twitter, Facebook, Weibo, WeChat).
\item
App notifications sent directly to the public participants.
\item
YouTube channel.
\end{itemize}
\emph{Cooperation type 3}: \emph{I want to establish a different type of cooperation or formalize cooperation through an agreement or protocol.}
Formalizing cooperation entails many benefits for both parties. It is a way of establishing a joint plan, specifying each party's rights and obligations, and pursuing optimal results in relation to common and individual goals alike, so that cooperation pays greater dividends. It is also a way for an institution to become an official partner of Global Mosquito Alert and gain access to the private managers' portal and the app notification system, through which managers can freely communicate their surveillance and control actions to citizens.
\hypertarget{data-licensing}{%
\subsection{Data Licensing}\label{data-licensing}}
Data should be released with an open access license or placed in the public domain (CC0). The latter option is best for maximizing potential reuse, but there may be situations in which CC-BY (open access with attribution required) is preferable. Restrictions beyond CC-BY are best avoided, although there may be value in giving individual citizen scientists a choice of multiple license options for their individual reports (as is done, for example, by iNaturalist).
\hypertarget{data-structures-and-repository-links}{%
\section{Data Structures and Repository Links}\label{data-structures-and-repository-links}}
The Mosquito Alert data is stored in a SQL database (PostgreSQL/PostGIS) that is managed by the Django/Python server layer. It is exported daily to Zenodo at \url{http://doi.org/10.5281/zenodo.597466}. The project has also set up a mechanism to share the data regularly with GBIF (see \url{https://www.gbif.org/es/dataset/1fef1ead-3d02-495e-8ff1-6aeb01123408}). It is also exploring the possibility of linking the application to iNaturalist so that participants' observations can be viewed in real time by the iNaturalist community.
The primary unit of observation for the Mosquito Alert data is a report-version. Each time a participant creates a new report or edits or deletes an existing report, a new report-version record is created. Different versions of the same report are linked by a unique report UUID automatically assigned to each report when it is created.
Each report-version is linked to the set of photographs that the participant included with the report, and to the participant's answers to the three taxonomic questions designed as a first check of validity. One important point here is that the application sends both the question and the answer, both written as posed to the participant in whatever language the participant was using. Both pieces of information (question and answer in the original language) are stored in the database. This makes it possible to deal with updates over time in the language used in the applications and to identify possible errors or sources of confusion that might be affecting data quality.
The most recent version of each report is sent to the expert validation system (and if new versions come in after validation, they are sent again). The validation results are then linked to the report-version (each report version also has its own unique UUID).
Reports are also linked to participants through a unique reporting UUID assigned by the application to each participant when they register. Data on participants includes registration time, which is used in the sampling effort model.
The background tracking data is stored in its own table, which contains a unique user UUID for background tracking purposes (different from the reporting UUID).
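A condensed sketch of this data model, written as Django models (the framework mentioned above), is shown below. It is illustrative only: the real Mosquito Alert schema has more tables and fields, and the names used here are hypothetical.
\begin{verbatim}
import uuid
from django.db import models

class Report(models.Model):
    # One logical report; all of its versions share this UUID.
    report_uuid = models.UUIDField(default=uuid.uuid4, unique=True)
    # Reporting UUID assigned to the participant at registration
    # (distinct from the background-tracking UUID).
    user_uuid = models.UUIDField()

class ReportVersion(models.Model):
    report = models.ForeignKey(Report, on_delete=models.CASCADE,
                               related_name="versions")
    version_uuid = models.UUIDField(default=uuid.uuid4, unique=True)
    created_at = models.DateTimeField(auto_now_add=True)
    deleted = models.BooleanField(default=False)
    # Taxonomic questions are stored together with their answers, in the
    # language in which they were shown to the participant.
    question_1 = models.TextField(blank=True)   # question text as shown
    answer_1 = models.TextField(blank=True)     # participant's answer
    # (question_2/answer_2 and question_3/answer_3 follow the same pattern)

class ReportPhoto(models.Model):
    version = models.ForeignKey(ReportVersion, on_delete=models.CASCADE)
    image = models.ImageField(upload_to="report_photos/")
\end{verbatim}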
\hypertarget{existing-tools}{%
\section{Existing Tools}\label{existing-tools}}
The Mosquito Alert system can be used by anyone in the world and it is possible for specific projects to make use of this directly, tracking their own data through the use of hashtags entered in the participant note section or through other mechanisms. All of the Mosquito Alert code for mobile device applications as well as server-side processes is free and open source (GPLv3), available at \url{https://github.com/MoveLab}.
\hypertarget{case-studies}{%
\section{Case Studies}\label{case-studies}}
Existing projects most relevant to this pillar include:
\begin{itemize}
\item
Mosquito Alert (\url{http://mosquitoalert.com})
\item
iNaturalist (\url{http://www.inaturalist.org/}), with example projects including \url{https://www.inaturalist.org/projects/mosquitoes-in-hawaii}.
\end{itemize}
\bibliography{book.bib}
\end{document}
| {
"alphanum_fraction": 0.80946516,
"avg_line_length": 89.4683195592,
"ext": "tex",
"hexsha": "cea621c3e1d02c00a288992c09a310b3ca644701",
"lang": "TeX",
"max_forks_count": null,
"max_forks_repo_forks_event_max_datetime": null,
"max_forks_repo_forks_event_min_datetime": null,
"max_forks_repo_head_hexsha": "9146a90277b737a8e0a516aff44212ac95c2b5c9",
"max_forks_repo_licenses": [
"CC0-1.0"
],
"max_forks_repo_name": "GlobalMosquitoAlert/guides-pillar1",
"max_forks_repo_path": "docs/gmac_guides_pillar1.tex",
"max_issues_count": null,
"max_issues_repo_head_hexsha": "9146a90277b737a8e0a516aff44212ac95c2b5c9",
"max_issues_repo_issues_event_max_datetime": null,
"max_issues_repo_issues_event_min_datetime": null,
"max_issues_repo_licenses": [
"CC0-1.0"
],
"max_issues_repo_name": "GlobalMosquitoAlert/guides-pillar1",
"max_issues_repo_path": "docs/gmac_guides_pillar1.tex",
"max_line_length": 1221,
"max_stars_count": null,
"max_stars_repo_head_hexsha": "9146a90277b737a8e0a516aff44212ac95c2b5c9",
"max_stars_repo_licenses": [
"CC0-1.0"
],
"max_stars_repo_name": "GlobalMosquitoAlert/guides-pillar1",
"max_stars_repo_path": "docs/gmac_guides_pillar1.tex",
"max_stars_repo_stars_event_max_datetime": null,
"max_stars_repo_stars_event_min_datetime": null,
"num_tokens": 6737,
"size": 32477
} |
\documentclass{article}
\usepackage[LGR, OT1, T1]{fontenc}
\usepackage[ngerman, english, USenglish]{babel}
\usepackage{blindtext}
\usepackage{xcolor}
\usepackage{amsmath, amsfonts, amssymb}
%\usepackage[notext, nott, slantedGreeks]{kpfonts} % The "partialup" option conflicts with the mathdesign package
%\batchmode % \widering is defined by kpfonts and then again by mathdesign
%\let\widering\relax
%\usepackage[charter, greeklowercase=italicized, greekuppercase=italicized]{mathdesign}
\usepackage[sloped]{fourier}
% When using the fourier package, make all uppercase Greek letters italic ==>
%\makeatletter
% \@for\@tempa:=%
% %alpha,beta,gamma,delta,epsilon,zeta,eta,theta,iota,kappa,lambda,mu,nu,xi,%
% %pi,rho,sigma,varsigma,tau,upsilon,phi,chi,psi,omega,digamma,%
% Gamma,Delta,Theta,Lambda,Xi,Pi,Sigma,Upsilon,Phi,Psi,Omega%
% \do{\expandafter\let\csname\@tempa\expandafter\endcsname\csname other\@tempa\endcsname}%
%\makeatother
\let \Gamma \otherGamma
\let \Delta \otherDelta
\let \Theta \otherTheta
\let \Lambda \otherLambda
\let \Xi \otherXi
\let \Pi \otherPi
\let \Sigma \otherSigma
\let \Upsilon \otherUpsilon
\let \Phi \otherPhi
\let \Psi \otherPsi
\let \Omega \otherOmega
% <==
\usepackage[p, sups, scaled=0.979, space=1.045]{erewhon} % Utopia with proportional lining figures as text font; scaled and spaced to match the Utopia from the fourier package.
\let \alphaup \otheralpha
\newcommand{\mathbfit}[1]{\mathbf{\mathit{#1}}}
\usepackage{mathrsfs} % Provides \mathscr
\makeatletter
\let\savermmdseries\seriesdefault
\let\savermbfseries\bfdefault
\let\savermfamily\rmdefault
\makeatother
%\errorstopmode
\usepackage[book, semibold, lining, tabular]{FiraSans}
\newcommand{\savesffamily}{\sfdefault}
\makeatletter
\newcommand{\savesfmdseries}{\mdseries@sf}
\newcommand{\savesfbfseries}{\bfseries@sf}
\makeatother
% Take letters from Fira Sans in sans mathversion -->
%\DeclareMathVersion{sans}
%\DeclareSymbolFont{lettersfira} {T1}{\savesffamily}{\savesfmdseries}{n}
%\DeclareSymbolFont{letterssfit} {T1}{\savesffamily}{\savesfmdseries}{it}
%\DeclareSymbolFont{lettersboldsf} {T1}{\savesffamily}{\savesfbfseries}{n}
%\DeclareSymbolFont{lettersboldsfit}{T1}{\savesffamily}{\savesfbfseries}{n}
%\SetSymbolFont{operators}{sf}{T1}{\savesffamily}{\savesfmdseries}{n}
%\SetSymbolFont{letters}{sf}{T1}{\savesffamily}{\savesfmdseries}{it}%
%\SetSymbolFont{largesymbols}{sans}{OMX}{mdbch}{m}{n}%
%\SetSymbolFont{symbols} {sansbold}{OMS}{mdbch}{b}{n}%
%\SetSymbolFont{largesymbols}{sansbold}{OMX}{mdbch}{b}{n}%
%\usepackage{lmodern}
\DeclareMathVersion{sf}
%\SetSymbolFont{operators}{sf}{OT1}{cmbr}{m}{n}
%\SetSymbolFont{letters}{sf}{OT1}{\savesffamily}{\savesfmdseries}{it}
\SetSymbolFont{symbols}{sf}{OMS}{cmbrs}{m}{n}
%\SetMathAlphabet{\mathit}{sf}{OT1}{cmbr}{m}{sl}
%\SetMathAlphabet{\mathbf}{sf}{OT1}{cmbr}{bx}{n}
%\SetMathAlphabet{\mathtt}{sf}{OT1}{cmtl}{m}{n}
%\SetSymbolFont{largesymbols}{sf}{OMX}{iwona}{m}{n}
%\SetMathAlphabet{\mathsf}{bold}{OT1}{jkpss}{b}{n}
%\SetMathAlphabet{\mathsf}{sf}{OT1}{jkpss}{m}{n}
%\SetMathAlphabet{\mathsf}{boldrm}{OT1}{jkpss}{b}{n}
%\SetMathAlphabet{\mathsf}{sf}{OT1}{jkpss}{m}{n}
%\SetMathAlphabet{\mathsf}{boldsf}{OT1}{jkpss}{b}{n}
%\SetSymbolFont{operators}{bold}{OT1}{jkpss}{b}{n}
\DeclareMathVersion{boldsf}
\newif\IfInSansMode
\newif\IfInBoldMode
%% Loading all these packages jointly eats up math alphabets in the "normal" mathversion.
%% Avoid the "Too many math alphabets used in version normal" error by
%% relegating \mathcal and \mathfrak to a separate version. ==>
%%\MathastextDeclareVersion[n]{calligraphic}{OMS}{xmdcmsy}{m}{n}
%\let \mathcalaux \mathcal
%\let \mathfrakaux \mathfrak
%\renewcommand{\mathcal}[1]{%
% \textnormal{\IfInSansMode\mathversion{normal}$\mathcalaux{#1}$\mathastextversion{sans}%
% \else$\mathcalaux{#1}$%
% \fi\relax}%
%}
%\renewcommand{\mathfrak}[1]{%
% \textnormal{\IfInSansMode\mathversion{normal}$\mathfrakaux{#1}$\mathastextversion{sans}%
% \else$\mathfrakaux{#1}$%
% \fi\relax}%
%}
%% <==
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% Fill in ``missing'' Greek glyphs for completeness (not really necessary,
% since they look identical to Latin glyphs and are thus almost never used) ==>
\newcommand{\omicron}{o}
\newcommand{\Digamma}{F}
\newcommand{\Alpha} {A}
\newcommand{\Beta} {B}
\newcommand{\Epsilon}{E}
\newcommand{\Zeta} {Z}
\newcommand{\Eta} {H}
\newcommand{\Iota} {I}
\newcommand{\Kappa} {K}
\newcommand{\Mu} {M}
\newcommand{\Nu} {N}
\newcommand{\Omicron}{O}
\newcommand{\Rho} {P}
\newcommand{\Tau} {T}
\newcommand{\Chi} {X}
% <==
\newcommand{\renewgreekletters}{%
\renewcommand{\Alpha} {\textit{\fontencoding{LGR}\selectfont A}}
\renewcommand{\Beta} {\textit{\fontencoding{LGR}\selectfont B}}
\renewcommand{\Gamma} {\textit{\fontencoding{LGR}\selectfont G}}
\renewcommand{\Delta} {\textit{\fontencoding{LGR}\selectfont D}}
\renewcommand{\Epsilon} {\textit{\fontencoding{LGR}\selectfont E}}
\renewcommand{\Zeta} {\textit{\fontencoding{LGR}\selectfont Z}}
\renewcommand{\Eta} {\textit{\fontencoding{LGR}\selectfont H}}
\renewcommand{\Theta} {\textit{\fontencoding{LGR}\selectfont J}}
\renewcommand{\Iota} {\textit{\fontencoding{LGR}\selectfont I}}
\renewcommand{\Kappa} {\textit{\fontencoding{LGR}\selectfont K}}
\renewcommand{\Lambda} {\textit{\fontencoding{LGR}\selectfont L}}
\renewcommand{\Mu} {\textit{\fontencoding{LGR}\selectfont M}}
\renewcommand{\Nu} {\textit{\fontencoding{LGR}\selectfont N}}
\renewcommand{\Xi} {\textit{\fontencoding{LGR}\selectfont X}}
\renewcommand{\Omicron} {\textit{\fontencoding{LGR}\selectfont O}}
\renewcommand{\Pi} {\textit{\fontencoding{LGR}\selectfont P}}
\renewcommand{\Rho} {\textit{\fontencoding{LGR}\selectfont R}}
\renewcommand{\Sigma} {\textit{\fontencoding{LGR}\selectfont S}}
\renewcommand{\Tau} {\textit{\fontencoding{LGR}\selectfont T}}
\renewcommand{\Upsilon} {\textit{\fontencoding{LGR}\selectfont U}}
\renewcommand{\Phi} {\textit{\fontencoding{LGR}\selectfont F}}
\renewcommand{\Chi} {\textit{\fontencoding{LGR}\selectfont Q}}
\renewcommand{\Psi} {\textit{\fontencoding{LGR}\selectfont Y}}
\renewcommand{\Omega} {\textit{\fontencoding{LGR}\selectfont W}}
\renewcommand{\Digamma} {\textit{\fontencoding{LGR}\selectfont \char195}}
%%% 'lowercase'
\renewcommand{\alpha} {\textit{\fontencoding{LGR}\selectfont a}}
\renewcommand{\beta} {\textit{\fontencoding{LGR}\selectfont b}}
\renewcommand{\gamma} {\textit{\fontencoding{LGR}\selectfont g}}
\renewcommand{\delta} {\textit{\fontencoding{LGR}\selectfont d}}
\renewcommand{\epsilon} {\textit{\fontencoding{LGR}\selectfont e}}
\renewcommand{\zeta} {\textit{\fontencoding{LGR}\selectfont z}}
\renewcommand{\eta} {\textit{\fontencoding{LGR}\selectfont h}}
\renewcommand{\theta} {\textit{\fontencoding{LGR}\selectfont j}}
\renewcommand{\iota} {\textit{\fontencoding{LGR}\selectfont i}}
\renewcommand{\kappa} {\textit{\fontencoding{LGR}\selectfont k}}
\renewcommand{\lambda} {\textit{\fontencoding{LGR}\selectfont l}}
\renewcommand{\mu} {\textit{\fontencoding{LGR}\selectfont m}}
\renewcommand{\nu} {\textit{\fontencoding{LGR}\selectfont n}}
\renewcommand{\xi} {\textit{\fontencoding{LGR}\selectfont x}}
\renewcommand{\omicron} {\textit{\fontencoding{LGR}\selectfont o}}
\renewcommand{\pi} {\textit{\fontencoding{LGR}\selectfont p}}
\renewcommand{\rho} {\textit{\fontencoding{LGR}\selectfont r}}
\renewcommand{\sigma} {\textit{\fontencoding{LGR}\selectfont s}}
\renewcommand{\varsigma}{\textit{\fontencoding{LGR}\selectfont c}}
\renewcommand{\tau} {\textit{\fontencoding{LGR}\selectfont t}}
\renewcommand{\upsilon} {\textit{\fontencoding{LGR}\selectfont u}}
\renewcommand{\phi} {\textit{\fontencoding{LGR}\selectfont f}}
\renewcommand{\chi} {\textit{\fontencoding{LGR}\selectfont q}}
\renewcommand{\psi} {\textit{\fontencoding{LGR}\selectfont y}}
\renewcommand{\omega} {\textit{\fontencoding{LGR}\selectfont w}}
\renewcommand{\digamma} {\textit{\fontencoding{LGR}\selectfont \char147}}
}
\renewcommand{\renewgreekletters}{}
\frenchspacing
% An auxiliary command to display the current font settings -->
\makeatletter
\newcommand{\showfont}{{%
\color{magenta}
\textit{Encoding:} \f@encoding{},
\textit{family:} \f@family{},
\textit{series:} \f@series{},
\textit{shape:} \f@shape{},
\textit{size:} \f@size{}.
}}
\makeatother
% <--
\makeatletter
\newcommand*{\checkgreekletters}{%
\@for\@tempa:=%
alpha,beta,gamma,delta,epsilon,zeta,eta,theta,iota,kappa,lambda,mu,nu,xi,%
omicron,pi,rho,sigma,varsigma,tau,upsilon,phi,chi,psi,omega,digamma,%
Alpha,Beta,Gamma,Delta,Epsilon,Zeta,Eta,Theta,Iota,Kappa,Lambda,Mu,Nu,Xi,%
Omicron,Pi,Rho,Sigma,Tau,Upsilon,Phi,Chi,Psi,Omega,Digamma%
\do{$\csname\@tempa\endcsname,$ }%
}
\makeatother
\input{../mathtest_preamble}
%%%%%%%%%%%%%%%%%%%%%%%
%% DOCUMENT BEGINS %%
%%%%%%%%%%%%%%%%%%%%%%%
\begin{document}
\selectlanguage{english}
\section{Saved for sans math}
\noindent
\savesffamily\
\savesfmdseries\
\savesfbfseries
{\Huge $x$\textit{x}$f$\textit{f}$B$\textit{B}}
\noindent Should match the size.
\section{Serif}
\rmfamily\mdseries\mathversion{normal}%
%\blindmathpaper
\newcommand{\coloneq}{:=}
\newcommand{\mathup}[1]{\mathrm{#1}}
\textbf{Simplest form of the \textit{Central Limit Theorem}:} \textit{Let
$X_1$, $X_2,\cdots$ be a sequence of i.i.d. random variables with mean~$0$
and variance $1$ on a probability space $(\Omega,\mathcal{F},\Pr)$. Then}
\[\Pr\left(\frac{X_1+\cdots+X_n}{\sqrt{n}}\le v\right)\to\mathfrak{N}(v)\coloneq
\int_{-\infty}^v \frac{\mathrm{e}^{-t^2/2}}{\sqrt{2\mathup{\pi}}}\,
\mathrm{d}t\quad\mbox{as $n\to\infty$,}\]
\textit{or, equivalently, letting} $S_n\coloneq\sum_1^n X_k$,
\[\mathbb{E} f\left(S_n/\sqrt{n}\right)\to \int_{-\infty}^\infty f(t)
\frac{\mathrm{e}^{-t^2/2}}{\sqrt{2\mathup{\pi}}}\,\mathrm{d}t
\quad\mbox{as $n\to\infty$, for every $f\in\mathrm{b}
\mathcal{C}(\mathbb{R})$.}\]
\section{Serif Bold}
\rmfamily\bfseries\mathversion{bold}
\blindmathpaper
\textbf{Simplest form of the \textit{Central Limit Theorem}:} \textit{Let
$X_1$, $X_2,\cdots$ be a sequence of iid random variables with mean~$0$
and variance $1$ on a probability space $(\Omega,\mathcal{F},\Pr)$. Then}
\[\Pr\left(\frac{X_1+\cdots+X_n}{\sqrt{n}}\le v\right)\to\mathfrak{N}(v)\coloneq
\int_{-\infty}^v \frac{\mathrm{e}^{-t^2/2}}{\sqrt{2\mathup{\pi}}}\,
\mathrm{d}t\quad\mbox{as $n\to\infty$,}\]
\textit{or, equivalently, letting} $S_n\coloneq\sum_1^n X_k$,
\[\mathbb{E} f\left(S_n/\sqrt{n}\right)\to \int_{-\infty}^\infty f(t)
\frac{\mathrm{e}^{-t^2/2}}{\sqrt{2\mathup{\pi}}}\,\mathrm{d}t
\quad\mbox{as $n\to\infty$, for every $f\in\mathrm{b}
\mathcal{C}(\mathbb{R})$.}\]
\section{Sans Serif}
\sffamily\mdseries\mathversion{sf}%
\renewgreekletters%
\blindmathpaper
\mathversion{sf}%
\textbf{Simplest form of the \textit{Central Limit Theorem}:} \textit{Let
$X_1$, $X_2,\cdots$ be a sequence of iid random variables with mean~$0$
and variance $1$ on a probability space $(\Omega,\mathcal{F},\Pr)$. Then}
\[\Pr\left(\frac{X_1+\cdots+X_n}{\sqrt{n}}\le v\right)\to\mathfrak{N}(v)\coloneq
\int_{-\infty}^v \frac{\mathrm{e}^{-t^2/2}}{\sqrt{2\mathup{\pi}}}\,
\mathrm{d}t\quad\mbox{as $n\to\infty$,}\]
\textit{or, equivalently, letting} $S_n\coloneq\sum_1^n X_k$,
\[\mathbb{E} f\left(S_n/\sqrt{n}\right)\to \int_{-\infty}^\infty f(t)
\frac{\mathrm{e}^{-t^2/2}}{\sqrt{2\mathup{\pi}}}\,\mathrm{d}t
\quad\mbox{as $n\to\infty$, for every $f\in\mathrm{b}
\mathcal{C}(\mathbb{R})$.}\]
\blindmathpaper
\section{Sans Serif Bold}
\sffamily\bfseries\mathversion{boldsf}
\renewgreekletters%
\blindmathpaper
\textbf{Simplest form of the \textit{Central Limit Theorem}:} \textit{Let
$X_1$, $X_2,\cdots$ be a sequence of iid random variables with mean~$0$
and variance $1$ on a probability space $(\Omega,\mathcal{F},\Pr)$. Then}
\[\Pr\left(\frac{X_1+\cdots+X_n}{\sqrt{n}}\le v\right)\to\mathfrak{N}(v)\coloneq
\int_{-\infty}^v \frac{\mathrm{e}^{-t^2/2}}{\sqrt{2\mathup{\pi}}}\,
\mathrm{d}t\quad\mbox{as $n\to\infty$,}\]
\textit{or, equivalently, letting} $S_n\coloneq\sum_1^n X_k$,
\[\mathbb{E} f\left(S_n/\sqrt{n}\right)\to \int_{-\infty}^\infty f(t)
\frac{\mathrm{e}^{-t^2/2}}{\sqrt{2\mathup{\pi}}}\,\mathrm{d}t
\quad\mbox{as $n\to\infty$, for every $f\in\mathrm{b}
\mathcal{C}(\mathbb{R})$.}\]
\blindmathpaper
\input{../mathtest_serif}
%\input{../mathtest_serifbold}
%\input{../mathtest_sans}
%\input{../mathtest_sansbold}
\end{document} | {
"alphanum_fraction": 0.7003961965,
"avg_line_length": 38.358662614,
"ext": "tex",
"hexsha": "31adb5bae0e5f4d323fb014d6c267bb4e76a496a",
"lang": "TeX",
"max_forks_count": 10,
"max_forks_repo_forks_event_max_datetime": "2021-10-12T04:13:23.000Z",
"max_forks_repo_forks_event_min_datetime": "2019-11-02T03:10:26.000Z",
"max_forks_repo_head_hexsha": "3ab28a23fb60cb0a97fcec883847e2d8728b98c0",
"max_forks_repo_licenses": [
"MIT"
],
"max_forks_repo_name": "lemoxiao/Awesome-Beamer-Collection",
"max_forks_repo_path": "200+ beamer 模板合集/TeXTemplates(论文,报告,beamer,学术报告)/2_Math_Tests/Obsolete/mathtest_kpfonts_no_mathastext.tex",
"max_issues_count": null,
"max_issues_repo_head_hexsha": "3ab28a23fb60cb0a97fcec883847e2d8728b98c0",
"max_issues_repo_issues_event_max_datetime": null,
"max_issues_repo_issues_event_min_datetime": null,
"max_issues_repo_licenses": [
"MIT"
],
"max_issues_repo_name": "lemoxiao/Awesome-Beamer-Collection",
"max_issues_repo_path": "200+ beamer 模板合集/TeXTemplates(论文,报告,beamer,学术报告)/2_Math_Tests/Obsolete/mathtest_kpfonts_no_mathastext.tex",
"max_line_length": 175,
"max_stars_count": 13,
"max_stars_repo_head_hexsha": "3ab28a23fb60cb0a97fcec883847e2d8728b98c0",
"max_stars_repo_licenses": [
"MIT"
],
"max_stars_repo_name": "lemoxiao/Awesome-Beamer-Collection",
"max_stars_repo_path": "200+ beamer 模板合集/TeXTemplates(论文,报告,beamer,学术报告)/2_Math_Tests/Obsolete/mathtest_kpfonts_no_mathastext.tex",
"max_stars_repo_stars_event_max_datetime": "2021-12-24T09:27:26.000Z",
"max_stars_repo_stars_event_min_datetime": "2019-07-30T04:09:54.000Z",
"num_tokens": 4689,
"size": 12620
} |
\section{\scshape Proposal}\label{sec:proposal}
\subsection{Research questions}
\begin{frame}{Research questions}
\begin{itemize}
\item How to reliably learn new reusable semantic assembly skills from human demonstrations?
\item How to automatically extract assembly information from CAD / SOP data?
\item How to efficiently coordinate complex assembly procedures between humans and robots in a shared work space?
\end{itemize}
\end{frame}
\subsection{Objectives}
\begin{frame}{Objectives}
\begin{itemize}
\item Development of a cooperative assembly system capable of:
\begin{itemize}
\item Reliably learning by human demonstration
\item Performing cooperative assembly tasks with human operators
\item Helping human operators perform their tasks faster by projecting assembly information into the workspace
\begin{itemize}
\item Showing which objects the human should pick up
\item Where to place the objects with precision (no need for manual measurements)
\item The order of assembly
\end{itemize}
\end{itemize}
\end{itemize}
\end{frame}
\subsection{Case studies}
\begin{frame}{Case studies}
\begin{itemize}
\item Assembly of objects with increasing complexity, such as:
\begin{itemize}
\item Gearboxes
\item Alternators
\item Engines
\end{itemize}
\end{itemize}
\begin{figure}[!ht]
\centering
\begin{minipage}{0.32\textwidth}
\centering
\includegraphics[height=.28\textheight]{gearbox}
\caption{Gearbox parts}
\end{minipage}%
\begin{minipage}{.32\textwidth}
\centering
\includegraphics[height=.28\textheight]{alternator}
\caption{Alternator parts}
\end{minipage}%
\begin{minipage}{0.32\textwidth}
\centering
\includegraphics[height=.28\textheight]{engines}
\caption{Engine parts}
\end{minipage}
\end{figure}
\end{frame}
\subsection{Methodology}
\begin{frame}{Methodology}
\begin{itemize}
\item Detailed description of the intended functionality and final users of each software module
\item Selection of the hardware and software platform in which the research will be performed
\item Selection / creation of representative testing datasets for each of the main software modules
\begin{itemize}
\item 2D / 3D perception of geometry / objects and operator movements
\item Learning new skills
\item Human Machine Interface
\end{itemize}
\item Test and comparison of current state of the art methods for each software module
\item Development of new / improved methods / algorithms when required
\item Focus research on reliably learning new assembly skills
\item Industrial testing of each software module with the final users
\end{itemize}
\end{frame}
| {
"alphanum_fraction": 0.7618161518,
"avg_line_length": 34.8961038961,
"ext": "tex",
"hexsha": "946d4861e1eeca3f57d968150bd004285b17cab8",
"lang": "TeX",
"max_forks_count": null,
"max_forks_repo_forks_event_max_datetime": null,
"max_forks_repo_forks_event_min_datetime": null,
"max_forks_repo_head_hexsha": "c144ec287e2d4ed934586b031485cdbda5495d1e",
"max_forks_repo_licenses": [
"MIT"
],
"max_forks_repo_name": "carlosmccosta/prodei-research-planning-presentation",
"max_forks_repo_path": "tex/sections/proposal.tex",
"max_issues_count": null,
"max_issues_repo_head_hexsha": "c144ec287e2d4ed934586b031485cdbda5495d1e",
"max_issues_repo_issues_event_max_datetime": null,
"max_issues_repo_issues_event_min_datetime": null,
"max_issues_repo_licenses": [
"MIT"
],
"max_issues_repo_name": "carlosmccosta/prodei-research-planning-presentation",
"max_issues_repo_path": "tex/sections/proposal.tex",
"max_line_length": 115,
"max_stars_count": null,
"max_stars_repo_head_hexsha": "c144ec287e2d4ed934586b031485cdbda5495d1e",
"max_stars_repo_licenses": [
"MIT"
],
"max_stars_repo_name": "carlosmccosta/prodei-research-planning-presentation",
"max_stars_repo_path": "tex/sections/proposal.tex",
"max_stars_repo_stars_event_max_datetime": null,
"max_stars_repo_stars_event_min_datetime": null,
"num_tokens": 740,
"size": 2687
} |
\title{Deep Probabilistic Programming}
\subsection{Deep Probabilistic Programming}
This webpage is a companion to the article,
\href{https://arxiv.org/abs/1701.03757}{Deep Probabilistic Programming}
\citep{tran2017deep}.
Here we provide more details for plug-and-play with the code snippets.
An interactive version with Jupyter notebook is available
\href{http://nbviewer.jupyter.org/github/blei-lab/edward/blob/master/notebooks/iclr2017.ipynb}{here}.
The code snippets assume the following versions.
\begin{lstlisting}[language=bash]
pip install edward==1.3.1
pip install tensorflow==1.1.0 # alternatively, tensorflow-gpu==1.1.0
pip install keras==2.0.0
\end{lstlisting}
\subsubsection{Section 3. Compositional Representations for Probabilistic Models}
\textbf{Figure 1}. Beta-Bernoulli program.
\begin{lstlisting}[language=python]
import tensorflow as tf
from edward.models import Bernoulli, Beta
theta = Beta(1.0, 1.0)
x = Bernoulli(tf.ones(50) * theta)
\end{lstlisting}
For an example of it in use, see
\href{https://github.com/blei-lab/edward/blob/master/examples/beta_bernoulli.py}{\texttt{examples/beta\_bernoulli.py}}
in the Github repository.
\textbf{Figure 2}. Variational auto-encoder for a data set of 28 x 28 pixel images
\citep{kingma2014auto,rezende2014stochastic}.
\begin{lstlisting}[language=python]
import tensorflow as tf
from edward.models import Bernoulli, Normal
from keras.layers import Dense
N = 55000 # number of data points
d = 50 # latent dimension
# Probabilistic model
z = Normal(loc=tf.zeros([N, d]), scale=tf.ones([N, d]))
h = Dense(256, activation='relu')(z)
x = Bernoulli(logits=Dense(28 * 28, activation=None)(h))
# Variational model
qx = tf.placeholder(tf.float32, [N, 28 * 28])
qh = Dense(256, activation='relu')(qx)
qz = Normal(loc=Dense(d, activation=None)(qh),
scale=Dense(d, activation='softplus')(qh))
\end{lstlisting}
For an example of it in use, see
\href{https://github.com/blei-lab/edward/blob/master/examples/vae.py}{\texttt{examples/vae.py}}
in the Github repository.
\textbf{Figure 3}. Bayesian recurrent neural network \citep{neal2012bayesian}.
The program has an unspecified number of time steps; it uses a
symbolic for loop (\texttt{tf.scan}).
\begin{lstlisting}[language=python]
import edward as ed
import tensorflow as tf
from edward.models import Normal
H = 50 # number of hidden units
D = 10 # number of features
def rnn_cell(hprev, xt):
return tf.tanh(ed.dot(hprev, Wh) + ed.dot(xt, Wx) + bh)
Wh = Normal(loc=tf.zeros([H, H]), scale=tf.ones([H, H]))
Wx = Normal(loc=tf.zeros([D, H]), scale=tf.ones([D, H]))
Wy = Normal(loc=tf.zeros([H, 1]), scale=tf.ones([H, 1]))
bh = Normal(loc=tf.zeros(H), scale=tf.ones(H))
by = Normal(loc=tf.zeros(1), scale=tf.ones(1))
x = tf.placeholder(tf.float32, [None, D])
h = tf.scan(rnn_cell, x, initializer=tf.zeros(H))
y = Normal(loc=tf.matmul(h, Wy) + by, scale=1.0)
\end{lstlisting}
\subsubsection{Section 4. Compositional Representations for Inference}
\textbf{Figure 5}. Hierarchical model \citep{gelman2006data}.
It is a mixture of Gaussians over
$D$-dimensional data $\{x_n\}\in\mathbb{R}^{N\times D}$. There are
$K$ latent cluster means $\beta\in\mathbb{R}^{K\times D}$.
\begin{lstlisting}[language=python]
import tensorflow as tf
from edward.models import Categorical, Normal
N = 10000 # number of data points
D = 2 # data dimension
K = 5 # number of clusters
beta = Normal(loc=tf.zeros([K, D]), scale=tf.ones([K, D]))
z = Categorical(logits=tf.zeros([N, K]))
x = Normal(loc=tf.gather(beta, z), scale=tf.ones([N, D]))
\end{lstlisting}
It is used below in Figure 6 (left/right) and Figure * (variational EM).
\textbf{Figure 6} \textbf{(left)}. Variational inference
\citep{jordan1999introduction}.
It performs inference on the model defined in Figure 5.
\begin{lstlisting}[language=python]
import edward as ed
import numpy as np
import tensorflow as tf
from edward.models import Categorical, Normal
x_train = np.zeros([N, D])
qbeta = Normal(loc=tf.Variable(tf.zeros([K, D])),
scale=tf.exp(tf.Variable(tf.zeros([K, D]))))
qz = Categorical(logits=tf.Variable(tf.zeros([N, K])))
inference = ed.VariationalInference({beta: qbeta, z: qz}, data={x: x_train})
\end{lstlisting}
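In this snippet, \texttt{ed.VariationalInference} stands for the generic class of algorithms; to actually run inference, one would instantiate a concrete algorithm. A minimal sketch, assuming the model and data above (\texttt{ed.KLqp} is one such algorithm available in Edward 1.3.1):
\begin{lstlisting}[language=python]
# Concrete variational algorithm (KL(q || p) minimization) over the same
# latent variables and data as the generic example above.
inference = ed.KLqp({beta: qbeta, z: qz}, data={x: x_train})
inference.run(n_iter=1000)
\end{lstlisting}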
\textbf{Figure 6} \textbf{(right)}. Monte Carlo \citep{robert1999monte}.
It performs inference on the model defined in Figure 5.
\begin{lstlisting}[language=python]
import edward as ed
import numpy as np
import tensorflow as tf
from edward.models import Empirical
x_train = np.zeros([N, D])
T = 10000 # number of samples
qbeta = Empirical(params=tf.Variable(tf.zeros([T, K, D])))
qz = Empirical(params=tf.Variable(tf.zeros([T, N])))
inference = ed.MonteCarlo({beta: qbeta, z: qz}, data={x: x_train})
\end{lstlisting}
\textbf{Figure 7}. Generative adversarial network
\citep{goodfellow2014generative}.
\begin{lstlisting}[language=python]
import edward as ed
import numpy as np
import tensorflow as tf
from edward.models import Normal
from keras.layers import Dense
N = 55000 # number of data points
d = 50 # latent dimension
def generative_network(eps):
h = Dense(256, activation='relu')(eps)
return Dense(28 * 28, activation=None)(h)
def discriminative_network(x):
h = Dense(28 * 28, activation='relu')(x)
return Dense(1, activation=None)(h)
# DATA
x_train = np.zeros([N, 28 * 28])
# MODEL
eps = Normal(loc=tf.zeros([N, d]), scale=tf.ones([N, d]))
x = generative_network(eps)
# INFERENCE
inference = ed.GANInference(data={x: x_train},
discriminator=discriminative_network)
\end{lstlisting}
For an example of it in use, see the
\href{/tutorials/gan}{generative adversarial networks} tutorial.
\textbf{Figure *}. Variational EM \citep{neal1993new}.
It performs inference on the model defined in Figure 5.
\begin{lstlisting}[language=python]
import edward as ed
import numpy as np
import tensorflow as tf
from edward.models import Categorical, PointMass
# DATA
x_train = np.zeros([N, D])
# INFERENCE
qbeta = PointMass(params=tf.Variable(tf.zeros([K, D])))
qz = Categorical(logits=tf.Variable(tf.zeros([N, K])))
inference_e = ed.VariationalInference({z: qz}, data={x: x_train, beta: qbeta})
inference_m = ed.MAP({beta: qbeta}, data={x: x_train, z: qz})
inference_e.initialize()
inference_m.initialize()
tf.global_variables_initializer().run()
for _ in range(10000):
inference_e.update()
inference_m.update()
\end{lstlisting}
For more details, see the
\href{/api/inference-compositionality}{inference compositionality} webpage.
See
\href{https://github.com/blei-lab/edward/blob/master/examples/factor_analysis.py}{\texttt{examples/factor\_analysis.py}} for
a version performing Monte Carlo EM for logistic factor analysis
in the Github repository.
It leverages Hamiltonian Monte Carlo for the E-step to perform maximum
marginal a posteriori.
\textbf{Figure *}. Data subsampling.
\begin{lstlisting}[language=python]
import edward as ed
import tensorflow as tf
from edward.models import Categorical, Normal
N = 10000 # number of data points
M = 128 # batch size during training
D = 2 # data dimension
K = 5 # number of clusters
# DATA
x_batch = tf.placeholder(tf.float32, [M, D])
# MODEL
beta = Normal(loc=tf.zeros([K, D]), scale=tf.ones([K, D]))
z = Categorical(logits=tf.zeros([M, K]))
x = Normal(loc=tf.gather(beta, z), scale=tf.ones([M, D]))
# INFERENCE
qbeta = Normal(loc=tf.Variable(tf.zeros([K, D])),
scale=tf.nn.softplus(tf.Variable(tf.zeros([K, D]))))
qz = Categorical(logits=tf.Variable(tf.zeros([M, K])))
inference = ed.VariationalInference({beta: qbeta, z: qz}, data={x: x_batch})
inference.initialize(scale={x: float(N) / M, z: float(N) / M})
\end{lstlisting}
For more details, see the
\href{/api/inference-data-subsampling}{data subsampling} webpage.
\subsubsection{Section 5. Experiments}
\textbf{Figure 9}. Bayesian logistic regression with Hamiltonian Monte Carlo.
\begin{lstlisting}[language=python]
import edward as ed
import numpy as np
import tensorflow as tf
from edward.models import Bernoulli, Empirical, Normal
N = 581012 # number of data points
D = 54 # number of features
T = 100 # number of empirical samples
# DATA
x_data = np.zeros([N, D])
y_data = np.zeros([N])
# MODEL
x = tf.Variable(x_data, trainable=False)
beta = Normal(loc=tf.zeros(D), scale=tf.ones(D))
y = Bernoulli(logits=ed.dot(x, beta))
# INFERENCE
qbeta = Empirical(params=tf.Variable(tf.zeros([T, D])))
inference = ed.HMC({beta: qbeta}, data={y: y_data})
inference.run(step_size=0.5 / N, n_steps=10)
\end{lstlisting}
For an example of it in use, see
\href{https://github.com/blei-lab/edward/blob/master/examples/bayesian_logistic_regression.py}{\texttt{examples/bayesian\_logistic\_regression.py}}
in the Github repository.
\subsubsection{Appendix A. Model Examples}
\textbf{Figure 10}. Bayesian neural network for classification \citep{denker1987large}.
\begin{lstlisting}[language=python]
import tensorflow as tf
from edward.models import Bernoulli, Normal
N = 1000 # number of data points
D = 528 # number of features
H = 256 # hidden layer size
W_0 = Normal(loc=tf.zeros([D, H]), scale=tf.ones([D, H]))
W_1 = Normal(loc=tf.zeros([H, 1]), scale=tf.ones([H, 1]))
b_0 = Normal(loc=tf.zeros(H), scale=tf.ones(H))
b_1 = Normal(loc=tf.zeros(1), scale=tf.ones(1))
x = tf.placeholder(tf.float32, [N, D])
y = Bernoulli(logits=tf.matmul(tf.nn.tanh(tf.matmul(x, W_0) + b_0), W_1) + b_1)
\end{lstlisting}
For an example of it in use, see
\href{https://github.com/blei-lab/edward/blob/master/examples/getting_started_example.py}{\texttt{examples/getting\_started\_example.py}}
in the Github repository.
\textbf{Figure 11}. Latent Dirichlet allocation \citep{blei2003latent}.
\begin{lstlisting}[language=python]
import tensorflow as tf
from edward.models import Categorical, Dirichlet
D = 4 # number of documents
N = [11502, 213, 1523, 1351] # words per doc
K = 10 # number of topics
V = 100000 # vocabulary size
theta = Dirichlet(tf.zeros([D, K]) + 0.1)
phi = Dirichlet(tf.zeros([K, V]) + 0.05)
z = [[0] * N[d] for d in range(D)]
w = [[0] * N[d] for d in range(D)]
for d in range(D):
for n in range(N[d]):
z[d][n] = Categorical(theta[d, :])
w[d][n] = Categorical(phi[z[d][n], :])
\end{lstlisting}
\textbf{Figure 12}. Gaussian matrix factorization
\citep{salakhutdinov2011probabilistic}.
\begin{lstlisting}[language=python]
import tensorflow as tf
from edward.models import Normal
N = 10
M = 10
K = 5 # latent dimension
U = Normal(loc=tf.zeros([M, K]), scale=tf.ones([M, K]))
V = Normal(loc=tf.zeros([N, K]), scale=tf.ones([N, K]))
Y = Normal(loc=tf.matmul(U, V, transpose_b=True), scale=tf.ones([N, M]))
\end{lstlisting}
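A possible way to fit the factorization (a sketch under assumptions; \texttt{Y\_data}
is a hypothetical observed $10 \times 10$ array) is MAP point estimation of $U$ and
$V$:
\begin{lstlisting}[language=python]
import edward as ed
import numpy as np
from edward.models import PointMass
# Hypothetical observed matrix; substitute real ratings or measurements.
Y_data = np.zeros([N, M], dtype=np.float32)
qU = PointMass(params=tf.Variable(tf.zeros([M, K])))
qV = PointMass(params=tf.Variable(tf.zeros([N, K])))
inference = ed.MAP({U: qU, V: qV}, data={Y: Y_data})
inference.run(n_iter=2000)
\end{lstlisting}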
\textbf{Figure 13}. Dirichlet process mixture model \citep{antoniak1974mixtures}.
\begin{lstlisting}[language=python]
import tensorflow as tf
from edward.models import DirichletProcess, Normal
N = 1000 # number of data points
D = 5 # data dimensionality
dp = DirichletProcess(alpha=1.0, base=Normal(loc=tf.zeros(D), scale=tf.ones(D)))
mu = dp.sample(N)
x = Normal(loc=mu, scale=tf.ones([N, D]))
\end{lstlisting}
To see the essential component defining the \texttt{DirichletProcess}, see
\href{https://github.com/blei-lab/edward/blob/master/examples/pp_dirichlet_process.py}{\texttt{examples/pp\_dirichlet\_process.py}}
in the GitHub repository. Its source implementation can be found at
\href{https://github.com/blei-lab/edward/blob/master/edward/models/dirichlet_process.py}{\texttt{edward/models/dirichlet\_process.py}}.
\subsubsection{Appendix B. Inference Examples}
\textbf{Figure *}. Stochastic variational inference \citep{hoffman2013stochastic}.
For more details, see the
\href{/api/inference-data-subsampling}{data subsampling} webpage.
\subsubsection{Appendix C. Complete Examples}
\textbf{Figure 15}. Variational auto-encoder
\citep{kingma2014auto,rezende2014stochastic}.
See the script
\href{https://github.com/blei-lab/edward/blob/master/examples/vae.py}{\texttt{examples/vae.py}}
in the GitHub repository.
\textbf{Figure 16}. Exponential family embedding \citep{rudolph2016exponential}.
A GitHub repository with comprehensive features is available at
\href{https://github.com/mariru/exponential_family_embeddings}{mariru/exponential\_family\_embeddings}.
\subsubsection{References}\label{references}
\documentclass[./butidigress.tex]{subfiles}
\begin{document}
\chapter{From Where I (We) Came}\label{chap:fromwhereicame}
\newpage
\lettrine{A}{round} \num{13.799+-0.021} billion years ago,\autocite[32]{planckcollab} space and time began, with the Big Bang.
Soon after,\ftnote{On the order of \SI{e-33}{\second} later} the four fundamental forces of nature\ftnote{Strong, Weak, Electromagnetic, and Gravitation; all four will be explained in due time.} emerged.
Three hundred and eighty thousand years after that,\ftnote{In that \num{380000} years, the universe cooled down, allowing quarks to go from a quark-gluon plasma to forming hadrons, antimatter was annihilated, and neutrinos stopped interacting with normal matter} electrons started binding to atomic nuclei.\ftnote{Which came into existence earlier, but it was too hot for electrons to bind to nuclei.}
Over a hundred million years or so later, the first stars started forming.
From there, galaxies, including but not limited to the Milky Way, and galaxy clusters began to take shape.
Fast-forwarding a bit\lips
(But don't worry, we'll get into some of that stuff later on.)
Some \num{4.54+-0.04} billion years ago, the Earth was formed, coalescing out of the same cloud of dust and gas (nebula) from which the Sun, the other planets, and the Solar System generally were formed.
In the center of the nebula,\ftnote{Where the angular momentum was lowest, i.e., where centripetal force was highest ($F = mr\omega^{2}$), causing rapid compression leading to nuclear fusion.} the Sun was born; in a similar, but smaller scale, process planetary bodies began forming around areas of higher density.
Earth's history is split up, currently, into four eons, which are each divided into several eras, which are divided into periods, which are divided into epochs.
The four eons are the Hadean, the Archean, the Proterozoic, and the Phanerozoic, with the Hadean starting with Earth's formation and the Phanerozoic continuing today.
\setlength{\tabcolsep}{0.5em}
\tabulinesep=0.7em
\begin{table}[h]
\centering
\begin{tabu} to 0.75\textwidth {| X[1,r,m] | X[3,l,m] |}\hline
\multicolumn{2}{|c|}{\large\textbf{Earth's Eons}} \\ \hline \hline
\textbf{Hadean} & \numrange{4540}{4000} million years ago \\ \hline
\textbf{Archean} & \numrange{4000}{2500} million years ago \\ \hline
\textbf{Proterozoic} & \numrange{2500}{542} million years ago \\ \hline
\textbf{Phanerozoic} & \num{541} million years ago to present \\ \hline
\end{tabu}
\end{table}
\digsec{Time Divided}{7}{27}{18}{geologicaltimescale}
\textsc{A question} which may arise in one's mind at this point: why is the history of Earth divided this way?
The answer is complex and ever-changing.
Humans, for all our talent, are pretty garbage at agreeing with one another\lips
\digsec{Hadean: Hell is Earth}{7}{20}{18}{thehadeanera}
\textsc{Lasting roughly} \num{500000000} years, the Hadean featured an Earth which probably looked a bit closer to how one imagines Hell than to the current Earth.
In fact, the word \enquote{Hadean} comes from Hades, the Greek god of the underworld.\ftnote{I feel like he got a bad rap. Just getting dunked on by Zeus and Poseidon and having everyone hate you, all because you drew the short straw and got the downer gig, kinda sucks.}
We really don't know a whole lot about this period of time; it was pretty long ago.
Scientists think that we caught the Moon during the Hadean, based on the current theory.
The theory is that a big-ass (Mars-sized) object (planet) hit Earth and the combined ejecta eventually coalesced into the Moon.\autocite{wheredidthemooncomefrom}
This object, generally referred to as Theia (formed in the same way the planets did), started out at Earth's $L_{4}$ or $L_{5}$, points which are gravitationally stable relative to Earth.\autocite[3--4]{wheredidthemooncomefrom}
Without taking into account the other bodies of the Solar System, this system would be stable and we'd have a fun sister planet, just as capable of sustaining life as Earth.
But, gravitational disturbance from other proto-planets caused Theia to impact the Earth, the debris coalescing into the Moon.\autocite[44--47]{wheredidthemooncomefrom}
Earth got hit by a bunch of stuff at this point, a period, lasting from the late Hadean to the early Archean, called the \emph{Late Heavy Bombardment}.\autocite{lunarbombardment}
Whether life started during this eon is up for debate, as there is scant evidence either way, but it certainly got going during the Archean.
\digsec{Archean: Water, Water, Everywhere, but no Oxygen}{7}{21}{18}{thearcheanera}
\textsc{The term} Archean comes from the Greek \emph{Arkh\={e}}, meaning \enquote{beginning, origin,} and the eon originally was the first, until the Hadean was split off into its own thing.\ftnote{See \fref{sec:geologicaltimescale}}
While still quite hot, relative to current temperatures, the crust had cooled down by this point, leading to the formation of the first continents.
One theory describes a proto-continent, some \num{3100} million years ago, referred to as \enquote{Ur,} which consisted of what is now India, western Australia, and southern Africa,\autocite{historyofcontinents} while another posits a mass dubbed \enquote{Vaalbara,} some \num{3600} million years before present time, consisting of western Australia and southern Africa.\autocite{sequencestratigraphy}
As one might infer from the above, plate tectonics, similar to those present today, were in full swing.
The Late Heavy Bombardment continued, possibly contributing to the emergence of life.
The Moon was much closer to Earth than it is today, causing tides up to \num{1000} feet high.
Liquid water was present on Earth, but there was barely any free oxygen in the atmosphere.
An oxygen-less atmosphere may sound bad to you or me, but it represents the ideal for anaerobic life, like bacteria, or more generally, prokaryotes.\ftnote{Prokaryote, by the way, comes from the Greek words \emph{pro}, meaning \enquote{before,} and \emph{karyon}, meaning \enquote{nut or kernel,} apt seeing as they have no nuclei, mitochondria, or any other membrane-bound organelles.}
It is theorized that the last common ancestor to all life on earth lived in the Archean, around \numrange{3.5}{3.8} billion years ago.
Bacteria developed photosynthesis and started using ATP to generate energy, both mechanisms that cells still use today.
\digsec{Proterozoic: Life, Now Featuring Multiple Cells}{7}{21}{18}{theproterozoicera}
\textsc{As we} get closer to the current time, our knowledge of each period gets, understandably, more concrete.
While the Proterozoic\ftnote{A word which comes from, obviously, the Greek \emph{protero-} meaning \enquote{former, earlier} and \emph{zoic-} meaning \enquote{animal, living being.}} is, on a human scale, pretty well in the past, its extant fossil record and better-preserved rock give us a peek into what it was like.
\begin{somenotes}{Proterozoic Eon}
\item \num{2500} to \num{542} million years ago
\item the time between the appearance of oxygen in the Earth's atmosphere and the appearance of the first complex life forms
\end{somenotes}
\digsubsec{Classism, but it's Taxonomy}{7}{21}{18}{classismtaxonomy}
\textsc{From the} Greek \emph{taxis}\ftnote{Meaning arrangement} and \emph{-nomia},\ftnote{Meaning method} biological taxonomy is \enquote{the science dealing with the description, identification, naming, and classification of organisms.}
The \enquote{father} of taxonomy is generally taken to be the Swedish botanist Carl Linnaeus (1707--1778).
Since Linnaeus' time, quite a bit of debate has taken place regarding the proper organization for the tree of life.\autocite{phylogenyandbeyond}
But the generally accepted scheme, or at least the one I will generally adhere to, is the one introduced by Carl Woese and George Fox in 1977, which features three \emph{domains} as the highest division of life.\autocite{woeseorignial}
The three domains, according to Woese and Fox, are Archaea, Bacteria, and Eukarya (or Eukaryota).
\todobrak{explain different taxa}
\todobrak{transition into Phanerozoic}
\begin{somenotes}{Taxonomy}
\item from the Greek \emph{taxis}, meaning \enquote{arrangement,} and \emph{-nomia}, meaning \enquote{method}
\item Carl Linnaeus is generally regarded as its father
\item lots of conflict and schools of thought, probably gonna stick to the Woese system\autocite{woeseorignial}
\item Domain: highest level
\begin{itemize}
\item Archaea
\item Bacteria
\item Eukarya
\end{itemize}
\item Kingdom
\item Phylum
\item Class
\item Order
\item Family
\item Genus
\item Species
\end{somenotes}
\digsec{Phanerozoic: 2 Complex 5 Me}{7}{21}{18}{thephanerozoicera}
\begin{somenotes}{Phanerozoic Eon}
\item the current eon, which started \num{541} million years ago
\item began with the Cambrian period
\end{somenotes}
\digsubsec{The Evolution of Evolution}{7}{22}{18}{evolutionevolution}
\todobrak{discuss the history of the theory of evolution}\ftnote{\smartcite{zimmer2006evolution} and \smartcite{huxley2010evolution}}
\digsubsec{The Holocene Epoch}{7}{21}{18}{holoceneepoch}
\textsc{The current} period of time, the one we live in, is the Holocene\ftnote{From the Greek \emph{holos}, meaning \enquote{whole or entire,} and \emph{kainos}, meaning \enquote{entirely recent.}} epoch.\ftnote{And is, itself, split into smaller sections, called stages, but I'll not get into them too much. The current stage is the Meghalayan, but the concept of the \enquote{Anthropocene} has been discussed.}
The extinction event currently underway\lips
\digsec{The Point}{7}{21}{18}{thepoint}
\todobrak{the point should be clear throughout}
Then I was born, about thirteen billion years after the Big Bang.
I will resist my compulsion for analogy, for my life has been nothing like the early stages of the universe or of Earth, except in that I was once younger and now am older.
\todobrak{work in the Standard Model at some point}
\end{document}
\include{config}
\begin{document}
\newcommand{\numberBox}[3]{
\noindent
\fcolorbox{#2}{white}{
\begin{minipage}[l][13.1cm][c]{
\dimexpr19cm-4
\fboxsep-2
\fboxrule}
\centering
#1
\end{minipage}
}
\\[.5pc]
}
\newcommand{\descriptionBox}[5]{
\noindent
\fcolorbox{#1}{white}{
\begin{minipage}[l][13.1cm][c]{
\dimexpr19cm-4
\fboxsep-2
\fboxrule}
\section*{#3}
#4\\[2pc]
Follow the steps below:\\
#5
\end{minipage}
}
\\[.5pc]
}
\newpage
\numberBox{%
\includegraphics[width=4cm]{img/logo/reality}%
\section*{1 - Reality}
}{RealityBlue}{RealityBlue}
%
\numberBox{%
\includegraphics[width=4cm]{img/logo/play}%
\section*{3 - Play}
}{PlayGreen}{PlayGreen}
%
%
\descriptionBox{RealityBlue}{RealityBlue}{\includegraphics[height=8mm]{img/logo/reality} Reality}{What problems related to privacy exist out there? In this part we want you to think about what concerns you, and what privacy problem you would like to solve.}{%
\begin{enumerate}%
\item Choose one \textbf{Reality card}%
\item Discuss the \textbf{privacy problems} related to the card.%
\item Create a situation about the \textbf{privacy problem} you have selected.%
\item Use the questions on the question card to help you think.%
\item Explain your idea to the other groups.%
\end{enumerate}%
}
%
\descriptionBox{PlayGreen}{PlayGreen}{\includegraphics[height=8mm]{img/logo/play} Play}{%
What makes a game fun to play? In this part we want you to think about what makes a game fun to play, and try to create a new exciting game in a given genre.%
}{%
\begin{enumerate}%
\item Think of what \textbf{genre} the game you selected in the meaning part belongs to, and of how you raised \textbf{awareness} about your \textbf{privacy problem}.%
\item Build a game that raises \textbf{awareness} of your \textbf{privacy problem} in this \textbf{genre}.%
\item Use the questions on the question card to help you think.%
\item Draw 4 scenes from the game.%
\item Explain your idea to the other groups.%
\end{enumerate}%
}
%
%
\numberBox{%
\includegraphics[width=4cm]{img/logo/meaning}%
\section*{2 - Meaning}
}{MeaningYellow}{MeaningYellow}
%
\numberBox{%
\includegraphics[width=4cm]{img/logo/technology}%
\section*{4 - Technology}
}{TechnologyRed}{TechnologyRed}
%
%
\descriptionBox{MeaningYellow}{MeaningYellow}{Meaning \includegraphics[height=8mm]{img/logo/meaning}}{%
How can a game be meaningful? In this part we want you to think about how existing games can be changed to have meaning, increasing the awareness or changing the attitude of the player. %
}{%
\begin{enumerate}%
\item Choose a game the majority of the group enjoys.
\item Make sure everyone in the group understands how the game works.
\item Change one or more elements of the game so that it raises awareness about the \textbf{privacy problem} identified in the reality part.
\item Use the questions on the question card to help you think.%
\item Explain your idea to the other groups.%
\end{enumerate}%
}
%
\descriptionBox{TechnologyRed}{TechnologyRed}{Technology \includegraphics[height=8mm]{img/logo/technology}}{%
What remains is to combine your \textbf{privacy problem}, ways of raising \textbf{awareness}, and game \textbf{genre} with a \textbf{technology}. %
}{%
\begin{enumerate}%
\item Draw one \textbf{Technology card}.
\item Combine the previous parts into one successful Serious Game!
\item Use the questions on the question card to help you think.%
\item Explain your idea to the other groups.%
\end{enumerate}%
}
\end{document}
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% Template: Article
%
% By: Abrantes Araújo Silva Filho
%     [email protected]
%
% Citation: If you enjoyed this template, please help spread the word by keeping
%           the link to my GitHub repository at:
%           https://github.com/abrantesasf/LaTeX
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%%% Configures the document type, paper, default font size, and basic information
%%% for the PDF/DVIPS properties and other document properties
\RequirePackage{ifpdf}
\ifpdf
  % Class, language, and default font size. Other options to consider:
  %    draft
  %    onecolumn (default) or twocolumn (OR use the multicol package)
  %    fleqn with or without leqno (left-aligns formulas and equation numbers)
  %    oneside (default for article or report) or twoside (default for book)
\documentclass[pdftex, brazil, 12pt, twoside]{article}
\else
  % Class, language, and default font size. Other options to consider:
  %    draft
  %    onecolumn (default) or twocolumn (OR use the multicol package)
  %    fleqn with or without leqno (left-aligns formulas and equation numbers)
  %    oneside (default for article or report) or twoside (default for book)
\documentclass[brazil, 12pt]{article}
\fi
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%%% Loads the initial packages needed for control structures and for
%%% creating and parsing new commands
\usepackage{ifthen}
\usepackage{xparse}
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%%% Configures the page size, margins, line spacing and, if necessary,
%%% enables indentation of first paragraphs.
\ifpdf
\usepackage[pdftex]{geometry}
\else
\usepackage[dvips]{geometry}
\fi
\geometry{a4paper, left=2.6cm, right=4.0cm, top=3.0cm, bottom=3.4cm}
\usepackage{setspace}
\singlespacing
%\onehalfspacing
%\doublespacing
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%%% Header and footer settings:
\usepackage{fancyhdr}
\setlength{\headheight}{1cm}
\setlength{\footskip}{1.5cm}
\renewcommand{\headrulewidth}{0.3pt}
\renewcommand{\footrulewidth}{0.0pt}
\pagestyle{fancy}
\renewcommand{\sectionmark}[1]{%
\markboth{\uppercase{#1}}{}}
\renewcommand{\subsectionmark}[1]{%
\markright{\uppercase{\thesubsection \hspace{0.1cm} #1}}{}}
\fancyhead{}
\fancyfoot{}
\newcommand{\diminuifonte}{%
\fontsize{9pt}{9}\selectfont
}
\newcommand{\aumentafonte}{%
\fontsize{12}{12}\selectfont
}
% Configures header and footer for TWOSIDE documents
\fancyhead[EL]{\textbf{\thepage}}
\fancyhead[EC]{}
\fancyhead[ER]{\diminuifonte \textbf{\leftmark}}
\fancyhead[OR]{\textbf{\thepage}}
\fancyhead[OC]{}
\fancyhead[OL]{\diminuifonte \textbf{\rightmark}}
\fancyfoot[EL,EC,ER,OR,OC,OL]{}
% Configures header and footer for ONESIDE documents
%\lhead{ \fancyplain{}{top left} }
%\chead{ \fancyplain{}{top center} }
%\rhead{ \fancyplain{}{\thesection} }
%\lfoot{ \fancyplain{}{bottom left} }
%\cfoot{ \fancyplain{}{bottom center} }
%\rfoot{ \fancyplain{}{\thepage} }
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%%% Encoding, language, and font settings:
\usepackage[T1]{fontenc}
\usepackage[utf8]{inputenc}
\usepackage{babel}
% Changes the document's default font (not all work in math mode):
% phv = Helvetica
% ptm = Times
% ppl = Palatino
% pbk = bookman
% pag = AdobeAvantGarde
% pnc = Adobe NewCenturySchoolBook
\renewcommand{\familydefault}{ppl}
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%%% Loads packages for cross-references, citations within the document,
%%% links to the internet, and others. Configures some options.
%%% Do not change the loading order of the packages.
\usepackage{varioref}
\ifpdf
\usepackage[pdftex]{hyperref}
\hypersetup{
    % Information that varies per document (CHANGE HERE!):
pdftitle={Calculus 1B: Integration},
pdfauthor={MITx on EdX},
pdfsubject={MITx 18.01.2x on EdX --- Calculus 1B: Integration},
pdfkeywords={calculus, integral},
pdfinfo={
      CreationDate={}, % e.g.: D:YYYYMMDDHH24MISS
      ModDate={}       % e.g.: D:YYYYMMDDHH24MISS
},
    % Things you should not change unless you know what you are doing:
unicode=true,
pdflang={pt-BR},
bookmarksopen=true,
bookmarksnumbered=true,
bookmarksopenlevel=5,
pdfdisplaydoctitle=true,
pdfpagemode=UseOutlines,
pdfstartview=FitH,
pdfcreator={LaTeX with hyperref package},
pdfproducer={pdfTeX},
pdfnewwindow=true,
colorlinks=true,
citecolor=green,
linkcolor=red,
filecolor=cyan,
urlcolor=blue
}
\else
\usepackage{hyperref}
\fi
\usepackage{cleveref}
\usepackage{url}
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%%% Loads symbol libraries (mathematical, physical, etc.), additional fonts,
%%% and configures some options
\usepackage{amsmath}
\usepackage{amssymb}
\usepackage{amsfonts}
\usepackage{siunitx}
\sisetup{group-separator = {.}}
\sisetup{group-digits = {false}}
\sisetup{output-decimal-marker = {,}}
\usepackage{bm}
\usepackage{cancel}
% Changes the decimal separator via command, if necessary (prefer siunitx):
%\mathchardef\period=\mathcode`.
%\DeclareMathSymbol{.}{\mathord}{letters}{"3B}
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%%% Loads computing-related packages
\usepackage{algorithm2e}
\usepackage{algorithmicx}
\usepackage{algpseudocode}
\usepackage{listings}
\lstset{literate=
{á}{{\'a}}1 {é}{{\'e}}1 {í}{{\'i}}1 {ó}{{\'o}}1 {ú}{{\'u}}1
{Á}{{\'A}}1 {É}{{\'E}}1 {Í}{{\'I}}1 {Ó}{{\'O}}1 {Ú}{{\'U}}1
{à}{{\`a}}1 {è}{{\`e}}1 {ì}{{\`i}}1 {ò}{{\`o}}1 {ù}{{\`u}}1
{À}{{\`A}}1 {È}{{\'E}}1 {Ì}{{\`I}}1 {Ò}{{\`O}}1 {Ù}{{\`U}}1
{ä}{{\"a}}1 {ë}{{\"e}}1 {ï}{{\"i}}1 {ö}{{\"o}}1 {ü}{{\"u}}1
{Ä}{{\"A}}1 {Ë}{{\"E}}1 {Ï}{{\"I}}1 {Ö}{{\"O}}1 {Ü}{{\"U}}1
{â}{{\^a}}1 {ê}{{\^e}}1 {î}{{\^i}}1 {ô}{{\^o}}1 {û}{{\^u}}1
{Â}{{\^A}}1 {Ê}{{\^E}}1 {Î}{{\^I}}1 {Ô}{{\^O}}1 {Û}{{\^U}}1
{œ}{{\oe}}1 {Œ}{{\OE}}1 {æ}{{\ae}}1 {Æ}{{\AE}}1 {ß}{{\ss}}1
{ű}{{\H{u}}}1 {Ű}{{\H{U}}}1 {ő}{{\H{o}}}1 {Ő}{{\H{O}}}1
{ç}{{\c c}}1 {Ç}{{\c C}}1 {ø}{{\o}}1 {å}{{\r a}}1 {Å}{{\r A}}1
{€}{{\euro}}1 {£}{{\pounds}}1 {«}{{\guillemotleft}}1
{»}{{\guillemotright}}1 {ñ}{{\~n}}1 {Ñ}{{\~N}}1 {¿}{{?`}}1
}
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%%% Enables extended color support
\usepackage[svgnames]{xcolor} % Color options: usenames (16), dvipsnames (64),
                              % svgnames (150), and x11names (300).
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%%% Support for importing external graphics
\ifpdf
\usepackage[pdftex]{graphicx}
\else
\usepackage[dvips]{graphicx}
\fi
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%%% Support for creating graphics procedurally in LaTeX:
\usepackage{tikz}
\usetikzlibrary{arrows,automata,backgrounds,matrix,patterns,positioning,shapes,shadows}
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%%% Packages for tables
\usepackage{array}
\usepackage{longtable}
\usepackage{tabularx}
\usepackage{tabu}
\usepackage{lscape}
\usepackage{colortbl}
\usepackage{booktabs}
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%%% Packages for list environments
\usepackage{enumitem}
\usepackage[ampersand]{easylist}
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%%% Packages supporting floats, captions, etc.:
\usepackage{float}
\usepackage{wrapfig}
\usepackage{placeins}
\usepackage{caption}
\usepackage{sidecap}
\usepackage{subcaption}
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%%% My own specific commands:
% Command to italicize words in English (and other languages!):
\newcommand{\ingles}[1]{\textit{#1}}
% Command to put the correct space between a number and its unit:
\newcommand{\unidade}[2]{\ensuremath{#1\,\mathrm{#2}}}
\newcommand{\unidado}[2]{{#1}\,{#2}}
% Produces a masculine or feminine ordinal depending on the second argument:
\newcommand{\ordinal}[2]{%
#1%
\ifthenelse{\equal{a}{#2}}%
{\textordfeminine}%
{\textordmasculine}}
\newcommand{\bolota}{\item[$\bigcirc$]}
\newcommand{\quadrado}{\item[$\square$]}
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%%% Specific hyphenation for when LaTeX/Babel cannot hyphenate:
\babelhyphenation{Git-Hub}
%\usepackage{exsol}
\usepackage{exercise}
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%%%%%%%%%%%%%%%%%%%%%%%%%%%%% THE DOCUMENT BEGINS %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
\begin{document}
\title{Calculus 1B: Integration}
\author{MITx 18.01.2x}
\date{2018/11/17 -- 2019/03/06}
\maketitle
\tableofcontents
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
\newpage
\section{Getting started (2018/11/17)}
\label{gs}
\textbf{Hello and welcome to Calculus 1B: Integration!}
Your Calculus adventure into the integral starts now! Share the news, tell your
friends and family --- invite them to come with you. As you embark on this second
leg of your calculus adventure, we want to encourage you to post questions to
clarify content and get help with problems. If you have an insight into a classmate's
post, answer it! By participating in the discussion forum, you have the opportunity
to become part of a global learning community. We think this is one of the most
exciting features of these online courses, so we hope you take advantage of it!
About posting for help with a problem: Choose post type ``Question'', write a
descriptive title, and make sure that your comment is clear, pointing out the
difficulty and where you are stuck on a problem.
About posting about bugs, typos, and errors: Choose topic area ``Bugs, typos, and
errors'', give a descriptive title, and add [Staff] to your title.
About answering others' posts: Please refrain from posting solutions to problems.
Instead try to clarify the problems, and offer hints to help your fellow students
solve the problem. We hope you'll enjoy this course!
18.01.2x Calculus 1B: Integration begins next Wednesday, 21 November 2018.
But we are eager to get started, and we hope you are too!
If you haven't taken the first part, Calculus 1A, with us, please start by reading
through the materials in the Getting Started section, which will:
\begin{itemize}
  \item explain how the course works (grading, syllabus, schedule, organization)
  \item familiarize you with the problem types used in this course
  \item provide an Entrance Survey that helps us get to know you
  \item include a diagnostic section to assess your readiness for this course
\end{itemize}
Looking forward to starting this next calculus adventure with you.
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
\subsection{Overview and logistics}
\label{gs-ol}
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
\subsubsection{Meet the course team}
\label{gs-ol-team}
\paragraph{Professor David Jerison}
David Jerison received his Ph.D.\ from Princeton University in 1980, and joined the
mathematics faculty at MIT in 1981. In 1985, he received an A.P.\ Sloan Foundation
Fellowship and a Presidential Young Investigator Award. In 1999 he was elected to the
American Academy of Arts and Sciences. In 2004, he was selected for a Margaret MacVicar
Faculty Fellowship in recognition of his teaching. In 2012, the American Mathematical
Society awarded him and his collaborator Jack Lee the Bergman Prize in Complex Analysis.
\begin{figure}[H]
\begin{center}
\caption{Professor David Jerison}
\label{fig:david-jerison}
\fbox{\includegraphics[scale=0.7]{imagens/getting-started/jerison.jpg}}
%\includegraphics[scale=0.4]{imagens/palavras-imagens-simbolos.png}
%
%\footnotesize{Fonte:}
\end{center}
\end{figure}
Professor Jerison's research focuses on PDEs and Fourier Analysis. He has taught single
variable calculus, multivariable calculus, and differential equations at MIT several
times each.
\paragraph{Professor Gigliola Staffilani}
Gigliola Staffilani has been the Abby Rockefeller Mauzé Professor of Mathematics since 2007.
She received her Ph.D.\ from the University of Chicago in 1995. Following faculty
appointments at Stanford, Princeton, and Brown, she joined the MIT mathematics faculty
in 2002. She received both a teaching award and a research fellowship while at Stanford.
She received a Sloan Foundation Fellowship in 2000. In 2014 she was elected to the
American Academy of Arts and Sciences.
\begin{figure}[H]
\begin{center}
\caption{Professor Gigliola Staffilani}
\label{fig:gigliola}
\fbox{\includegraphics[scale=0.7]{imagens/getting-started/staffilani.jpg}}
%\includegraphics[scale=0.4]{imagens/palavras-imagens-simbolos.png}
%
%\footnotesize{Fonte:}
\end{center}
\end{figure}
Professor Staffilani is an analyst, with a concentration on dispersive nonlinear PDEs.
She has taught multivariable calculus several times at MIT, as well as differential
equations.
\paragraph{Instructor Jen French}
Jen French is an MITx Digital Learning Scientist in the MIT math department. She earned
her Ph.D.\ in mathematics from MIT in 2010, with specialization in Algebraic Topology.
After teaching after-school math for elementary-aged students and working with the
Teaching and Learning Lab at MIT to develop interdisciplinary curricular videos tying
foundational concepts in math and science to engineering design themes, she joined
MITx in 2013. She has developed videos, visual interactives, and problems providing
immediate feedback using the edX platform residentially in the MIT math department
to aid student learning. She has developed the calculus series (3 courses) and differential
equations series (5 courses) available here on edX.
\begin{figure}[H]
\begin{center}
\caption{Instructor Jen French }
\label{fig:jen}
\fbox{\includegraphics[scale=0.7]{imagens/getting-started/french.jpg}}
%\includegraphics[scale=0.4]{imagens/palavras-imagens-simbolos.png}
%
%\footnotesize{Fonte:}
\end{center}
\end{figure}
\paragraph{Instructor Karene Chu}
Karene Chu received her Ph.D.\ in mathematics from the University of Toronto in 2012.
Since then she has been a postdoctoral fellow first at the University of Toronto/Fields
Institute, and then at MIT, with research focus on knot theory. She has taught single
and multi-variable calculus, and linear algebra at the University of Toronto where
she received a teaching award.
\begin{figure}[H]
\begin{center}
\caption{Instructor Karene Chu}
\label{fig:wang}
\fbox{\includegraphics[scale=0.7]{imagens/getting-started/karene.jpg}}
\end{center}
\end{figure}
\paragraph{Special thanks to \ldots}
Professor Arthur Mattuck for starting it all.
Ed Tech Developers:
\begin{itemize}[noitemsep]
\item Phillip Ai
\item J.\ M.\ Claus
\item Eric Heubel
\item Haynes Miller
\item Martin Segado
\end{itemize}
MITx Video Team:
\begin{itemize}[noitemsep]
\item Brittany Bellamy
\item Chris Boebel
\item Tsinu Heramo
\item Douglass McLean
\item Lana Scott
\item Caitlin Stier
\end{itemize}
\newpage
MITx Support Staff:
\begin{itemize}[noitemsep]
\item Kyle Boots
\item Brad K.\ Goodman
\end{itemize}
\fbox{\begin{minipage}{10cm}This course was funded in part by:\ \\
Class of 1960 Alumni Funds\ \\
2014--2015 Alumni Class Funds Grant\ \\
Wertheimer Fund\end{minipage}}
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
\subsubsection{Course description}
\label{gs-ol-description}
Discover the integral --- what it is and how to compute it. See how to use calculus
to model real world phenomena. Part 2 of 3.
How long should the handle of your spoon be so that your fingers do not burn while
mixing chocolate fondue? Can you find a shape that has finite volume, but infinite
surface area? How does the weight of the rider change the trajectory of a zip line
ride? These and many other questions can be answered by harnessing the power of the
integral. But what is an integral? You will learn to interpret it geometrically as
an area under a graph, and discover its connection to the derivative. You will encounter
functions that you cannot integrate without a computer and develop a big bag of tricks
to attack the functions that you can integrate by hand. The integral is vital in engineering
design, scientific analysis, probability and statistics. You will use integrals to find
centers of mass, the stress on a beam during construction, the power exerted by a motor,
and the distance traveled by a rocket.
This course, in combination with \emph{18.01.1x Calculus 1A: Differentiation}, covers the
AP Calculus AB curriculum.
This course, in combination with \emph{18.01.1x Calculus 1A: Differentiation} \textbf{and}
\emph{18.01.3x Calculus 1C: Coordinate Systems and Infinite Series}, covers the AP Calculus BC
curriculum.
If you intend to take an AP exam, we strongly suggest that you familiarize yourself with
the AP exam to prepare for it.
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
\subsubsection{The making of this course}
\label{gs-ol-making}
This course was created using latex2edx, a free tool developed at MIT for creating
content for edX written in \LaTeX. \LaTeX\ is a typesetting language that is fantastic
for writing math! Occasionally, the equations you see on the webpage (which are rendered
in MathJax) do not load appropriately. Our apologies. The easiest fix is to simply
reload the page. Another solution is to change browsers. (Firefox seems to render
MathJax less reliably than Chrome or Safari. However, frequent changes to edX
will cause disruptions in our content.)
Note edX is not supported on tablet devices. That said, users report that 95\% of
the problems can be done on a tablet device, but if weird errors are creeping
in (especially with formula input type problems) you may try switching to a laptop
or desktop computer.
\begin{figure}[H]
\begin{center}
%\caption{}
\label{fig:latex2edx}
\fbox{\includegraphics[scale=0.7]{imagens/getting-started/latex2edx.png}}
\end{center}
\end{figure}
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
\subsubsection{How to succeed}
\label{gs-ol-succeed}
\paragraph{Prerequisites}
This course has a global audience with students from a wide variety of backgrounds.
To succeed in this course, you must have a solid foundation in
\begin{enumerate}[noitemsep]
\item Algebra
\item Geometry
\item Trigonometry
\item Exponents
\item Logarithms
\item Limits
\end{enumerate}
Because we know you come from different backgrounds, we want to help you to choose
the best path through this content. To aid us in this, please take the
``Choose your calculus adventure'' diagnostics. This will help you to determine
if you have the skills to succeed, what skills you may need to review, and which
units you may be able to skip!
\paragraph{Reference materials}
The material we provide in the Courseware contains all of the content you need for
this course. However, there are many good calculus texts that have a great deal of
problems and alternate explanations that may help you. Most widely used calculus
texts are adequate.
There is also the free web resource \href{https://www.khanacademy.org/}{Khan Academy}.
Links to other web resources can be found on the Course Info page under the header
``Related Links''. Feel free to share other resources on the course wiki or through
the discussion forum.
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
\subsubsection{Grading}
\label{gs-ol-grading}
There are 4 categories of graded problems in 18.01.2x: in-lecture Exercises,
Part A Homework, Part B Homework, and the Final Exam.
\begin{itemize}
\item \textbf{Exercises:} These are the problems that are interspersed between videos
in each lecture. These problems count for 20\% of your grade. These problems will
be used to motivate theory, practice a concept you just learned, and review material
from previous sequences that we are using. While you are graded on these problems,
they are low-stakes: you have multiple attempts, and have the opportunity to look
at the answer after you have submitted a correct answer or run out of attempts.
This is where you will do the majority of your learning. We encourage you to make
mistakes and learn from them!
\item \textbf{Part A Homework:} Each unit has 1 Part A Homework assignment, which
gives you an opportunity to practice what you learned. These problems count for 10\%
of your total grade. Wait until the end of the unit to attempt these problems.
These problems help you identify the concepts that you have forgotten, and aid in
long-term retention. These problems are mostly mechanical, asking you to practice the
methods and techniques learned in each unit. Each problem typically tests knowledge
from only one section in a unit. (We won't necessarily tell you which one though!)
\item \textbf{Part B Homework:} Each unit has 1 Part B Homework assignment. The part
B homework counts for 25\% of your total grade. The problems on this homework
combine ideas from all of the sequences in the unit. These problems are mostly in
the form of word problems which ask you to apply the methods learned to new scenarios.
\item \textbf{Final:} The final exam is the culmination of your learning, and will
account for 45\% of your grade. These problems cover all of the material in this course.
Several of the problems follow the AP short-answer format. However, we cannot grade
the justifications to your reasoning here. To prepare for the AP exam, you should
take and review the solutions to sample AP exams from the AP website.
\end{itemize}
\paragraph{Certification} To earn an ID verified certificate, you must earn 60\% of
the points in this course. You can see your progress towards certification by clicking
on the Progress link above.
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
\subsection{Using the EdX platform}
\label{gs-edx}
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
\subsubsection{Navigating EdX}
\label{gs-edx-nav}
This course was developed at MIT and is made available to you by the edX platform.
The edX platform is a platform for learning! It allows people from around the world
to access content for free, based on their own interests and background.
If you have never taken a course on edX, please take the short 1 hour course
\href{https://www.edx.org/course/demox-edx-demox-1-0}{DemoX} to familiarize yourself
with the platform and its capabilities.
In this course, we have the following top-level resources:
\begin{itemize}[noitemsep]
\item \textbf{Course:} This is the graded content of this course, as well as all
learning materials.
\item \textbf{Calendar:} All of the due dates are in UTC, and are available in
the Google calendar, which you can download into your own calendar so that you
can have these due dates available in your own time zone.
\item \textbf{Discussion:} This is a link to the full discussion forum. For
specific discussions related to a problem or video, link through the discussion
forum link at the bottom of each page. (See the discussion at the bottom of
this page for help with these problems.)
\item \textbf{Progress:} Use this tab to see how you are progressing through
the content!
\end{itemize}
\textbf{Course} is where you will spend most of your time. This is where we
put the content and assessments for your learning. Everything else is a resource
to support your learning.
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
\subsubsection{Example problem types}
\label{gs-edx-example}
Take a moment to familiarize yourself with the main problem types we use in this
course.
\textbf{Checking and submitting an answer:}
\begin{figure}[H]
\begin{center}
%\caption{}
\label{fig:exqst01}
\fbox{\includegraphics[scale=0.33]{imagens/getting-started/question-01.png}}
\end{center}
\end{figure}
\textbf{Resetting a Problem:}
\begin{figure}[H]
\begin{center}
%\caption{}
\label{fig:exqst02}
\fbox{\includegraphics[scale=0.35]{imagens/getting-started/question-02.png}}
\end{center}
\end{figure}
\newpage
\textbf{Limited Number of Attempts 1:}
\begin{figure}[H]
\begin{center}
%\caption{}
\label{fig:exqst03}
\fbox{\includegraphics[scale=0.35]{imagens/getting-started/question-03.png}}
\end{center}
\end{figure}
\textbf{Limited Number of Attempts 2:}
\begin{figure}[H]
\begin{center}
%\caption{}
\label{fig:exqst04}
\fbox{\includegraphics[scale=0.35]{imagens/getting-started/question-04.png}}
\end{center}
\end{figure}
\textbf{Formula Entry Problems:}
This is a math class, which means we are going to be using formulas. And sometimes,
we want you to find these formulas. There are some rules for entering formulas into
the text entry box (which follows the rules of ASCII math); a worked example appears after the list. Use:
\begin{itemize}[noitemsep]
\item Use $+$ to denote addition; e.g. $2+3$.
\item Use $-$ to denote subtraction; e.g. $x-1$.
\item Use $*$ to denote multiplication; e.g. $2*x$.
\item Use $\wedge$ to denote exponentiation; e.g. $x\wedge 2$ for $x^2$.
\item Use $/$ to denote division; e.g. $7/x$.
\item Type \textbf{pi} for the mathematical constant $\pi$.
\item Type \textbf{e} for the mathematical constant $e$.
\item Type \textbf{sqrt(x)}, \textbf{sin(x)}, \textbf{cos(x)}, \textbf{ln(x)},
\textbf{arccos(x)}, etc. for the known functions $\sqrt{x}$, $\sin{x}$, $\cos{x}$,
$\ln{x}$, $\arccos{x}$, etc. Note that parentheses are required.
\item Use parentheses ( ) to specify order of operations.
\end{itemize}
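For example (a hypothetical answer, not one of the course's problems), to enter
\begin{equation*}
  \frac{2\pi\sqrt{x}}{x^{2}+1}
\end{equation*}
you would type \texttt{2*pi*sqrt(x)/(x\textasciicircum 2+1)} into the answer box.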
Each formula entry box will have a Formula Input Help button below the answer button,
where you can find these facts about how to enter formulas. (See the button below.)
\begin{figure}[H]
\begin{center}
%\caption{}
\label{fig:exqst05}
\fbox{\includegraphics[scale=0.3]{imagens/getting-started/question-05.png}}
\end{center}
\end{figure}
\textbf{Drag and Drop Problems:}
\begin{figure}[H]
\begin{center}
%\caption{}
\label{fig:exqst06}
\fbox{\includegraphics[scale=0.3]{imagens/getting-started/question-06.png}}
\end{center}
\end{figure}
\textbf{Sketch Input Problems:}
\begin{figure}[H]
\begin{center}
%\caption{}
\label{fig:exqst07}
\fbox{\includegraphics[scale=0.3]{imagens/getting-started/question-07.png}}
\end{center}
\end{figure}
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
\subsection{Using the forum}
\label{gs-forum}
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
\subsubsection{Discussion forum}
\label{gs-forum-forum}
The discussion forum is the tool for connecting with the community of online learners
in this course. Use the forum to ask questions, seek clarifications, report bugs,
start or respond to topical discussions.
On most pages, there is a link at the bottom, which says ``show discussion''.
Clicking this link will show the discussion forum associated with the videos
and problems on that page.
\paragraph{``Netiquette'': What to do}
\begin{itemize}[noitemsep]
\item \textbf{Be polite.} Make sure that your posts are respectful of the other
students and staff in the course.
\item Use the search button. Search for similar forum posts \textbf{before you
post} using the magnifying glass icon. Many of your classmates will have the
same question that you do! If you perform a search first, you may find the
question and answer without needing to post yourself. This helps us keep the
forum organized and useful!
\item Reply to existing discussions when you see someone with the same question.
This helps to organize responses.
\item Use a descriptive and specific title to your post. This will attract the
attention of TAs and classmates who can answer your question.
\item Be very specific about where you need help. Are you stuck on a particular
part of a problem? Are you confused by a particular concept? What have you
done so far?
\item Actively up-vote other posts, and other students will up-vote yours!
The more up-votes your post has, the more likely they are to be seen.
\end{itemize}
\paragraph{``Netiquette'': What not to do}
Follow common writing practices for online communication:
\begin{itemize}[noitemsep]
\item Avoid TYPING IN ALL CAPS. Some people read this as shouting, even if
that is not your intention.
\item Avoid \textbf{typing in bold}. Some people read this as shouting,
even if that is not your intention.
\item Avoid unnecessary symbols, abbreviated words, texting shorthand,
and replacing words with numbers (e.g. Pls don't rplce wrds w/\#s).
\item Avoid repeating letters or reeeeepeeaattinggggg chaaracterrrss.
\item Avoid excessive punctuation!!!!!!!!
\end{itemize}
\paragraph{Cheating!}
We encourage you to communicate in the forum about problems, and get hints
and help understanding the material from your fellow classmates and the
course TAs. However:
\begin{itemize}[noitemsep]
\item Please do not post solutions to lecture problems, homework
problems (part A or part B), or final exam problems. These will be
removed, and the student who posted will be contacted and dealt
with individually.
\item Do not post or copy solutions posted to the forum for any exercises.
This is cheating.
\item Do not copy solutions from yourself. This is cheating.
\end{itemize}
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
\subsection{Syllabus and schedule}
\label{gs-syllabus}
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
\subsubsection{Syllabus and schedule}
\label{gs-syllabus-syllabus}
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
\subsection{Getting to know you}
\label{gs-know}
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
\subsubsection{Getting to know you}
\label{gs-know-know}
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
\subsubsection{Prerequisite Knowledge}
\label{gs-know-pre}
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
\subsubsection{Integration diagnostics}
\label{gs-know-diag}
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
\subsection{Entrance Survey}
\label{gs-survey}
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
\subsubsection{Entrance Survey}
\label{gs-survey-survey}
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
\newpage
\section{Unit 1: The Integral}
\label{u1}
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
\subsection{Mean Value Theorem}
\label{u1-mvt}
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
\subsubsection{Motivation}
\label{u1-mvt-motiv}
Video: \href{https://www.youtube.com/watch?v=9B-XGTaHqXk}{Mean Value Theorem}
--- How you doing today, sir?
--- Hi.
--- Can I see your license and registration please?
--- What was I doing?
--- Speeding.
--- I was?
--- Yes, you were.
--- How do you know I was speeding?
--- You're looking a little tired. How long have you been driving today?
--- Maybe two hours.
--- Where did you come from?
--- New Jersey.
--- How far away is New Jersey?
--- 170 miles.
--- So 170 miles divided by 2 hours is 85 miles an hour, sir. Isn't that a little fast?
--- OK. So you knew my average speed was 85, but how do you know there was an instant
when I was traveling at 85?
--- Sir, it's simple math. The \emph{mean value theorem} states that at some moment
your instantaneous speed is going to match your average speed.
\begin{figure}[H]
\begin{center}
%\caption{}
\label{fig:mvt-motiv}
\fbox{\includegraphics[scale=0.5]{imagens/unit-1/u1-m1-00001.png}}
\end{center}
\end{figure}
--- You got me.
--- OK. Can I have that license and registration now, sir?
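In the language of this unit: if $x(t)$ is the car's position in miles after $t$
hours, with $x(0) = 0$ and $x(2) = 170$, then the average rate of change over
$[0, 2]$ is
\begin{equation}
  \frac{x(2) - x(0)}{2 - 0} = \frac{170}{2} = 85 \text{ miles per hour},
\end{equation}
and the Mean Value Theorem guarantees some instant $c$ with $0 < c < 2$ at which the
instantaneous speed is $x'(c) = 85$.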
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
\subsubsection{The Mean Value Theorem and some applications}
\label{u1-mvt-applications}
Objectives:
\begin{itemize}[noitemsep]
\item Know the hypothesis and conclusion of the \textbf{Mean Value Theorem}
\item Use \textbf{upper bounds} and \textbf{lower bounds} on the derivative
to establish inequalities between functions
\end{itemize}
Contents: 19 pages (9 videos, 33 minutes 1x speed, 35 questions)
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
\subsubsection{Exploration: Average vs instantaneous rate of change}
\label{u1-mvt-avgxinst}
\begin{Exercise}[title={Review of the average rate of change}]
\noindent Recall the definition of the average rate of change of a function $x(t)$ over an
interval $[a, b]$:
\begin{equation}
\text{Average Rate of Change} = \frac{x(b) - x(a)}{b - a}
\end{equation}
\noindent Geometrically, the average rate of change over $[a, b]$ is the slope of:
\begin{itemize}[noitemsep]
\bolota the secant line through $(a, x(a))$, $(b, x(b))$
\bolota the tangent line through $(a, x(a))$
\bolota the tangent line through $(b, x(b))$
\end{itemize}
\end{Exercise}
\begin{Exercise}[title={Draw your answer, average rate of change}]
\noindent Draw your answer from the previous problem on the graph below:
\begin{figure}[H]
\begin{center}
%\caption{}
\label{fig:mvt-avgxinst-2}
\fbox{\includegraphics[scale=0.35]{imagens/unit-1/u1-m1-00002.png}}
\end{center}
\end{figure}
\end{Exercise}
\begin{Exercise}[title={Review of instantaneous rate of change}]
\noindent Recall the instantaneous rate of the change of the function $x(t)$
is the derivative:
\begin{equation}
x'(t) = \lim_{\Delta t \to 0} \frac{x(t + \Delta t) - x(t)}{\Delta t}
\end{equation}
\noindent Geometrically, the instantaneous rate of change at $t$ is the slope of:
\begin{itemize}[noitemsep]
\bolota the secant line through $(t, x(t))$, $(b, x(b))$
\bolota the tangent line through $(t, x(t))$
\bolota the secant line through $(a, x(a))$, $(t, x(t))$
\end{itemize}
\end{Exercise}
\begin{Exercise}[title={Draw your answer, instantaneous rate of change}]
\noindent Draw your answer from the previous problem on the graph below:
\begin{figure}[H]
\begin{center}
%\caption{}
\label{fig:mvt-avgxinst-3}
\fbox{\includegraphics[scale=0.35]{imagens/unit-1/u1-m1-00003.png}}
\end{center}
\end{figure}
\end{Exercise}
\begin{Exercise}[title={Comparing average and instantaneous rates of change}]
\noindent On the graph below, the secant line through $(a, x(a))$, $(b, x(b))$,
has the same slope as the tangent line(s) at which of the following point(s)?
(Check all that apply.)
\begin{figure}[H]
\begin{center}
%\caption{}
\label{fig:mvt-avgxinst-4}
\fbox{\includegraphics[scale=0.4]{imagens/unit-1/u1-m1-00004.png}}
\end{center}
\end{figure}
\begin{itemize}[noitemsep]
\quadrado $a$
\quadrado $t_1$
\quadrado $t_2$
\quadrado $t_3$
\quadrado $b$
\end{itemize}
\end{Exercise}
Video: \href{https://www.youtube.com/watch?v=8yMIILYAxkw}{Mean Value Theorem: Conclusion}
The \emph{Mean Value Theorem (MVT)} relates the average rate of change
and the instantaneous rate of change of a function.
In more detail, consider a function
$x(t)$, over an interval $[a, b]$, so that the endpoints are
$(a, x(a))$ and $(b, x(b))$:
\begin{figure}[H]
\begin{center}
%\caption{}
\label{fig:mvt-avgxinst-5}
\fbox{\includegraphics[scale=0.5]{imagens/unit-1/u1-m1-00005.png}}
\end{center}
\end{figure}
Recall the average rate of change of the function
$x(t)$ over the interval $[a, b]$ is geometrically
the slope of the secant line through the two endpoints:
\begin{figure}[H]
\begin{center}
%\caption{}
\label{fig:mvt-avgxinst-6}
\fbox{\includegraphics[scale=0.5]{imagens/unit-1/u1-m1-00006.png}}
\end{center}
\end{figure}
Recall also that the instantaneous rate
of change of the function $x(t)$,
at a point $t_1$ between $a$ and $b$, is geometrically
the slope of the tangent line at $t_1$:
\begin{figure}[H]
\begin{center}
%\caption{}
\label{fig:mvt-avgxinst-7}
\fbox{\includegraphics[scale=0.5]{imagens/unit-1/u1-m1-00007.png}}
\end{center}
\end{figure}
Notice the average rate of change is only one number,
but the instantaneous rate of change
can take different values at different points
within the interval:
\begin{figure}[H]
\begin{center}
%\caption{}
\label{fig:mvt-avgxinst-8}
\fbox{\includegraphics[scale=0.5]{imagens/unit-1/u1-m1-00008.png}}
\end{center}
\end{figure}
So how does the mean value theorem relate to these two?
Well, as you will see on our graph,
there is a point at which the tangent
is parallel to, in other words has the same slope as,
the secant line.
Let us find such a point now.
We can shift the secant line without changing its slope
until it is tangent to our graph.
And here it is at a point at which the tangent is parallel
to the secant.
And we will call this point $c$:
\begin{figure}[H]
\begin{center}
%\caption{}
\label{fig:mvt-avgxinst-9}
\fbox{\includegraphics[scale=0.5]{imagens/unit-1/u1-m1-00009.png}}
\end{center}
\end{figure}
So the following is the conclusion
of the mean value theorem:
\textbf{there is some point $c$, in between the endpoints $a$
and $b$, such that the average rate of change from $a$ to $b$
is equal to the instantaneous rate of change at $c$}:
\begin{figure}[H]
\begin{center}
%\caption{}
\label{fig:mvt-avgxinst-10}
\fbox{\includegraphics[scale=0.5]{imagens/unit-1/u1-m1-00010.png}}
\end{center}
\end{figure}
Or again, geometrically, \textbf{there is some point $c$, in between $a$
and $b$, such that the slope of the secant line through the two
endpoints is equal to the slope of the tangent line at $c$}.
Notice the MVT says that such a $c$ is \emph{strictly
in between the endpoints}, but it does not
say where exactly such a $c$ is, or even
\emph{how many} such $c$'s there are.
In fact, there are two such $c$'s in our example:
\begin{figure}[H]
\begin{center}
%\caption{}
\label{fig:mvt-avgxinst-11}
\fbox{\includegraphics[scale=0.5]{imagens/unit-1/u1-m1-00011.png}}
\end{center}
\end{figure}
We have only talked about the conclusion of the mean value
theorem. But what about the hypothesis?
In other words, \emph{when does it hold}?
To find out when the theorem holds,
let us now explore \emph{when it fails}.
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
\subsubsection{Identifying necessary hypotheses}
\label{u1-mvt-hypot}
\begin{Exercise}[title={How the Mean Value Theorem can go wrong}]
\noindent The MVT conclusion: \textbf{There is a point $c$, such that $a<c<b$, at
which the tangent line is parallel to the secant line through $(a, x(a))$ and
$(b, x(b))$}. We may abbreviate ``such that'' with ``s.t.'' from now on.
For which of the graphs below is the MVT conclusion false? (Solid points are
the end points. Dotted lines are asymptotes. Check all that apply.)
\begin{figure}[H]
\begin{center}
%\caption{}
\label{fig:mvt-hypot-1}
\includegraphics[scale=0.4]{imagens/unit-1/u1-m1-00012.png}
\end{center}
\end{figure}
\begin{figure}[H]
\begin{center}
%\caption{}
\label{fig:mvt-hypot-2}
\includegraphics[scale=0.4]{imagens/unit-1/u1-m1-00013.png}
\end{center}
\end{figure}
\begin{figure}[H]
\begin{center}
%\caption{}
\label{fig:mvt-hypot-3}
\includegraphics[scale=0.4]{imagens/unit-1/u1-m1-00014.png}
\end{center}
\end{figure}
\end{Exercise}
Video: \href{https://www.youtube.com/watch?v=WmAcRkkoqOA}{Mean Value Theorem: Full statement}
Here is the conclusion of the mean value theorem
that we have already seen:
\textbf{There is some point $c$, between $a$ and $b$, at which the tangent is
parallel to the secant}. You have just seen that this statement
holds for some functions, but doesn't for some others.
So \emph{what are conditions that will guarantee
that the MVT conclusion holds}?
Let us investigate the examples from the problem you just
solved.
We have collected the successful functions,
the ones for which the MVT conclusion holds,
on the top row, and the failures,
on the bottom two rows:
\begin{figure}[H]
\begin{center}
%\caption{}
\label{fig:mvt-hypot-4}
\fbox{\includegraphics[scale=0.3]{imagens/unit-1/u1-m1-00015.png}}
\end{center}
\end{figure}
We will start by discussing why the functions in the middle row
fail.
Let us look at the first graph.
For the first graph, we can shift the secant line
to approach a point where it is tangent to our graph.
But we find that the exact point at which this would happen
is a point where the function is undefined.
So there is no $c$:
\begin{figure}[H]
\begin{center}
%\caption{}
\label{fig:mvt-hypot-5}
\fbox{\includegraphics[scale=0.5]{imagens/unit-1/u1-m1-00016.png}}
\end{center}
\end{figure}
Next, let's look at the second graph in the middle row.
It has a jump discontinuity at an endpoint.
The secant line slants down, but all the tangent lines slant up,
so there is no $c$.
Because of the discontinuity, there
is no relationship between the slope of the secant
and the slope of the tangents:
\begin{figure}[H]
\begin{center}
%\caption{}
\label{fig:mvt-hypot-6}
\fbox{\includegraphics[scale=0.5]{imagens/unit-1/u1-m1-00017.png}}
\end{center}
\end{figure}
Similarly, for the rest of the graphs in this row,
the reason that the MVT conclusion fails
is a discontinuity either at an endpoint
or within the interval:
\begin{figure}[H]
\begin{center}
%\caption{}
\label{fig:mvt-hypot-7}
\fbox{\includegraphics[scale=0.5]{imagens/unit-1/u1-m1-00018.png}}\\
\fbox{\includegraphics[scale=0.5]{imagens/unit-1/u1-m1-00019.png}}
\end{center}
\end{figure}
Let's look at the bottom row now.
Each of these two graphs is continuous
but has a point within the interval where
the derivative does not exist.
In the first, there is a corner, and in the second,
there is a cusp:
\begin{figure}[H]
\begin{center}
%\caption{}
\label{fig:mvt-hypot-8}
\fbox{\includegraphics[scale=0.5]{imagens/unit-1/u1-m1-00020.png}}
\end{center}
\end{figure}
You can check that the reason there
is no tangent parallel to the secant
is because of the point where the function is not
differentiable.
Let us now look at the successful functions.
We can use the same method as before
to find a point at which the tangent is parallel
to the secant:
\begin{figure}[H]
\begin{center}
%\caption{}
\label{fig:mvt-hypot-9}
\fbox{\includegraphics[scale=0.5]{imagens/unit-1/u1-m1-00021.png}}\\
\fbox{\includegraphics[scale=0.5]{imagens/unit-1/u1-m1-00022.png}}
\end{center}
\end{figure}
We see that the first four graphs on this row
have hump shapes at whose peaks the $c$'s appear.
On the other hand, the last function on this row
is equal to the secant line, so at every point,
the tangent is also equal to the secant line.
In other words, every point within the interval is a $c$.
What do all these graphs have in common?
Well, all of these graphs are \emph{continuous and differentiable}.
Let's return to our question, what condition on a function
will guarantee that the MVT conclusion holds?
We already see that continuity and differentiability
are conditions that will include all
of our examples of successes, and exclude
all of our cases of failures.
It turns out that \emph{continuity and differentiability are
enough to guarantee success}.
So let us state the mean value theorem precisely:
\textbf{If a function $x(t)$ is \emph{continuous}
on the closed interval $[a, b]$, and
\emph{differentiable} on the open interval $(a, b)$,
then there is some point $c$ in the open interval
$(a, b)$ such that the average rate of change over $[a, b]$
is equal to the instantaneous rate of change at $c$}:
\begin{figure}[H]
\begin{center}
%\caption{}
\label{fig:mvt-hypot-10}
\fbox{\includegraphics[scale=0.3]{imagens/unit-1/u1-m1-00023.png}}
\end{center}
\end{figure}
Let's write this conclusion in terms of formulas
now rather than words:
\begin{equation}
\frac{x(b) - x(a)}{b - a} = x'(c)
\end{equation}
Notice that in the hypothesis the function:
\begin{itemize}[noitemsep]
\item needs to be continuous on the closed interval, including
the endpoints;
\item needs to be differentiable only on the open interval (so the
  derivative does not have to exist at the endpoints)
\end{itemize}
Let us now do some exercises.
And we will continue with some immediate consequences
of the MVT.
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
\subsubsection{Statement of the Mean Value Theorem}
\label{u1-mvt-statement}
If $x(t)$ is continuous on $a \le t \le b$, and differentiable on $a < t < b$, that
is, $x'(t)$ exists for all $t$ with $a < t < b$, then for some $c$ with $a < c < b$:
\begin{equation}
\frac{x(b) - x(a)}{b - a} = x'(c)
\end{equation}
Equivalently, in geometric terms, there is at least one point $c$, with $a < c < b$,
at which the tangent line is parallel to the secant line through $(a, x(a))$ and
$(b, x(b))$:
\begin{figure}[H]
\begin{center}
%\caption{}
\label{fig:mvt-stat-1}
\fbox{\includegraphics[scale=1]{imagens/unit-1/u1-m1-00024.png}}
\end{center}
\end{figure}
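As a quick added illustration (not part of the original notes), take $x(t) = t^2$ on the
interval $[0, 2]$. The average rate of change is
\begin{equation}
\frac{x(2) - x(0)}{2 - 0} = \frac{4 - 0}{2} = 2,
\end{equation}
and since $x'(t) = 2t$, the point promised by the MVT satisfies $2c = 2$, that is,
$c = 1$, which indeed lies strictly between $0$ and $2$.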
\begin{Exercise}[title={The logic of the MVT}]
\noindent The following graph has a discontinuity within the interval $[a, b]$:
\begin{figure}[H]
\begin{center}
%\caption{}
\label{fig:mvt-stat-2}
\fbox{\includegraphics[scale=0.5]{imagens/unit-1/u1-m1-00025.png}}
\end{center}
\end{figure}
\noindent Is there a point $c$ with $a < c < b$ at which the tangent line is
parallel to the secant line through $(a, x(a))$ and $(b, x(b))$?
\begin{itemize}[noitemsep]
\bolota Yes
\bolota No
\end{itemize}
\noindent What does this say about the MVT?
\begin{itemize}[noitemsep]
\bolota There can be no $c$ because the hypothesis of the MVT is not satisfied.
\bolota This graph does not satisfy the MVT hypothesis but satisfies the MVT
conclusion. This example does not contradict the MVT.
\bolota This graph shows that the MVT statement above must have the wrong hypothesis.
\bolota This graph shows that the MVT statement above must have the wrong conclusion.
\end{itemize}
\end{Exercise}
\paragraph{Proof of the MVT:} To prove the Mean Value Theorem, we will start by proving
a special case in which the function has the same values at the two end points,
and then use this special case to prove the full theorem.
To prove the special case, we will rely on the \emph{Extreme Value Theorem}, which says
that any function which is continuous on a closed interval must attain both its
maximum and minimum values in that closed interval. This theorem requires deeper analysis
of the real numbers and we will not prove it here. The point is that we need continuity
to guarantee that the function attains both its maximum and minimum.
\textbf{Proof of the special case:}
Suppose a function $x_0(t)$ satisfies the hypothesis of the MVT, that is,
$x_0(t)$ is continuous on $[a, b]$, and differentiable on $(a, b)$.
In this special case, suppose also that $x_0(a) = x_0(b)$. By the Extreme Value Theorem,
$x_0(t)$ attains both its maximum and minimum in $[a, b]$. In other words, there is at
least one point $t_1$ in $[a, b]$ such that $\displaystyle x_0(t_1) = \min_{a \le t \le b}x_0(t)$,
and at least one point $t_2$ in $[a, b]$ such that $\displaystyle x_0(t_2) = \max_{a \le t \le b}x_0(t)$.
There are only two possibilities. The maximum and minimum are either equal or not.
\begin{itemize}
\item Case 1: $\displaystyle \max_{a \le t \le b}x_0(t) = \min_{a \le t \le b}x_0(t)$\\
In this case $x_0(t)$ must be constant over $[a, b]$, so $x'_0(t) = 0$ for all
$a < t < b$. In particular, there is at least one point $c$, with $a < c < b$ at
which $x'_0(c) = 0$.
\item Case 2: $\displaystyle \max_{a \le t \le b}x_0(t) \ne \min_{a \le t \le b}x_0(t)$\\
    In this case, since $x_0(a) = x_0(b)$, the maximum and minimum cannot both be attained at the endpoints.
Hence at least one of $\displaystyle \max_{a \le t \le b}x_0(t)$ and $\displaystyle \min_{a \le t \le b}x_0(t)$
must be achieved in $(a, b)$. Hence, there must be a $c$, with $a < c < b$ such that
$\displaystyle x_0(c) = \max_{a \le t \le b}x_0(t)$ or $\displaystyle x_0(c) = \min_{a \le t \le b}x_0(t)$.
    Now, recall that the derivative of a differentiable function is zero at an interior local maximum or minimum.
    By the hypothesis, $x_0(t)$ is differentiable in $(a, b)$, so $x'_0(c) = 0$ since
    $c$ is either a local maximum or a local minimum.
\end{itemize}
In both cases, since $x_0(a) = x_0(b)$, there is a point $c$, with $a < c < b$, such that
\begin{equation}
x'_0(c) = 0 = \frac{0}{b-a} = \frac{x_0(b) - x_0(a)}{b-a}
\end{equation}
This special case of the MVT is called \textbf{Rolle's Theorem}.
Let us now use the special case above to prove the MVT for functions with possibly
different endpoint values.
Suppose a function $x(t)$
satisfies the hypothesis of the MVT, that is, $x(t)$ is continuous on $[a, b]$,
and differentiable on $(a, b)$. Let
\begin{equation}
x_0(t) = x(t) - \left(x(a) + \frac{x(b) - x(a)}{b-a}(t-a)\right)
\end{equation}
That is, construct a function $x_0(t)$ by subtracting from $x(t)$ the line that goes through
$(a, x(a))$, $(b, x(b))$. Then $x_0(t)$ also satisfies the hypothesis of the MVT, and
$x_0(a) = x_0(b) = 0$. So we can apply Rolle's Theorem to $x_0(t)$, and know that there is a $c$
in $(a, b)$, such that $x'_0(c) = 0$.
Now we can rearrange the equation above and get $x(t)$ in terms of $x_0(t)$:
\begin{equation}
x(t) = x_0(t) + \left(x(a) + \frac{x(b) - x(a)}{b-a}(t-a)\right)
\end{equation}
Taking the derivative on both sides, we get
\begin{equation}
x'(t) = x'_0(t) + \frac{x(b) - x(a)}{b-a}
\end{equation}
And at the point $c$ in $(a, b)$ at which $x'_0(c) = 0$, the equation above reduces to
\begin{equation}
\begin{split}
x'(c) &= x'_0(c) + \frac{x(b) - x(a)}{b-a}\\
&= \frac{x(b) - x(a)}{b-a}
\end{split}
\end{equation}
Thus, we have found a $c$ as needed for the conclusion of the MVT.
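To make the construction above concrete, here is a small added example: take $x(t) = t^3$
on $[0, 1]$. The line through the endpoints is $x(a) + \frac{x(b)-x(a)}{b-a}(t-a) = t$, so
\begin{equation}
x_0(t) = t^3 - t, \qquad x_0(0) = x_0(1) = 0.
\end{equation}
Rolle's Theorem applies, and indeed $x'_0(t) = 3t^2 - 1 = 0$ at $c = 1/\sqrt{3}$, which
lies in $(0, 1)$. At this point $x'(c) = 3c^2 = 1 = \frac{x(1)-x(0)}{1-0}$, exactly as the
MVT asserts.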
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
\subsubsection{Back to the speeding ticket example}
\label{u1-mvt-speed}
\begin{Exercise}[title={Time-position graph of the speeding car}]
\noindent Recall in the example of the speeding car, the only information the
police had was that our car was at the $50$ mile marker at $8$ a.m., and $220$
mile marker at $10$ a.m. Let $x(t)$ be the position of the car at time $t$
and let the units of $x$ be miles and $t$ be hours, so that $x(8) = 50$, and
$x(10) = 220$. Which of the following graphs can be the graph of $x(t)$? Check all that apply.
\begin{figure}[H]
\begin{center}
%\caption{}
\label{fig:mvt-speed-1}
\fbox{\includegraphics[scale=0.65]{imagens/unit-1/u1-m1-00026.png}}
\end{center}
\end{figure}
\end{Exercise}
\begin{Exercise}[title={When 85 mph?}]
\noindent Recall the average velocity of the car between $8$ and $10$ am is $85$ mph.
According to the MVT, the strongest conclusion the police officer could make about when
the car is traveling at exactly $85$ mph is:
\noindent ``There is/are (a)\_\_\_\_\_\_\_\_\_\_\_\_\_\_\_ such moment(s) (b)\_\_\_\_\_\_\_\_\_\_\_\_\_\_\_
and he (c)\_\_\_\_\_\_\_\_\_\_\_\_\_\_\_ when such moment(s) is(are).''
\noindent The (a) is:
\begin{itemize}[noitemsep]
\bolota no
\bolota at least one
\bolota exactly one
\bolota more than one
\end{itemize}
\noindent The (b) is:
\begin{itemize}[noitemsep]
\bolota after 8, and before 10
\bolota at or after 8, and before 10
\bolota at 8 or at 10
\end{itemize}
\noindent The (c) is:
\begin{itemize}[noitemsep]
\bolota knows
\bolota does not know
\end{itemize}
\end{Exercise}
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
\subsubsection{Application to simultaneous rates}
\label{u1-mvt-simrates}
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% TERMINA O DOCUMENTO %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
\end{document}
| {
"alphanum_fraction": 0.6401553468,
"avg_line_length": 36.9806279225,
"ext": "tex",
"hexsha": "60e30a6efa355097f441ba6de9b94a20f9a42f0b",
"lang": "TeX",
"max_forks_count": null,
"max_forks_repo_forks_event_max_datetime": null,
"max_forks_repo_forks_event_min_datetime": null,
"max_forks_repo_head_hexsha": "e30e7355094e66edf4e9e03e3a3fef0cbbbaf72d",
"max_forks_repo_licenses": [
"MIT"
],
"max_forks_repo_name": "abrantesasf/matematica",
"max_forks_repo_path": "calculus/mit/edx/18.01.2x/calculus1b.tex",
"max_issues_count": null,
"max_issues_repo_head_hexsha": "e30e7355094e66edf4e9e03e3a3fef0cbbbaf72d",
"max_issues_repo_issues_event_max_datetime": null,
"max_issues_repo_issues_event_min_datetime": null,
"max_issues_repo_licenses": [
"MIT"
],
"max_issues_repo_name": "abrantesasf/matematica",
"max_issues_repo_path": "calculus/mit/edx/18.01.2x/calculus1b.tex",
"max_line_length": 113,
"max_stars_count": null,
"max_stars_repo_head_hexsha": "e30e7355094e66edf4e9e03e3a3fef0cbbbaf72d",
"max_stars_repo_licenses": [
"MIT"
],
"max_stars_repo_name": "abrantesasf/matematica",
"max_stars_repo_path": "calculus/mit/edx/18.01.2x/calculus1b.tex",
"max_stars_repo_stars_event_max_datetime": null,
"max_stars_repo_stars_event_min_datetime": null,
"num_tokens": 14905,
"size": 55360
} |
%!TEX root = ../CS263_project_report.tex
\section{Implementation Details}
\label{sec:Solution}
In this section, we describe the system architecture and design choices we made.
\begin{center}
\includegraphics[width=10cm, height=5cm]{figures/architecture.png}
\end{center}
We implement the whole project in C++~\cite{prj_git}. However, we implement the client service in both Python and C++ to compare the two implementations. In the following section, we explain the main components of our system.
\subsection{Client}
The clients send \texttt{read}, \texttt{write}, and \texttt{delete} requests to the edge servers. In order to improve performance, the clients put \texttt{write} and \texttt{delete} requests in an ordered queue. This allows sending the requests in batches and eliminating redundant requests (e.g., on the same key). Our client implementation runs on two worker threads: a network thread and the main thread.
The main thread collects the data from the environment and initiates the requests to communicate with the edge server. The network thread optimizes and dispatches the requests to the edge servers. This split means the main thread can continue to collect data without waiting on network communication, whose latency can be high.
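The following sketch illustrates this batching logic; it is not taken from our codebase, and names such as \texttt{RequestQueue} are placeholders:
\begin{verbatim}
# Illustrative sketch of the client-side queue (not our actual code).
from collections import OrderedDict

class RequestQueue:
    def __init__(self):
        # key -> ("write", value) or ("delete", None), in insertion order
        self._pending = OrderedDict()

    def put(self, op, key, value=None):
        # A later write/delete on the same key supersedes the earlier one,
        # eliminating redundant requests before they reach the network.
        self._pending.pop(key, None)
        self._pending[key] = (op, value)

    def drain_batch(self):
        # Called by the network thread; returns one batch to dispatch.
        batch = [(op, key, value) for key, (op, value) in self._pending.items()]
        self._pending.clear()
        return batch
\end{verbatim}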
\subsection{Edge Server}
We implement our key-value store database, which is used heavily by the edge servers. We define three main database queries: \texttt{READ}, \texttt{WRITE}, and \texttt{DELETE}. The main functionality of the edge server is to authenticate clients, execute their queries on the database, and return the query result. In our implementation, we use a REST API with three different endpoints for client-server communication.
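As a rough sketch of the query handling (illustrative only; the endpoint routes, the omitted authentication step, and the in-memory dictionary below are placeholders rather than our actual implementation), each REST endpoint maps onto one database query:
\begin{verbatim}
# Illustrative sketch only: one handler per REST endpoint.
store = {}

def handle_read(key):            # e.g. GET  /read/<key>
    return store.get(key)

def handle_write(key, value):    # e.g. POST /write
    store[key] = value
    return "ok"

def handle_delete(key):          # e.g. POST /delete
    store.pop(key, None)
    return "ok"
\end{verbatim}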
\subsection{Master Server}
In a distributed database, it is crucial to guarantee data persistence among the edge servers. In order to guarantee a reliable database with low data loss (e.g., in case of a server or network failure), we implement a third architectural component: the master server.
The master server receives backup files from the edge servers and stores them in a secure, permanent disk storage. The master server does not store the backup files in a database object but stores them to--and restores them from--\texttt{json} files.
| {
"alphanum_fraction": 0.7954445905,
"avg_line_length": 91.32,
"ext": "tex",
"hexsha": "5fb58c010b191aeb7f6df42c44ad1231a7fc0146",
"lang": "TeX",
"max_forks_count": null,
"max_forks_repo_forks_event_max_datetime": null,
"max_forks_repo_forks_event_min_datetime": null,
"max_forks_repo_head_hexsha": "b3722b4b5a383ba26f786694772aee433b53e234",
"max_forks_repo_licenses": [
"MIT"
],
"max_forks_repo_name": "atefehmohseni/IoT_secure_distributed_database",
"max_forks_repo_path": "CS263_project_report/sections/Solution.tex",
"max_issues_count": null,
"max_issues_repo_head_hexsha": "b3722b4b5a383ba26f786694772aee433b53e234",
"max_issues_repo_issues_event_max_datetime": null,
"max_issues_repo_issues_event_min_datetime": null,
"max_issues_repo_licenses": [
"MIT"
],
"max_issues_repo_name": "atefehmohseni/IoT_secure_distributed_database",
"max_issues_repo_path": "CS263_project_report/sections/Solution.tex",
"max_line_length": 419,
"max_stars_count": null,
"max_stars_repo_head_hexsha": "b3722b4b5a383ba26f786694772aee433b53e234",
"max_stars_repo_licenses": [
"MIT"
],
"max_stars_repo_name": "atefehmohseni/IoT_secure_distributed_database",
"max_stars_repo_path": "CS263_project_report/sections/Solution.tex",
"max_stars_repo_stars_event_max_datetime": null,
"max_stars_repo_stars_event_min_datetime": null,
"num_tokens": 486,
"size": 2283
} |
\documentclass[a4paper]{article}
\usepackage{anysize}
\marginsize{1cm}{1cm}{1cm}{1cm}
\usepackage{amssymb}
\usepackage{amsmath}
\usepackage{graphicx}
\usepackage{epsfig}
\usepackage{subfigure}
\usepackage{listings}
\usepackage{natbib}
\usepackage{verbatim}
\usepackage[T1]{fontenc}
\lstset{language=haskell}
\lstset{commentstyle=\textit}
\lstset{mathescape=true}
\lstset{backgroundcolor=,rulecolor=}
\lstset{basicstyle=\ttfamily}
%\linespread{2.0}
\begin{document}
\title{\bf Monadic Reference Counting}
\author{Giuseppe Maggiore \quad
Michele Bugliesi
\\ Universit\`a Ca' Foscari Venezia
\\ Dipartimento di Informatica
\\ \{maggiore,bugliesi\}@dsi.unive.it
}
\date{}
\maketitle
\begin{abstract}
In this paper we show how a powerful memory and resource management technique such as reference counting can be implemented transparently as a library through the use of monads. This is interesting in itself, since garbage collectors are not always the best choice for every program, but our paper also shows how the bind and return operators can be used to correctly identify the lifetime of resources and references. Finally, we discuss how, with a powerful enough type system and a parameterized monad, we can track various interesting properties of the state of stateful computations. In our case we track the set of resource types that the program handles, but we argue that in other cases this technique could be used to track even more complex and interesting properties.
\end{abstract}
\section{Introduction}
\label{sec:intro}
\input{intro}
\section{State Monad}
\label{sec:state_monad}
\input{state_monad}
\section{Reference}
\label{sec:reference}
\input{reference}
\section{Bind, return and lifetime}
\label{sec:scoping_state_monad}
\input{scoping_state_monad}
\section{Parametrized state monad}
\label{sec:parametrized_state_monad}
\input{parametrized_state_monad}
\section{Related work}
\label{sec:related_work}
\input{related_work}
\section{Conclusions and future work}
\label{sec:conclusions}
\input{conclusions}
\bibliographystyle{plain}
\bibliography{references}
\nocite{*}
\end{document}
| {
"alphanum_fraction": 0.7901578459,
"avg_line_length": 29.5068493151,
"ext": "tex",
"hexsha": "89bf852c02a6c4f49c49384e25c0fd2ba409bdec",
"lang": "TeX",
"max_forks_count": null,
"max_forks_repo_forks_event_max_datetime": null,
"max_forks_repo_forks_event_min_datetime": null,
"max_forks_repo_head_hexsha": "58fa4a3b4c8185ad30bf9a142002d87ceca756e6",
"max_forks_repo_licenses": [
"MIT"
],
"max_forks_repo_name": "vs-team/Papers",
"max_forks_repo_path": "Before Giuseppe's PhD/Functional Approach to Games/MonadicReferenceCounting/trunk/main.tex",
"max_issues_count": null,
"max_issues_repo_head_hexsha": "58fa4a3b4c8185ad30bf9a142002d87ceca756e6",
"max_issues_repo_issues_event_max_datetime": null,
"max_issues_repo_issues_event_min_datetime": null,
"max_issues_repo_licenses": [
"MIT"
],
"max_issues_repo_name": "vs-team/Papers",
"max_issues_repo_path": "Before Giuseppe's PhD/Functional Approach to Games/MonadicReferenceCounting/trunk/main.tex",
"max_line_length": 791,
"max_stars_count": 3,
"max_stars_repo_head_hexsha": "58fa4a3b4c8185ad30bf9a142002d87ceca756e6",
"max_stars_repo_licenses": [
"MIT"
],
"max_stars_repo_name": "vs-team/Papers",
"max_stars_repo_path": "Before Giuseppe's PhD/Functional Approach to Games/MonadicReferenceCounting/trunk/main.tex",
"max_stars_repo_stars_event_max_datetime": "2019-08-19T07:16:23.000Z",
"max_stars_repo_stars_event_min_datetime": "2016-04-06T08:46:02.000Z",
"num_tokens": 586,
"size": 2154
} |
\chapter{Bayesian Inference}
\begin{ex}
Let $f$ and $g$ be PDFs of normal distributions with means $\mu_f$ and $\mu_g$
and variances $\sigma_f^2$ and $\sigma_g^2$ respectively. Note that since
\begin{align*}
\frac{(x-\mu_f)^2}{\sigma_f^2}
+\frac{(x-\mu_g)^2}{\sigma_g^2}
& =\frac{\sigma_g^2(x^2-2x\mu_f+\mu_f^2)+\sigma_f^2(x^2-2x\mu_g+\mu_g^2)}{\sigma_f^2\sigma_g^2} \\
& =\frac{(\sigma_f^2+\sigma_g^2)x^2-2(\sigma_g^2\mu_f+\sigma_f^2\mu_g) x+\sigma_g^2\mu_f^2+\sigma_f^2\mu_g^2}{\sigma_f^2\sigma_g^2} \\
& =\frac{x^2-2\frac{\sigma_g^2\mu_f+\sigma_f^2\mu_g}{\sigma_f^2+\sigma_g^2} x+\frac{\sigma_g^2\mu_f^2+\sigma_f^2\mu_g^2}{\sigma_f^2+\sigma_g^2}}{\frac{\sigma_f^2\sigma_g^2}{\sigma_f^2+\sigma_g^2}} \\
& =\frac{\left(x-\frac{\sigma_g^2\mu_f+\sigma_f^2\mu_g}{\sigma_f^2+\sigma_g^2}\right)^2}{\frac{\sigma_f^2\sigma_g^2}{\sigma_f^2+\sigma_g^2}} +C,
\end{align*}
for some $C$ not depending on $x$, we have
\begin{align*}
f(x)g(x)
& \propto \exp\left\{-\frac{(x-\mu_f)^2}{2\sigma_f^2}\right\}
\exp\left\{-\frac{(x-\mu_g)^2}{2\sigma_g^2}\right\} \\
& \propto \exp\left\{-\frac{1}{2}\frac{\left(x-\frac{\sigma_g^2\mu_f+\sigma_f^2\mu_g}{\sigma_f^2+\sigma_g^2}\right)^2}{\frac{\sigma_f^2\sigma_g^2}{\sigma_f^2+\sigma_g^2}}\right\},
\end{align*}
which, by inspection, is proportional to the PDF of a normal distribution with
mean
\[
\frac{\sigma_g^2\mu_f+\sigma_f^2\mu_g}{\sigma_f^2+\sigma_g^2}
\]
and variance
\[
\frac{\sigma_f^2\sigma_g^2}{\sigma_f^2+\sigma_g^2}.
\]
Consider the product of $n$ normal PDFs each with mean $X_i$ and variance
$\sigma^2$. Since we can group it as the product of the first $n-1$ PDFs and
the last PDF, and since by the previous argument, the product of two normal
PDFs is proportional to a normal PDF, it follows by induction that so is the
product of $n$ PDFs. It only remains to figure out the mean and variance.
Note that for $n=2$ we have
\[
\sigma_2^2
=\frac{\sigma^2\sigma^2}{\sigma^2+\sigma^2}=\frac{\sigma^2}{2}
\text{ and }
\mu_2
=\frac{X_1\sigma^2+X_2\sigma^2}{\sigma^2+\sigma^2}
=\frac{X_1+X_2}{2}
=\Xbar_2.
\]
Suppose that for $n-1$ we have
\[
\sigma_{n-1}^2=\frac{\sigma^2}{n-1}
\text{ and }
\mu_{n-1}=\Xbar_{n-1},
\]
and note that then
\[
\sigma_n^2
=\frac{\sigma_{n-1}^2\sigma^2}{\sigma_{n-1}^2+\sigma^2}
=\frac{\frac{\sigma^2}{n-1}\sigma^2}{\frac{\sigma^2}{n-1}+\sigma^2}
=\frac{\sigma^4}{n-1} / \frac{n\sigma^2}{n-1}
=\frac{\sigma^2}{n}
=\se^2,\text{ and }
\]
\[
\mu_n
=\frac{\Xbar_{n-1}\sigma^2+X_n\sigma_{n-1}^2}{\sigma_{n-1}^2+\sigma^2}
=\frac{\Xbar_{n-1}\sigma^2+X_n\frac{\sigma^2}{n-1}}{\frac{\sigma^2}{n-1}+\sigma^2}
=\frac{(\sum_{i=1}^n X_i)\sigma^2}{n-1}/\frac{n\sigma^2}{n-1}
=\Xbar_n.
\]
The result follows by induction.
Next, consider the product of $n$ normal PDFs with mean $X_i$ and
variance $\sigma^2$ respectively, and a normal PDF with mean $a$ and variance
$b^2$. The product of the first $n$ PDFs is proportional to an
$N(\Xbar, \se^2)$ PDF, and therefore the full product is
proportional to an $N(\overline{\theta}, \tau^2)$ PDF, where
\[
\tau^2=\frac{\se^2b^2}{\se^2+b^2},\text{ and}
\]
\begin{align*}
\overline{\theta}
& =\frac{\Xbar b^2+a \se^2}{\se^2+b^2} \\
& =\frac{\Xbar b^2}{\se^2+b^2}+\frac{a \se^2}{\se^2+b^2} \\
& =\Xbar\frac{\frac{1}{\se^2}}{\frac{1}{\se^2}+\frac{1}{b^2}}
+a\frac{\frac{1}{b^2}}{\frac{1}{\se^2}+\frac{1}{b^2}} \\
& =\Xbar\frac{\frac{1}{\se^2}}{\frac{1}{\se^2}+\frac{1}{b^2}}
+a\left[1-\frac{\frac{1}{\se^2}}{\frac{1}{\se^2}+\frac{1}{b^2}}\right].
\end{align*}
Finally, let $X_1,\ldots,X_n\sim N(\theta,\sigma^2)$, where $\sigma^2$ is
known. We take a prior $\theta\sim N(a, b^2)$, and compute the
posterior for $\theta$. Note that
\[
f(\theta\,|\, X^n)
\propto \L(\theta\,|\, X^n)f(\theta)
= f(\theta)\prod_{i=1}^n\L(\theta\,|\, X_i)
= f(\theta)\prod_{i=1}^n f_i(\theta),
\]
where $f_i$ is the PDF for an $N(X_i,\sigma^2)$ distribution. However, this is
precisely the setting of our last proposition and therefore the result
follows.
\end{ex}
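As an added sanity check (not part of the original solution), one can compare the
closed-form posterior mean and variance with a brute-force grid normalization of
likelihood times prior; the values of $\sigma$, $a$, $b$, and $n$ below are arbitrary:
\begin{minted}{python}
# Numerical check of the N(a, b^2) prior with N(theta, sigma^2) data.
# Illustrative sketch only.
import numpy as np

rng = np.random.default_rng(0)
sigma, a, b, n = 2.0, 1.0, 3.0, 50
x = rng.normal(0.7, sigma, size=n)

se2 = sigma**2 / n
xbar = x.mean()
theta_bar = (xbar * b**2 + a * se2) / (se2 + b**2)   # closed-form posterior mean
tau2 = se2 * b**2 / (se2 + b**2)                     # closed-form posterior variance

# Brute-force posterior on a grid (grid spacing cancels in the normalization).
grid = np.linspace(-5.0, 5.0, 20001)
log_post = -((x[:, None] - grid) ** 2).sum(axis=0) / (2 * sigma**2) \
    - (grid - a) ** 2 / (2 * b**2)
post = np.exp(log_post - log_post.max())
post /= post.sum()

grid_mean = (grid * post).sum()
grid_var = ((grid - grid_mean) ** 2 * post).sum()
print(theta_bar, grid_mean)   # means agree
print(tau2, grid_var)         # variances agree
\end{minted}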
\begin{ex}~
\inputminted{python}{../code/11-02.py}
\inputminted{text}{../output/11-02.txt}
\begin{enumerate}[(a)]
\item[(b)] Let $f(\mu)=1$. Recall that the posterior is proportional to the
product of the likelihood and the prior and that we therefore have
\begin{align*}
f(\mu\,|\,x^n)\propto \prod_{i=1}^n\frac{1}{\sqrt{2\pi}}
\exp\left\{-\frac{(x_i-\mu)^2}{2}\right\},
\end{align*}
        and therefore, completing the square in $\mu$,
        \begin{align*}
            f(\mu\,|\,x^n)\propto
            \exp\left\{-\frac{n(\mu-\bar{x})^2}{2}\right\},
        \end{align*}
        so that the posterior is $N(\bar{x}, 1/n)$, that is,
        $f(\mu\,|\,x^n)=\sqrt{n/(2\pi)}\,\exp\left\{-n(\mu-\bar{x})^2/2\right\}$,
        where $\bar{x}$ denotes the sample mean.
\begin{figure}[H]
\centering
\includegraphics[scale=0.8]{../images/11-02b}
\caption{Plot of the posterior density of $\mu$.}
\end{figure}
\item[(c)]~
\begin{figure}[H]
\centering
\includegraphics[scale=0.8]{../images/11-02c}
\caption{Histogram of $1000$ simulated values drawn from the
posterior density of $\mu$.}
\end{figure}
\item[(d)] Let $\theta=e^\mu$. We then have
\begin{align*}
\P{\Theta\leq \theta\,|\,x^n}
& =\P{e^\mu\leq \theta\,|\,x^n} \\
& =\P{\mu\leq \log{\theta}\,|\,x^n} \\
& =\int_{-\infty}^{\log{\theta}}\!f(\mu\,|\,x^n)\,\d{\mu},
\end{align*}
and therefore, by differentiating under the integral sign,
\[
f(\theta\,|\,x^n)
=\frac{\d}{\d\theta}\P{\Theta\leq \theta\,|\,x^n}
            =\frac{1}{\theta}f(\log{\theta}\,|\,x^n)
            =\frac{1}{\theta}\sqrt{\frac{n}{2\pi}}
            \exp\left\{-\frac{n(\log{\theta}-\bar{x})^2}{2}\right\}.
\]
\begin{figure}[H]
\centering
\includegraphics[scale=0.54]{../images/11-02d}
\caption{A histogram of $10,000$ simulated values of $\theta$ drawn
using the posterior density of $\mu$ (left) and a plot of the
exact analytically computed posterior distribution of $\theta$
(right).}
\end{figure}
\item[(e)] As per the output of the code giving in the listing at the start
of this problem, a 95\% posterior interval for $\mu$ is given by
$[4.754, 5.146]$.
\item[(f)] From the output of the code listing at the top, a 95\%
posterior interval for $\theta$ is given by $[116.004, 171.681]$. Note
that we are asked for a confidence interval, but per Theorem
11.5 we may approximate it with a Bayesian posterior interval instead.
\end{enumerate}
\end{ex}
\begin{ex}
Note that
\[
\L(\theta, x_i)=\begin{cases}
0 & \theta < x_i, \\
x_i/\theta & \text{otherwise},
\end{cases}
\]
and that therefore
\[
\L(\theta, x^n)=\begin{cases}
0 & \theta < x_{(n)}, \\
\frac{\prod_{i=1}^n x_i}{\theta^n} & \text{otherwise}.
\end{cases}
\]
Thus, if $f(\theta)\propto 1/\theta$,
\[
f(\theta\,|\, x^n)\propto \begin{cases}
0 & \theta < x_{(n)}, \\
\frac{\prod_{i=1}^n x_i}{\theta^{n+1}} & \text{otherwise},
\end{cases}
\]
or, since
\[
\int_{x_{(n)}}^\infty \frac{\prod_{i=1}^n x_i}{\theta^{n+1}}\,\d\theta
=\frac{1}{n}\prod_{i=1}^n\frac{x_i}{x_{(n)}},
\]
\[
f(\theta\,|\, x^n)=\frac{n(x_{(n)})^n}{\theta^{n+1}}I_{[x_{(n)},\infty)}(\theta).
\]
\end{ex}
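Note that this posterior is a Pareto density with shape $n$ and scale $x_{(n)}$, so it can
be sampled directly by inverting its CDF. The short sketch below is added for illustration
and is not part of the original solution:
\begin{minted}{python}
# Posterior draws for Uniform(0, theta) data with prior f(theta) proportional to 1/theta.
# Illustrative sketch only.
import numpy as np

rng = np.random.default_rng(0)
x = rng.uniform(0, 5.0, size=20)        # data with true theta = 5
n, x_max = len(x), x.max()

# F(theta) = 1 - (x_max / theta)^n for theta >= x_max, so theta = x_max * U^(-1/n).
u = rng.uniform(size=100_000)
theta_draws = x_max * u ** (-1.0 / n)

print(x_max, np.quantile(theta_draws, [0.025, 0.5, 0.975]))
\end{minted}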
\begin{ex}~
    \inputminted{python}{../code/11-04.py}
    \inputminted{text}{../output/11-04.txt}
    \begin{enumerate}[(a)]
\item By Exercise 9.7, the MLE for $\tau$ is given by
\[
\tauhat = \phat_1 -\phat_2=X_1/n_1-X_2/n_2,
\]
with
\[
            \sehat=\sqrt{
                      \frac{\phat_1(1-\phat_1)}{n_1}
                      +\frac{\phat_2(1-\phat_2)}{n_2}
                   }.
        \]
In particular, we have that the MLE for $\tau$ is $0.2$. The estimated
standard error is $0.08944$, and a 90\% confidence interval
is given by $[0.05288, 0.3471]$.
\item From the code listing given at the start of the problem, we can see
that a 90\% confidence interval for $\tau$ using the parametric
bootstrap method is given by $[0.06, 0.34]$.
\item Using the prior $f(p_1,p_2)=1$, it follows that the posterior mean of
$\tau$ is $0.1953$, and a 90\% posterior confidence interval is given
by $[0.04632, 0.3393]$.
\item Let
\[
\psi=g(p_1,p_2)=\log\left(
\left(\frac{p_1}{1-p_1}\right)\div
\left(\frac{p_2}{1-p_2}\right)
\right),
\]
and note that then
\begin{align*}
\nabla g=\begin{pmatrix}
\frac{1}{p_1-p_1^2} \\
\frac{1}{p_2^2-p_2}
\end{pmatrix}.
\end{align*}
By the equivariance of the MLE, $\psihat=g(\phat_1,\phat_2)$, and by
the multiparameter delta method
\begin{align*}
\sehat(\psihat)
& =\sqrt{
\begin{pmatrix}
\frac{1}{\phat_1-\phat_1^2} &
\frac{1}{\phat_2^2-\phat_2}
\end{pmatrix}
\begin{pmatrix}
\frac{\phat_1(1-\phat_1)}{n_1} & 0 \\
0 & \frac{\phat_2(1-\phat_2)}{n_2}
\end{pmatrix}
\begin{pmatrix}
\frac{1}{\phat_1-\phat_1^2} \\
\frac{1}{\phat_2^2-\phat_2}
\end{pmatrix}
}
=\sqrt{
\frac{1}{n_1(\phat_1-\phat_1^2)}+
\frac{1}{n_2(\phat_2^2-\phat_2)}
}.
\end{align*}
In particular, we have that the MLE of $\psi$ is $0.9808$. The
estimated standard error is $0.2041$, and a 90\% confidence interval
for $\psi$ is given by $[0.6451, 1.317]$.
\item Under the prior $f(p_1,p_2)=1$, the posterior mean of $\psi$ is
            $0.9693$, and a 90\% posterior confidence interval is given by
$[0.2244, 1.737]$.
\end{enumerate}
\end{ex}
% 5
\begin{ex}~
\inputminted{python}{../code/11-05.py}
\begin{figure}[H]
\centering
\includegraphics[scale=0.57]{../images/11-05a}
\caption{
Plots of the prior distributions of $p$ given by
$\text{Beta}(\alpha, \alpha)$ distributions for different values of
$\alpha$. Note that in all cases, the distribution is centered at $1/2$,
but becomes more sharply peaked as $\alpha$ increases.
}
\end{figure}
\begin{figure}[H]
\centering
\includegraphics[scale=0.57]{../images/11-05b}
\caption{
Plots of the different posterior distributions for $p$ given the prior
distributions from the previous figure. Note that the larger $\alpha$ is,
the less sensitive the posterior is to the data.
}
\end{figure}
\end{ex}
\begin{ex}
Let $X_1,\ldots,X_n\sim\text{Poisson}(\lambda)$.
\begin{enumerate}[(a)]
\item Recall that if $\lambda\sim\text{Gamma}(\alpha,\beta)$,
\[
f(\lambda)=\frac{1}{\beta^\alpha\Gamma(\alpha)}\lambda^{\alpha-1}e^{-\lambda/\beta},
\]
and that therefore
\begin{align*}
f(\lambda\,|\,x^n)
& \propto \left(\prod_{i=1}^n e^{-\lambda}\frac{\lambda^{x_i}}{x_i!}\right)
\frac{1}{\beta^\alpha\Gamma(\alpha)}\lambda^{\alpha-1}e^{-\lambda/\beta} \\
& \propto \lambda^{\alpha-1+\sum_{i=1}^nx_i}e^{-\lambda(\beta n+1)/\beta},
\end{align*}
and hence
\[
f(\lambda\,|\,x^n)
=\frac{1}{\beta'^{\alpha'}\Gamma(\alpha')}\lambda^{\alpha'-1}e^{-\lambda/\beta'},
\]
where
\[
\alpha'=\alpha+\sum_{i=1}^nx_i,\text{ and }
\beta'=\frac{\beta}{n\beta +1}.
\]
Note that the mean of a $\text{Gamma}(\alpha', \beta')$ distribution
            is $\alpha'\beta'$, and that therefore the posterior mean is
\[
\left(\alpha+\sum_{i=1}^nx_i\right)
\left(\frac{\beta}{n\beta +1}\right).
\]
\item Recall that for a $\text{Poisson}(\lambda)$ distribution,
\[
            \ell_n(\lambda)=\sum_{i=1}^n\left[-\lambda+x_i\log(\lambda)-\log(x_i!)\right],
\]
and therefore
\[
\frac{\d\ell_n(\lambda)}{\d\lambda}
=\sum_{i=1}^n-1+x_i/\lambda,
\text{ and }
\frac{\d^2\ell_n(\lambda)}{\d\lambda^2}
=-\sum_{i=1}^n x_i/\lambda^2.
\]
Therefore, $I(\lambda)=n/\lambda$, and the Jeffreys' prior
$f(\lambda)\propto 1/\sqrt{\lambda}$. Hence,
\begin{align*}
f(\lambda\,|\,x^n)
& \propto \left(\prod_{i=1}^n e^{-\lambda}\frac{\lambda^{x_i}}{x_i!}\right)\frac{1}{\sqrt{\lambda}} \\
& \propto \lambda^{-1/2+\sum_{i=1}^n x_i}e^{-\lambda/n^{-1}},
\end{align*}
and therefore the posterior is given by
\[
f(\lambda\,|\,x^n)
=\frac{1}{n^{-(1/2+\sum_{i=1}^nx_i)}\Gamma(1/2+\sum_{i=1}^nx_i)}\lambda^{\sum_{i=1}^nx_i+1/2-1}e^{-\lambda/n^{-1}}.
\]
\end{enumerate}
\end{ex}
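As an added numerical check (not part of the original solution), the conjugate update of
part (a) can be compared with a grid normalization of likelihood times prior; the
constants below are arbitrary:
\begin{minted}{python}
# Check of the Gamma(alpha, beta) -> Gamma(alpha', beta') Poisson update.
# Illustrative sketch only.
import numpy as np

rng = np.random.default_rng(0)
alpha, beta, lam_true, n = 2.0, 1.5, 3.0, 40
x = rng.poisson(lam_true, size=n)

alpha_post = alpha + x.sum()
beta_post = beta / (n * beta + 1)
print("closed-form posterior mean:", alpha_post * beta_post)

# Brute-force posterior on a grid (grid spacing cancels in the normalization).
lam = np.linspace(1e-6, 10.0, 20001)
log_post = -n * lam + x.sum() * np.log(lam) + (alpha - 1) * np.log(lam) - lam / beta
post = np.exp(log_post - log_post.max())
post /= post.sum()
print("grid posterior mean:       ", (lam * post).sum())
\end{minted}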
\begin{ex}
Let
\[
\psihat=\frac{1}{n}\sum_{i=1}^n\frac{Y_iR_i}{\xi_{X_i}}.
\]
Then
\begin{align*}
\E{\psihat}
& =\E{\frac{1}{n}\sum_{i=1}^n\frac{Y_iR_i}{\xi_{X_i}}} \\
& =\frac{1}{n}\sum_{i=1}^n\E{\frac{Y_iR_i}{\xi_{X_i}}} \\
& =\frac{1}{n}\sum_{i=1}^n\E{\cE{\frac{Y_iR_i}{\xi_{X_i}}}{Y_i,X_i}} \\
& =\frac{1}{n}\sum_{i=1}^n\E{\frac{Y_i}{\xi_{X_i}}\cE{R_i}{Y_i,X_i}} \\
& =\frac{1}{n}\sum_{i=1}^n\E{Y_i} \\
& =\E{Y_i} \\
& =\psi,
\end{align*}
and since $\delta\leq \xi_{X_i}\leq 1-\delta$,
\begin{align*}
\var{\frac{Y_iR_i}{\xi_{X_i}}}
& =\E{\frac{Y_i^2R_i^2}{\xi^2_{X_i}}}
            -\left[\E{\frac{Y_iR_i}{\xi_{X_i}}}\right]^2 \\
& =\E{\frac{Y_iR_i}{\xi^2_{X_i}}}
-\left[\E{\frac{Y_iR_i}{\xi_{X_i}}}\right]^2 \\
& \leq\frac{1}{\delta^2}\E{Y_iR_i}
-\frac{1}{(1-\delta)^2}\E{Y_iR_i}^2 \\
& \leq\frac{1}{\delta^2},
\end{align*}
and therefore
\[
\var{\psihat}
=\frac{1}{n}\sum_{i=1}^n\var{\frac{Y_iR_i}{\xi_{X_i}}}
\leq\frac{1}{n\delta^2}.
\]
\end{ex}
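A short simulation (added here; the joint distribution of $(X, Y, R)$ below is an
arbitrary toy choice satisfying $\delta\leq\xi_X\leq 1-\delta$) illustrates both the
unbiasedness and the variance bound:
\begin{minted}{python}
# Simulation check that psi_hat = mean(Y * R / xi_X) is unbiased for E[Y].
# Illustrative sketch only.
import numpy as np

rng = np.random.default_rng(0)
n, n_rep, delta = 2000, 500, 0.2

def xi(x):
    # Selection probability xi_X, kept within [delta, 1 - delta].
    return np.clip(0.3 + 0.4 * x, delta, 1 - delta)

estimates = []
for _ in range(n_rep):
    X = rng.uniform(size=n)
    Y = rng.binomial(1, 0.3 + 0.4 * X)   # E[Y] = 0.5 for X ~ Uniform(0, 1)
    R = rng.binomial(1, xi(X))           # missingness depends on X only
    estimates.append(np.mean(Y * R / xi(X)))

print(np.mean(estimates))                      # close to E[Y] = 0.5
print(np.var(estimates), 1 / (n * delta**2))   # variance well below the bound
\end{minted}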
\begin{ex}
We are testing the hypothesis $H_0:\mu=0$ versus $H_1:\mu\neq 0$. We take the
priors $\P{H_0}=\P{H_1}=1/2$, and under $H_1$ we take the prior
$\mu\sim N(0, b^2)$. Recall from Section 11.8 that then
\begin{align*}
\cP{H_0}{X=x}
=\frac{\L(0)}{\L(0)+\int_{-\infty}^\infty \L(\mu)f(\mu)\,\d{\mu}}.
\end{align*}
We have
\begin{align*}
\int_{-\infty}^\infty \L(\mu)f(\mu)\,\d\mu
& =\int_{-\infty}^\infty \frac{1}{\sqrt{2\pi}}\exp\left\{-\frac{(x-\mu)^2}{2}\right\}
\frac{1}{\sqrt{2\pi b^2}}\exp\left\{-\frac{\mu^2}{2b^2}\right\}\,\d\mu \\
& =\frac{1}{\sqrt{2\pi}}
\int_{-\infty}^\infty\frac{1}{\sqrt{2\pi b^2}}
\exp\left\{-\frac{(x-\mu)^2}{2}-\frac{\mu^2}{2b^2}\right\}\,\d\mu \\
& =\frac{1}{\sqrt{2\pi(b^2+1)}}
\exp\left\{-\frac{x^2}{2(b^2+1)}\right\}
\int_{-\infty}^\infty\frac{\sqrt{b^2+1}}{\sqrt{2\pi b^2}}
\exp\left\{
-\frac{\left(\mu-\frac{xb^2}{b^2+1}\right)^2}{2b^2/(b^2+1)}
\right\}\,\d\mu \\
& =\frac{1}{\sqrt{2\pi(b^2+1)}}\exp\left\{-\frac{x^2}{2(b^2+1)}\right\},
\end{align*}
since
\begin{align*}
b^2(x-\mu)^2+\mu^2
& =(b^2+1)\mu^2-2xb^2\mu+b^2x^2 \\
& =(b^2+1)\left(\mu^2-\frac{2xb^2}{b^2+1}\mu+\frac{x^2b^4}{(b^2+1)^2}\right)
-\frac{x^2b^4}{b^2+1}+b^2x^2 \\
& =(b^2+1)\left(\mu-\frac{xb^2}{b^2+1}\right)^2
+\frac{-x^2b^4+b^4x^2+b^2x^2}{b^2+1} \\
& =(b^2+1)\left(\mu-\frac{xb^2}{b^2+1}\right)^2
+\frac{b^2x^2}{b^2+1}.
\end{align*}
Therefore,
\begin{align*}
\cP{H_0}{X=x}
& =\frac{\L(0)}{\L(0)+\int_{-\infty}^\infty \L(\mu)f(\mu)\,\d{\mu}} \\
& =\frac{\frac{1}{\sqrt{2\pi}}\exp\left\{-\frac{x^2}{2}\right\}}{
\frac{1}{\sqrt{2\pi}}\exp\left\{-\frac{x^2}{2}\right\}
+\frac{1}{\sqrt{2\pi(b^2+1)}}\exp\left\{-\frac{x^2}{2(b^2+1)}\right\}
} \\
& =\frac{\exp\left\{-\frac{x^2}{2}\right\}}{
\exp\left\{-\frac{x^2}{2}\right\}
+\frac{1}{\sqrt{b^2+1}}\exp\left\{-\frac{x^2}{2(b^2+1)}\right\}} \\
& =\frac{\sqrt{b^2+1}}{\sqrt{b^2+1}+\exp\left\{\frac{x^2b^2}{2(b^2+1)}\right\}}.
\end{align*}
The $p$-value of the Wald test is given by
\[
\P{|Z|>|x|}=2\Phi(-|x|).
\]
\inputminted{python}{../code/11-08a.py}
\begin{figure}[H]
\centering
\includegraphics[scale=0.54]{../images/11-08a}
\caption{Comparison of the posterior probability of $H_0$ for different
values of $b$ and the Wald test $p$-value as a function of $x$.}
\end{figure}
If instead we have a sample of size $n$,
\begin{align*}
& \int_{-\infty}^\infty \L(\mu)f(\mu)\,\d\mu \\
& \,=\int_{-\infty}^\infty\prod_{i=1}^n\left[\frac{1}{\sqrt{2\pi}}\exp\left\{-\frac{(x_i-\mu)^2}{2}\right\}\right]
\frac{1}{\sqrt{2\pi b^2}}\exp\left\{-\frac{\mu^2}{2b^2}\right\}\,\d\mu \\
        & \, =(2\pi)^{-n/2}(b^2n+1)^{-1/2}\exp\left\{-\frac{\beta-b^2\alpha^2/(b^2n+1)}{2}\right\}
\int_{-\infty}^\infty \frac{\sqrt{b^2n+1}}{\sqrt{2\pi b^2}}\exp\left\{
-\frac{\left(\mu-\frac{b^2\alpha}{b^2n+1}\right)^2}{2b^2/(b^2n+1)}
\right\}\,\d{\mu} \\
        & \, =(2\pi)^{-n/2}(b^2n+1)^{-1/2}\exp\left\{-\frac{\beta(b^2n+1)-b^2\alpha^2}{2(b^2n+1)}\right\},
\end{align*}
since
\begin{align*}
b^2\sum_{i=1}^n(x_i-\mu)^2+\mu^2
& =(b^2n+1)\mu^2-2b^2\left(\sum_{i=1}^nx_i\right)\mu+b^2\left(\sum_{i=1}^n x_i^2\right) \\
& =(b^2n+1)\left(\mu^2-2\frac{b^2\alpha}{b^2n+1}\mu+\frac{b^4\alpha^2}{(bn^2+1)^2} \right)-\frac{b^4\alpha^2}{b^2n+1}+b^2\beta \\
& =(b^2n+1)\left(\mu-\frac{b^2\alpha}{b^2n+1}\right)^2+b^2\left(\beta-\frac{b^2\alpha^2}{b^2n+1}\right) \\
\end{align*}
where $\alpha=\sum_{i=1}^n x_i$ and $\beta=\sum_{i=1}^n x_i^2$.
Therefore,
\begin{align*}
\cP{H_0}{X^n=x^n}
& =\frac{\L(0)}{\L(0)+\int_{-\infty}^\infty \L(\mu)f(\mu)\,\d{\mu}} \\
& =\frac{(2\pi)^{-n/2}\exp\left\{-\frac{1}{2}\beta\right\}}{
(2\pi)^{-n/2}\exp\left\{-\frac{1}{2}\beta\right\}
+(2\pi)^{-n/2}(b^2n+1)^{-1/2}\exp\left\{-\frac{\beta (b^2n+1)-b^2\alpha^2}{2(b^2n+1)}\right\}
} \\
& =\frac{\sqrt{b^2n+1}}{\sqrt{b^2n+1}+\exp\left\{\frac{b^2\alpha^2}{2(b^2n+1)}\right\}} \\
& =\frac{\sqrt{b^2n+1}}{\sqrt{b^2n+1}+\exp\left\{\frac{b^2n^2\Xbar^2}{2(b^2n+1)}\right\}},
\end{align*}
while the Wald test $p$-value for a sample of size $n$ is
\[
\P{|Z|>\sqrt{n}|\Xbar|}
=2\Phi\left(-\sqrt{n}|\Xbar|\right).
\]
\inputminted{python}{../code/11-08b.py}
\begin{figure}[H]
\centering
\includegraphics[scale=0.545]{../images/11-08b}
\caption{Comparison of the posterior probability of $H_0$ for different
values of $b$ and the Wald test $p$-value as a function of the sample
average and for different sample sizes. Note that as $n$ increases, a
region forms in which the $p$-value is near $0$, but the conditional
probability of $H_0$ is near $1$.}
\end{figure}
\end{ex} | {
"alphanum_fraction": 0.4974760829,
"avg_line_length": 42.1926977688,
"ext": "tex",
"hexsha": "95a7c4a7d40334f875ef54963612192b35fc6ded",
"lang": "TeX",
"max_forks_count": null,
"max_forks_repo_forks_event_max_datetime": null,
"max_forks_repo_forks_event_min_datetime": null,
"max_forks_repo_head_hexsha": "0572cdae22b128e71c1c6c7ead2bf3b259875bc9",
"max_forks_repo_licenses": [
"CC0-1.0"
],
"max_forks_repo_name": "dtrifuno/all-of-stats-solutions",
"max_forks_repo_path": "src/tex/ch11.tex",
"max_issues_count": null,
"max_issues_repo_head_hexsha": "0572cdae22b128e71c1c6c7ead2bf3b259875bc9",
"max_issues_repo_issues_event_max_datetime": null,
"max_issues_repo_issues_event_min_datetime": null,
"max_issues_repo_licenses": [
"CC0-1.0"
],
"max_issues_repo_name": "dtrifuno/all-of-stats-solutions",
"max_issues_repo_path": "src/tex/ch11.tex",
"max_line_length": 204,
"max_stars_count": null,
"max_stars_repo_head_hexsha": "0572cdae22b128e71c1c6c7ead2bf3b259875bc9",
"max_stars_repo_licenses": [
"CC0-1.0"
],
"max_stars_repo_name": "dtrifuno/all-of-stats-solutions",
"max_stars_repo_path": "src/tex/ch11.tex",
"max_stars_repo_stars_event_max_datetime": null,
"max_stars_repo_stars_event_min_datetime": null,
"num_tokens": 7876,
"size": 20801
} |
%\documentclass[11pt]{article}
\documentclass[11pt]{report}
\usepackage{amsmath, amstext, amssymb}
\usepackage{widetable}
\usepackage{booktabs}
\usepackage{newcent}
\usepackage{pstricks}
\usepackage{fancyhdr}
\usepackage[dvips]{graphicx}
\usepackage{makeidx}
\usepackage{psfrag}
\usepackage{alltt}
\usepackage{index}
\usepackage{fancyvrb}
\usepackage{pst-blur}
\usepackage{pst-grad}
\usepackage{epsfig}
%\usepackage{subfig}
\usepackage{subfigure}
\usepackage{xspace}
\usepackage{array}
\usepackage[colorlinks=true,linktocpage=true]{hyperref}
%% Listings package START
\usepackage{color}
\usepackage{listings}
\definecolor{darkblue}{rgb}{0,0,.6}
\definecolor{darkred}{rgb}{.6,0,0}
\definecolor{darkgreen}{rgb}{0,.6,0}
\definecolor{red}{rgb}{.98,0,0}
\definecolor{lightgrey}{rgb}{0.98,0.98,0.98}
\definecolor{black}{rgb}{0,0,0}
%\lstloadlanguages{C++}
%\lstset{
% language=C++,
% basicstyle=\small\ttfamily,
% commentstyle=\itshape\color{darkgreen},
% keywordstyle=\bfseries\color{darkblue},
% stringstyle=\color{darkred},
% showspaces=false,
% showtabs=false,
% columns=fixed,
% backgroundcolor=\color{lightgrey},
% numbers=none,
% frame=single,
% numberstyle=\tiny,
% breaklines=true,
% showstringspaces=false,
% xleftmargin=0.1cm
%}
\lstset{
language=C++,
frame=lines,
basicstyle=\small\ttfamily,
keywordstyle=\bfseries\color{darkblue},
stringstyle=\color{black}\ttfamily,
tabsize=2,
%emphstyle=\color{blue}\texttt
commentstyle=\color{darkgreen}\ttfamily,
numbers=left,
numberstyle=\tiny,
showstringspaces=false,
literate={~} {$\sim$}{1}
}
\lstdefinelanguage{XML}
{
morestring=[b]",
morestring=[s]{>}{<},
morecomment=[s]{<?}{?>},
stringstyle=\color{black},
identifierstyle=\color{darkblue},
keywordstyle=\color{cyan},
morekeywords={xmlns,version,type}% list your attributes here
}
\newcommand{\cmake} {CMake\xspace}
\newcommand{\viennax} {ViennaX\xspace}
\newcommand{\viennafactory} {ViennaFactory\xspace}
\newcommand{\viennautils} {ViennaUtils\xspace}
\newcommand{\gcc} {GNU GCC\xspace}
\newcommand{\boost} {Boost\xspace}
\newcommand{\plugin} {plugin\xspace}
\newcommand{\openmpi} {Open MPI\xspace}
\newcommand{\mpich} {MPICH2\xspace}
\newcommand{\lammpi} {LAM/MPI\xspace}
\renewcommand{\arraystretch}{1.2}
%\include{keywords}
\include{setup}
\begin{document}
\pagenumbering{roman}
% ==============================================================================
% TITLEPAGE
% ==============================================================================
\begin{titlepage}
\vspace*{3cm}
\Huge{ViennaWD}
\rule[0.0cm]{12.3cm}{0.05cm}
\begin{flushright}
\Large{User Manual\\}
\Large{v3.0.0}
%\Large{Building Instructions}\\
%\Large{Input Specifications}
\end{flushright}
\vspace{12cm}
\rule[0.0cm]{16.0cm}{0.05cm}
\begin{figure}[!ht]
\vspace{-1.0cm}
\centering
\begin{minipage}{3cm}
\epsfig{file=figures/TU_Signet_CMYK, scale=0.25}
\end{minipage}
\hfill
\hspace{-0.5cm}
\begin{minipage}{5.5cm}
\vspace{0.5cm}
\begin{center}
Institute for Microelectronics\newline
Technische Universit\"at Wien\newline
Gu\ss hausstra\ss e 27-29 / E360\newline
A-1040 Vienna, Austria\newline
\end{center}
\end{minipage}
\hfill
\begin{minipage}{2.6cm}
\epsfig{file=figures/logo_px200, scale=1.6}
\end{minipage}
\end{figure}
\end{titlepage}
% ==============================================================================
\clearpage
% ==============================================================================
% CONTRIBUTERS
% ==============================================================================
\begin{tabular}{ll}
Copyright {\copyright} 2012-2015 & Institute for Microelectronics, TU Wien.
\end{tabular}
\vspace{3.5cm}
\textbf{Principal Investigator:}\\
Mihail Nedjalkov\\
\textbf{Developers:}\\
Paul Ellinghaus\\
Josef Weinbub\\
Matthias Glanz\\
\textbf{Advisors:}\\
Ivan Dimov\\
Dragica Vasileska\\
Siegfried Selberherr\\
\textbf{Former Contributers:}\\
Marek Pobjecky\\
Philipp Schwaha\\
\vspace{7.0cm}
Institute for Microelectronics, TU Wien\newline
Gu\ss hausstra\ss e 27-29 / E360\newline
A-1040 Vienna, Austria/Europe\newline
\begin{tabular}{ll}
Phone & +43-1-58801-36001\\
FAX & +43-1-58801-36099\\
Web & \texttt{http://www.iue.tuwien.ac.at/}
\end{tabular}
% ==============================================================================
%\addtocontents{toc}{\protect\setcounter{tocdepth}{1}}
\setcounter{tocdepth}{3}
\tableofcontents
\addcontentsline{toc}{chapter}{Contents}
\clearpage
\pagenumbering{arabic}
% ==============================================================================
\chapter{Overview} %\addcontentsline{toc}{chapter}{Overview}
% ==============================================================================
ViennaWD is a suite of Monte Carlo simulation tools intended for the simulation of semiconductor devices in the classical domain and the investigation of quantum phenomena using the Wigner formalism. The suite encompasses the following simulation tools:
\begin{itemize}
\item Wigner Ensemble Monte Carlo (WEMC)
\item Classical Ensemble Monte Carlo (CEMC)
\item Phonon Decoherence (PD)
\end{itemize}
The purpose, use, and installation of each tool are discussed in the following chapters.
\chapter{Building Information}
ViennaWD ships a central CMake~\cite{cmake} script, which allows all simulators to be configured and built in one place.
In the following, we discuss general installation aspects; for simulator-specific aspects, please consult the respective
building-related sections.
\section{Build Configuration}
A default build configuration, which sets up all simulators, is generated by:
\begin{lstlisting}
$> cd ViennaWD/
$> mkdir build
$> cd build
$> cmake ..
\end{lstlisting}
\NOTE{Watch for errors or warnings reported by CMake during the configuration process.}
\TIP{CMake will automatically try to discover the required dependencies on your system.}
\TIP{Use CMake's GUI application (\texttt{cmake-gui}) to conveniently select build features.}
\section{Configuration Options}
ViennaWD's build system allows selecting the optimization level as well as the simulation tools to be generated via the following CMake~\cite{cmake} options; an example invocation is shown below the list:
\begin{itemize}
\item -D CMAKE\_BUILD\_TYPE=Debug$|$Release$|$RelWithDebInfo (default=Release)
\item -D BUILD\_WEMC=OFF (default=ON)
\item -D BUILD\_CEMC=ON (default=OFF)
\item -D BUILD\_PD=ON (default=OFF)
\end{itemize}
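For example, a configuration that additionally builds the classical simulator with debug information could be requested as follows (an illustrative invocation combining the options above):
\begin{lstlisting}
$> cmake -D CMAKE_BUILD_TYPE=RelWithDebInfo -D BUILD_CEMC=ON ..
\end{lstlisting}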
\section{Building}
To build the simulators, issue the following:
\begin{lstlisting}
$> make
\end{lstlisting}
\TIP{Use \texttt{-jN}, where \texttt{N} refers to the number of parallel jobs/available CPU cores, to speed-up the build process.}
% ==============================================================================
% Add Project specifc external Latex source file
% ==============================================================================
\input{wigner_ensemble_monte_carlo}
\input{classic_ensemble_monte_carlo}
\input{phonon_decoherence}
\input{visualization}
% ==============================================================================
\begingroup
\raggedright
%\bibliographystyle{IEEEtran_v1.13}
\bibliographystyle{MyIEEEtran}
\bibliography{literature}
\addcontentsline{toc}{chapter}{Bibliography}
\end{document}
| {
"alphanum_fraction": 0.6476649271,
"avg_line_length": 27.2737226277,
"ext": "tex",
"hexsha": "4a9cc2cc95f8c437ad4fb029d2bf2cf7677d6d6b",
"lang": "TeX",
"max_forks_count": 1,
"max_forks_repo_forks_event_max_datetime": "2020-02-18T19:55:18.000Z",
"max_forks_repo_forks_event_min_datetime": "2020-02-18T19:55:18.000Z",
"max_forks_repo_head_hexsha": "fbad17ca280b0ea97ccb9a9e8efb64a6f11dd9ee",
"max_forks_repo_licenses": [
"MIT"
],
"max_forks_repo_name": "ViennaTools/ViennaEMC",
"max_forks_repo_path": "doc/manual/manual.tex",
"max_issues_count": null,
"max_issues_repo_head_hexsha": "fbad17ca280b0ea97ccb9a9e8efb64a6f11dd9ee",
"max_issues_repo_issues_event_max_datetime": null,
"max_issues_repo_issues_event_min_datetime": null,
"max_issues_repo_licenses": [
"MIT"
],
"max_issues_repo_name": "ViennaTools/ViennaEMC",
"max_issues_repo_path": "doc/manual/manual.tex",
"max_line_length": 253,
"max_stars_count": null,
"max_stars_repo_head_hexsha": "fbad17ca280b0ea97ccb9a9e8efb64a6f11dd9ee",
"max_stars_repo_licenses": [
"MIT"
],
"max_stars_repo_name": "ViennaTools/ViennaEMC",
"max_stars_repo_path": "doc/manual/manual.tex",
"max_stars_repo_stars_event_max_datetime": null,
"max_stars_repo_stars_event_min_datetime": null,
"num_tokens": 2097,
"size": 7473
} |
\section*{Context}
This project offers a severe coarse-graining approach for the analysis of neutron reflectometry\footnote{NR.} and XRR data.
The system is coarse-grained to represent a head group and a tail group of a phospholipid species.
There is a chemical constraint present in the model, such that the number of head groups must be equal to the number of pairs of tail groups.
However, there is no potential model considered beyond this ``bonded'' interaction.
Additionally, this modelling approach is applied again in Chapter~\ref{reflectometry2}, as an example of the cutting edge of traditional modelling, against which the classical simulation-driven methods are compared.
The specific application of this modelling approach grew from a collaboration with experimental colleagues working on self-assembly in DES.
Therefore, this chemical system will be briefly introduced in Section~\ref{sec:ref1intro}.
However, the main focus of this chapter will be the modelling methodology.
| {
"alphanum_fraction": 0.8173562059,
"avg_line_length": 99.1,
"ext": "tex",
"hexsha": "5e33b68d86013a9ea708327607fe1a25114b63f3",
"lang": "TeX",
"max_forks_count": null,
"max_forks_repo_forks_event_max_datetime": null,
"max_forks_repo_forks_event_min_datetime": null,
"max_forks_repo_head_hexsha": "4c76e837b1041472a5522427de0069a5a28d40c9",
"max_forks_repo_licenses": [
"CC-BY-4.0"
],
"max_forks_repo_name": "arm61/thesis",
"max_forks_repo_path": "reports/chapters/reflectometry1/context.tex",
"max_issues_count": 1,
"max_issues_repo_head_hexsha": "4c76e837b1041472a5522427de0069a5a28d40c9",
"max_issues_repo_issues_event_max_datetime": "2019-06-04T17:11:33.000Z",
"max_issues_repo_issues_event_min_datetime": "2019-06-04T17:11:33.000Z",
"max_issues_repo_licenses": [
"CC-BY-4.0"
],
"max_issues_repo_name": "arm61/thesis",
"max_issues_repo_path": "reports/chapters/reflectometry1/context.tex",
"max_line_length": 215,
"max_stars_count": 3,
"max_stars_repo_head_hexsha": "4c76e837b1041472a5522427de0069a5a28d40c9",
"max_stars_repo_licenses": [
"CC-BY-4.0"
],
"max_stars_repo_name": "arm61/thesis",
"max_stars_repo_path": "reports/chapters/reflectometry1/context.tex",
"max_stars_repo_stars_event_max_datetime": "2020-06-01T06:25:20.000Z",
"max_stars_repo_stars_event_min_datetime": "2019-06-04T20:53:19.000Z",
"num_tokens": 199,
"size": 991
} |
% Define document class
\documentclass[twocolumn]{aastex631}
% Custom style defs for this paper
\input{style.tex}
\usepackage{enumitem}
\setlist[enumerate]{wide=0pt,widest=99,leftmargin=\parindent,labelsep=*}
\setlist[itemize]{wide=0pt,widest=99,leftmargin=\parindent,labelsep=*}
% Begin!
\begin{document}
% Title
\title{\showyourwork: a workflow for open source scientific articles}
% Author list
\author[0000-0002-0296-3826]{Rodrigo Luger}
\author{Others TBD}
% Abstract with filler text
\begin{abstract}
Abstract coming soon.
\end{abstract}
% Main body with filler text
\section{Introduction}
\label{sec:intro}
This paper introduces \showyourwork, a workflow that enables the creation and distribution of fully reproducible and open source scientific articles.
As astronomical software becomes increasingly complex, and as research results become increasingly interdependent, it is crucial to ensure the validity and correctness of papers published in the field.
However, the current peer review system is simply not set up to do this, since checking all of the results in a paper would require the painstaking and methodical review of all of the paper's methods—which usually means scrutinizing all of the code used to generate the figures, tables, and other quantities in the paper.
In practice, this is virtually impossible for three reasons:
\begin{enumerate}
%
\item Modern codebases can be very large and often require deep familiarity with the software to use—not to mention review them.
%
\item Writing a paper in astronomy is rarely ever done in a linear, procedural fashion: the codebase is constantly changing, and the state of the code when (say) Figure 1 was produced may be very different from that when (say) Figure 2 was made.
Moreover, many results depend on the execution of lengthy pipelines with intermediate steps, each potentially requiring manual tinkering that is not always documented and may be difficult to exactly replicate.
%
\item The majority of astronomical code is not open source and simply cannot be vetted by third parties.
While there has been a marked increase in the number of open source astronomical tools in recent years (e.g., astropy, exoplanet, emcee, exofast...), most code associated with the generation of the results in individual papers is not open source; readers are often expected to take it on faith that there are no bugs in that code, or that the code works exactly as described in the text, with no pitfalls or missing details.
Even when the code is made publicly available, e.g., by being published on \texttt{GitHub}, it is often not documented sufficiently to enable one to execute it and reproduce the paper's results out-of-the-box.
And even with proper documentation, the code may require external dependencies, custom virtual environments, or access to closed-source datasets that make it difficult or impossible for a third party to replicate it.
%
\end{enumerate}
\showyourwork was designed to tackle these three issues, making it easy to develop, publish, and distribute truly open and reproducible research papers in astronomy.
It exists as a \texttt{GitHub} template repository, which can be cloned at the click of a button to set up a new article.
Users then add their LaTeX manuscript, bibliography, scripts used in the generation of the paper's figures, an anaconda environment specification, and instructions on how to download any external datasets required by the figures.
Every time the user pushes a new commit to \texttt{GitHub}, the article is automatically built on the cloud using \texttt{GitHub Actions} and the resulting PDF is pushed to a separate branch of the repository.
The build step—which sets up the conda environment, generates all figures from scratch (with intelligent caching), and compiles the PDF—acts as a unit test for the paper.
If it passes, the paper is (by definition) reproducible.
The workflow works out of the box for simple projects, in which each figure is generated by running a given \texttt{Python} script.
But it also works for more complicated pipelines, such as projects that depend on many intermediate steps or those that require running expensive simulations on clusters.
The workflow interfaces directly with \texttt{Zenodo}, allowing users to automatically upload the results of simulations so that expensive build steps can be bypassed on the cloud.
In fact, much of the machinery under the hood exists to make the workflow as flexible and customizable as possible; see the documentation for a list of examples of custom workflows.
Papers that use this workflow can be reproduced by cloning the repository and running \texttt{make}.
Furthermore, these papers include clickable icons next to each of their figures linking to (1) the exact version of the script used to generate them and (2) the exact version(s) of the \texttt{Zenodo}-hosted dataset(s) used in their creation.
\section{Examples}
\label{sec:examples}
\begin{figure}[ht!]
\begin{centering}
\includegraphics[width=\linewidth]{figures/eccentricity.pdf}
\caption{
The effect of binary eccentricity on the detectability of a \emph{LISA} gravitational wave source; reproduced from Figure 3 in \citet{Wagg2021}.
This figure was automatically generated from the script \texttt{src/figures/eccentricity.py}.
}
\label{fig:eccentricity}
\end{centering}
\end{figure}
\begin{figure}[ht!]
\begin{centering}
\includegraphics[width=\linewidth]{figures/luhman16b.pdf}
\caption{
16 \emph{CRIRES} spectra of WISE 1049-5319B spanning a full rotation period of the brown dwarf; adapted from Figure 14 in \citet{Luger2021} and based on data from \citet{Crossfield2014}.
This figure was automatically generated from the script \texttt{src/figures/luhman16b.py} and a dataset downloaded from \texttt{Zenodo}.
}
\label{fig:luhman16b}
\end{centering}
\end{figure}
\begin{figure}[ht!]
\begin{centering}
\includegraphics[width=\linewidth]{figures/rossbyridge.pdf}
\caption{
A pile-up of stars in the rotation period-temperature space at slightly faster rotation than the Sun (orange dot); adapted from Figure 1 in David et al. (in prep).
This figure was automatically generated from the script \texttt{src/figures/rossbyridge.py}, a dataset downloaded from \texttt{Zenodo}, and the helper script \texttt{src/figures/helpers/sun.py}.
}
\label{fig:rossbyridge}
\end{centering}
\end{figure}
\begin{figure}[ht!]
\begin{centering}
\includegraphics[width=\linewidth]{figures/HD118203_transit.pdf}
\includegraphics[width=\linewidth]{figures/HD118203_corner.pdf}
\caption{
The phase-folded transit of HD 118203b in \emph{TESS} (\emph{top}) and the inferred joint posterior distributions over its period, radius, and impact parameter (\emph{bottom}); adapted from the \texttt{exoplanet} documentation \citep{ForemanMackey2021}.
Both figures were generated from the script \texttt{src/figures/HD118203.py}.
They both depend on an intermediate result (a dataset containing the MCMC posterior samples), which can either be generated from scratch by running \texttt{src/analysis/HD118203.py} or by downloading a cached version from \texttt{Zenodo}.
}
\label{fig:HD118203}
\end{centering}
\end{figure}
\begin{figure}[ht!]
\begin{centering}
\includegraphics[width=\linewidth]{figures/v1298tau.pdf}
\caption{
\emph{TESS} target pixel file (TPF) of V1298 Tau
overlaid with an r-band sky image from the Digitized Sky Survey (DSS);
reproduced from Figure 1 in \citet{Feinstein2021}.
This figure was downloaded directly from the \texttt{GitHub} repository for that paper by providing a custom rule in the \texttt{Snakefile}.
By default, margin icons are not added to figures with custom rules.
Here we manually add an icon linking to the original figure using the \texttt{\textbackslash marginicon} command.
}
\marginicon{%
\href{https://github.com/afeinstein20/v1298tau_tess/raw/c670e0/src/static/TESSaperture.pdf}{\color{sywBlue}\faFileImage}
}
\label{fig*:v1298tau}
\end{centering}
\end{figure}
\section{Prerequisites}
\label{sec:prereq}
The workflow requires \texttt{conda}, a Linux/UNIX/macOS operating system, and a \texttt{GitHub} account.
\section{Starting a project}
\label{sec:start}
%
Users can start a new project by \href{https://github.com/rodluger/showyourwork-template/generate}{creating a fresh repository based on the \texttt{showyourwork-template}}.
This will create a new repository under the user's \texttt{GitHub} account and trigger a \texttt{GitHub Action} that will finish the setup process and build a skeleton version of the paper.
After a few minutes, a banner will appear on the repository's main page with links to the build logs and the compiled article PDF (see Figure~\ref{fig:banner}).
%
\begin{figure}[th!]
\begin{centering}
\includegraphics[width=\linewidth]{static/banner.png}
\caption{
The default \texttt{README.md} banner in a repository instantiated from the \texttt{showyourwork-template}, with links to the \texttt{GitHub Action} build logs, a tarball containing the \texttt{TeX} source for the article, a directed acyclic graph (DAG) of the build process, and the compiled article PDF, respectively.
Note that since this figure is a screenshot, we place it in the \texttt{src/static} folder and \texttt{git}-commit it.
The workflow skips the generation step for any figures in that directory; note the absence of a margin icon next to this caption.
}
\label{fig:banner}
\end{centering}
\end{figure}
%
At this point, the repository can be cloned locally, e.g.,\\
\noindent\begin{minipage}{\linewidth}
\begin{lstlisting}[
style=bash,
otherkeywords={user,repo},
emph={user,repo},
emphstyle={\color{lsthilite}}
]
git clone https://github.com/user/repo
cd repo
\end{lstlisting}
\end{minipage}
\noindent where {\color{lsthilite}\texttt{user}} and {\color{lsthilite}\texttt{repo}} are the \texttt{GitHub} user name and repository name, respectively.
The user may now edit the manuscript (\texttt{src/ms.tex}), add figure scripts to the \texttt{src/figures} directory, etc. (see \S\ref{sec:struct}).
The article may be built locally by running \texttt{make} in the top level directory (see \S\ref{sec:local-builds}).
Upon committing and pushing the changes to the remote repository, the \texttt{GitHub Action} will be triggered automatically, generating a new article PDF in sync with the latest changes (see \S\ref{sec:remote-builds}).
\section{Reproducing a project}
\label{sec:reproduce}
%
Any project based on \showyourwork can be reproduced by cloning its \texttt{GitHub} repository and running \texttt{make}:\\
\noindent\begin{minipage}{\linewidth}
\begin{lstlisting}[
style=bash,
otherkeywords={user,repo},
emph={user,repo},
emphstyle={\color{lsthilite}}
]
git clone https://github.com/user/repo
cd repo
make
\end{lstlisting}
\end{minipage}
\noindent This will create a fresh \texttt{conda} environment and run the \texttt{Snakemake} workflow to build the article from scratch, which may entail the execution of computationally intensive scripts/tasks.
These may sometimes be skipped by instead running\\
\noindent\begin{minipage}{\linewidth}
\begin{lstlisting}[
style=bash
]
make fast
\end{lstlisting}
\end{minipage}
\noindent which will download any available intermediate files from \texttt{Zenodo} instead of generating them from scratch. See \S\ref{sec:local-builds} for details.
\section{Local builds}
\label{sec:local-builds}
%
Coming soon.
\section{Remote builds}
\label{sec:remote-builds}
%
Coming soon.
\section{Repository structure}
\label{sec:struct}
%
\begin{figure}[ht!]
\begin{centering}
\includegraphics[width=0.5\linewidth]{figures/tree.pdf}
\caption{
The basic repository structure for an open source scientific article based on \texttt{showyourwork-template}.
See text for details.
}
\label{fig:tree}
\end{centering}
\end{figure}
%
Figure~\ref{fig:tree} shows the basic directory structure for a repository instantiated from \texttt{showyourwork-template}.
The main components are:
\begin{itemize}
\item \texttt{.github/workflow/showyourwork.yml}: The configuration file for the \texttt{GitHub Actions} workflow, with instructions on how to set up the virtual environment, check out the repository, and invoke the \texttt{showyourwork-action} to build and publish the article PDF.
\item \texttt{showyourwork}: The \texttt{git} submodule containing the main workflow, which manages the entire build process for the article.
\item \texttt{src}: A directory containing all of the scripts needed to generate the article.
\item \texttt{src/data}: A directory containing programmatically generated (or downloaded) datasets and dependencies that are not tracked by \texttt{git}; this directory should be empty when the repository is first cloned.
\item \texttt{src/figures}: A directory containing all of the scripts needed to generate the article figures.
When the article is built, figure (output) files will be stored here, but they should not be tracked by \texttt{git}.
\item \texttt{src/static}: A directory containing miscellaneous files (usually figures) that are tracked by \texttt{git}.
These may include photographs, flowcharts, or other figures that cannot be programmatically generated.
\item \texttt{src/ms.tex}: The main \texttt{TeX} article manuscript.
\item \texttt{Makefile}: A read-only UNIX/Linux makefile that enables users to build the article by running \texttt{make} in the command-line terminal.
\item \texttt{Snakefile}: A file containing the rules for the \texttt{Snakemake} workflow; see \S\ref{sec:Snakefile}.
By default, it simply imports all of the rules defined in \texttt{showyourwork/workflow/Snakefile}, but users can edit this file to add new rules or customize existing rules.
\item \texttt{environment.yml}: The \texttt{conda} environment file specifying all of the direct software dependencies of the workflow; see \S\ref{sec:environmentyml}.
\item \texttt{showyourwork.yml}: The main configuration file for the workflow, where users can specify figure and dataset dependencies, instructions for downloading datasets from \texttt{Zenodo}, etc.; see \S\ref{sec:showyourworkyml}.
\end{itemize}
\section{Automatic figure generation}
\label{sec:auto-fig}
%
The workflow automatically determines the relationship between a figure (e.g., a \texttt{*.pdf} image) and the script that generated it (e.g., a \texttt{*.py} file) via inspection of the \texttt{figure} environment in the \texttt{TeX} file.
Specifically, it inspects the \texttt{{\textbackslash}includegraphics} and \texttt{{\textbackslash}label} commands to infer the name of the figure and the script that generated it, respectively.
Consider the following snippet, used to generate Figure~\ref{fig:eccentricity}:\\
\noindent\begin{minipage}{\linewidth}
\begin{lstlisting}[
style=LaTeX,
otherkeywords={figures/eccentricity.pdf,fig:eccentricity},
emph={figures/eccentricity.pdf,fig:eccentricity},
emphstyle={\color{lsthilite}}
]
\begin{figure}
\begin{centering}
\includegraphics{figures/eccentricity.pdf}
\caption{...}
\label{fig:eccentricity}
\end{centering}
\end{figure}
\end{lstlisting}
\end{minipage}
\noindent The convention in \showyourwork is to infer the parent script for a figure referenced in an \texttt{includegraphics} call (in this case, {\color{lsthilite}\texttt{figures/eccentricity.pdf}}) from the figure \texttt{label}.
Specifically, if a label starts with {\color{lsthilite}\texttt{fig:}}, the remainder of the label (e.g., {\color{lsthilite}\texttt{eccentricity}}) is interpreted as the name of a script in the \texttt{figures/} directory which should be executed to produce that figure.
By default, figure scripts are expected to be \texttt{Python} scripts, so in this case the workflow will attempt to run\\
\noindent\begin{minipage}{\linewidth}
\begin{lstlisting}[
style=bash
]
python eccentricity.py
\end{lstlisting}
\end{minipage}
\noindent from within the \texttt{figures/} directory to generate \texttt{eccentricity.pdf}.
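For readers unfamiliar with this pattern, the listing below is a minimal, purely hypothetical sketch of what such a figure script could look like; the file name, plot contents, and all variable names are ours and are not taken from the actual \texttt{eccentricity.py} script:\\
\noindent\begin{minipage}{\linewidth}
\begin{lstlisting}[language=Python]
# src/figures/sine.py (hypothetical example)
# A figure script simply saves its output into the figures/
# directory; the workflow runs it automatically at build time.
import numpy as np
import matplotlib.pyplot as plt

x = np.linspace(0, 2 * np.pi, 500)
fig, ax = plt.subplots()
ax.plot(x, np.sin(x))
ax.set_xlabel("x")
ax.set_ylabel("sin(x)")
fig.savefig("sine.pdf", bbox_inches="tight")
\end{lstlisting}
\end{minipage}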
This behavior may be customized in several ways.
For instance, the figure script need not be a \texttt{Python} script; see \S\ref{sec:other-lang} for information on how to include scripts in any language.
Another common scenario involves a script that generates multiple figures. These can all be included (using \texttt{{\textbackslash}includegraphics}) in the same figure environment with a single shared label (denoting the name of the script), or in multiple different figure environments by appending an arbitrary suffix to each figure label to make them all unique (e.g., \texttt{fig:eccentricity:one}, \texttt{fig:eccentricity:two}, etc.).
And finally, this behavior may be overridden entirely by either placing the figure in the \texttt{src/static} directory (and committing it directly to the \texttt{GitHub} repository) or by supplying a label that instead begins with \texttt{fig*:}, which instructs the workflow to not attempt to programmatically generate the figure.
In the latter case, a custom rule must be provided in the \texttt{Snakefile} to generate it (see, e.g., Figure~\ref{fig*:v1298tau}).
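As a rough illustration (and not the actual rule used in that paper), a custom download rule in the \texttt{Snakefile} might look something like the following; the output path and the use of \texttt{curl} are our assumptions:\\
\noindent\begin{minipage}{\linewidth}
\begin{lstlisting}[style=Snakefile]
# Hypothetical custom rule: fetch a figure instead of generating it
rule v1298tau:
    params:
        url="https://github.com/afeinstein20/v1298tau_tess/raw/c670e0/src/static/TESSaperture.pdf"
    output:
        "src/figures/v1298tau.pdf"
    shell:
        "curl -L {params.url} -o {output}"
\end{lstlisting}
\end{minipage}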
\section{Support for other languages}
\label{sec:other-lang}
%
Scripts in languages other than \texttt{Python} are supported via the inclusion of entries under the \texttt{scripts} key in the \texttt{showyourwork.yml} config file.
Consider Figure~\ref{fig:tree}, which was generated from the \texttt{TeX} file \texttt{figures/tree.tex}, which contains a \texttt{TikZ} picture.
In the main manuscript, we include the figure in the usual way, i.e.,\\
\noindent\begin{minipage}{\linewidth}
\begin{lstlisting}[
style=LaTeX,
otherkeywords={figures/tree.pdf,fig:tree},
emph={figures/tree.pdf,fig:tree},
emphstyle={\color{lsthilite}}
]
\begin{figure}
\begin{centering}
\includegraphics{figures/tree.pdf}
\caption{...}
\label{fig:tree}
\end{centering}
\end{figure}
\end{lstlisting}
\end{minipage}
\noindent By default, as we saw in \S\ref{sec:auto-fig}, this instructs the workflow to execute a file called \texttt{figures/tree.py} to generate the corresponding figure.
However, in the present case, that file does not exist; instead, we have a file called \texttt{figures/tree.tex}, which we would like to compile into a \texttt{PDF} using the \texttt{tectonic} engine.
We can instruct the workflow to do this by specifying the following in \texttt{showyourwork.yml}:\\
\noindent\begin{minipage}{\linewidth}
\begin{lstlisting}[
style=yaml,
otherkeywords={script.path,script.name},
emph={script.path,script.name},
emphstyle={\color{lsthilite}}
]
scripts:
    tex:
        cd {script.path} && tectonic {script.name}
\end{lstlisting}
\end{minipage}
\noindent Each entry under the \texttt{scripts} key should be a file extension, and under each one, a string containing a shell command that instructs the workflow how to execute a given {\color{lsthilite}\texttt{script}} to produce a given {\color{lsthilite}\texttt{output}}.
For convenience, the following placeholders are recognized and expanded at runtime (an example expansion is shown after the list):
%
\begin{itemize}
\item \texttt{\{script\}}: The full path to the input script.
\item \texttt{\{script.path\}}: The full path to the directory containing the input script.
\item \texttt{\{script.name\}}: The name of the input script (without the path).
\item \texttt{\{output\}}: The full path to the output file.
\item \texttt{\{output.path\}}: The full path to the directory containing the output file.
\item \texttt{\{output.name\}}: The name of the output file (without the path).
\end{itemize}
%
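\noindent For instance, with the \texttt{tex} entry shown above, the command used to build \texttt{figures/tree.pdf} would expand (schematically, with relative paths) to\\
\noindent\begin{minipage}{\linewidth}
\begin{lstlisting}[
style=bash
]
cd src/figures && tectonic tree.tex
\end{lstlisting}
\end{minipage}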
If additional customization is needed, such as the need to provide command-line arguments that are specific to individual figures, users should instead provide custom rules in the \texttt{Snakefile} (\S\ref{sec:arbitrary-rules}).
\section{Arbitrary rules}
\label{sec:arbitrary-rules}
%
Coming soon.
\section{Zenodo interface}
\label{sec:zenodo}
%
Coming soon.
\section{This paper}
\begin{figure*}[p!]
\begin{centering}
\includegraphics[width=\linewidth]{figures/dag.pdf}
\caption{
A directed acyclic graph (DAG) showing the build process for each of the (other) figures in this article.
Figure scripts are represented by black rectangles; helper scripts (such as ones imported by figure scripts or used in dataset generation) are similar, but have rounded edges.
Blue cylinders correspond to Zenodo records; blue boxes correspond to datasets.
}
\label{fig*:dag}
\end{centering}
\end{figure*}
\subsection{The \texttt{Snakefile}}
\label{sec:Snakefile}
%
\begin{minipage}{\linewidth}
\lstinputlisting[
style=Snakefile
]
{../Snakefile}
\end{minipage}
\subsection{The \texttt{showyourwork.yml} file}
\label{sec:showyourworkyml}
%
\begin{minipage}{\linewidth}
\lstinputlisting[
style=yaml
]
{../showyourwork.yml}
\end{minipage}
\subsection{The \texttt{environment.yml} file}
\label{sec:environmentyml}
%
\begin{minipage}{\linewidth}
\lstinputlisting[
style=yaml
]
{../environment.yml}
\end{minipage}
\bibliography{bib}
\end{document}
\chapter{Exchanged messages}\label{ch:msgs}
In this chapter we describe the different types of messages that are exchanged
in the system. The definition of each message can be found in
\code{include/messages.h}.
\input{msgs/server}
\input{msgs/p2p}
\input{msgs/format}
%
% This is a borrowed LaTeX template file for lecture notes for CS267,
% Applications of Parallel Computing, UCBerkeley EECS Department.
% Now being used for CMU's 10725 Fall 2012 Optimization course
% taught by Geoff Gordon and Ryan Tibshirani. When preparing
% LaTeX notes for this class, please use this template.
%
% To familiarize yourself with this template, the body contains
% some examples of its use. Look them over. Then you can
% run LaTeX on this file. After you have LaTeXed this file then
% you can look over the result either by printing it out with
% dvips or using xdvi. "pdflatex template.tex" should also work.
%
\documentclass[twoside]{article}
\setlength{\oddsidemargin}{0.25 in}
\setlength{\evensidemargin}{-0.25 in}
\setlength{\topmargin}{-0.6 in}
\setlength{\textwidth}{6.5 in}
\setlength{\textheight}{8.5 in}
\setlength{\headsep}{0.75 in}
\setlength{\parindent}{0 in}
\setlength{\parskip}{0.1 in}
%
% ADD PACKAGES here:
%
\usepackage{amsmath,amsfonts,graphicx}
%
% The following commands set up the lecnum (week number)
% counter and make various numbering schemes work relative
% to the week number.
%
\newcounter{lecnum}
\renewcommand{\thepage}{\thelecnum-\arabic{page}}
\renewcommand{\thesection}{\thelecnum.\arabic{section}}
\renewcommand{\theequation}{\thelecnum.\arabic{equation}}
\renewcommand{\thefigure}{\thelecnum.\arabic{figure}}
\renewcommand{\thetable}{\thelecnum.\arabic{table}}
%
% The following macro is used to generate the header.
%
\newcommand{\week}[4]{
\pagestyle{myheadings}
\thispagestyle{plain}
\newpage
\setcounter{lecnum}{#1}
\setcounter{page}{1}
\noindent
\begin{center}
\framebox{
\vbox{\vspace{2mm}
\hbox to 6.28in { {\bf CS4244: Knowledge Representation and Reasoning
\hfill Spring 2020} }
\vspace{4mm}
\hbox to 6.28in { {\Large \hfill Week #1: #2 \hfill} }
\vspace{2mm}
\hbox to 6.28in { {\it Lecturer: #3 \hfill Scribes: #4} }
\vspace{2mm}}
}
\end{center}
\markboth{Week #1: #2}{Week #1: #2}
% {\bf Note}: {\it LaTeX template courtesy of UC Berkeley EECS dept.}
% {\bf Disclaimer}: {\it These notes have not been subjected to the
% usual scrutiny reserved for formal publications. They may be distributed
% outside this class only with the permission of the Instructor.}
% \vspace*{4mm}
}
%
% Convention for citations is authors' initials followed by the year.
% For example, to cite a paper by Leighton and Maggs you would type
% \cite{LM89}, and to cite a paper by Strassen you would type \cite{S69}.
% (To avoid bibliography problems, for now we redefine the \cite command.)
% Also commands that create a suitable format for the reference list.
\renewcommand{\cite}[1]{[#1]}
\def\beginrefs{\begin{list}%
{[\arabic{equation}]}{\usecounter{equation}
\setlength{\leftmargin}{2.0truecm}\setlength{\labelsep}{0.4truecm}%
\setlength{\labelwidth}{1.6truecm}}}
\def\endrefs{\end{list}}
\def\bibentry#1{\item[\hbox{[#1]}]}
%Use this command for a figure; it puts a figure in wherever you want it.
%usage: \fig{NUMBER}{SPACE-IN-INCHES}{CAPTION}
\newcommand{\fig}[3]{
\vspace{#2}
\begin{center}
Figure \thelecnum.#1:~#3
\end{center}
}
% Use these for theorems, lemmas, proofs, etc.
\newtheorem{theorem}{Theorem}[lecnum]
\newtheorem{lemma}[theorem]{Lemma}
\newtheorem{proposition}[theorem]{Proposition}
\newtheorem{claim}[theorem]{Claim}
\newtheorem{corollary}[theorem]{Corollary}
\newtheorem{definition}[theorem]{Definition}
\newenvironment{proof}{{\bf Proof:}}{\hfill\rule{2mm}{2mm}}
%\newenvironment{example}{{\bf Example:}}{\hfill\rule{2mm}{2mm}}
\newcounter{example}[section]
\newenvironment{example}[1][]{\refstepcounter{example}\par\medskip
\noindent \textbf{Example~\theexample. #1} \rmfamily}{\medskip}
% **** IF YOU WANT TO DEFINE ADDITIONAL MACROS FOR YOURSELF, PUT THEM HERE:
\newcommand\E{\mathbb{E}}
\begin{document}
%FILL IN THE RIGHT INFO.
%\week{**WEEK-NUMBER**}{**DATE**}{**LECTURER**}{**SCRIBE**}
\week{2}{Jan 23}{Kuldeep Singh Meel}{Ji Bo}
%\footnotetext{These notes are partially based on those of Nigel Mansell.}
% **** YOUR NOTES GO HERE:
% Some general latex examples and examples making use of the
% macros follow.
%**** IN GENERAL, BE BRIEF. LONG SCRIBE NOTES, NO MATTER HOW WELL WRITTEN,
%**** ARE NEVER READ BY ANYBODY.
\section{Propositional Logic: Syntax and Semantics} % Don't be this informal in your notes!
\subsection{Terms}
We begin with some basic terminology.
\begin{itemize}
\item \textbf{Proposition}: A proposition is a statement that is either true or false.
\item \textbf{Propositional Variable}: A propositional variable is a variable that can be either true or false.
\item \textbf{Logical Connective}: A binary connective is a symbol used to connect two formulas, such as AND($\wedge$) and OR($\vee$). A unary connective is a symbol applied to one formula, such as NOT($\neg$).
\item \textbf{Formula}: A formula is one of the following: (i) a propositional variable, (ii) the negation of a formula, or (iii) two formulas connected by a binary connective. An atomic formula ($p, q$) is a formula that contains no logical connectives, i.e., it cannot be further subdivided; atomic formulas combine into compound formulas ($p \circ q, \neg p, \neg q$) using the logical connectives.
\end{itemize}
\subsection{Sentence}
A valid sentence can be defined as follows:
\begin{itemize}
\item $\mathbf{FORM}_0 = \{p, q, r\}$
\item $\mathbf{FORM}_1 = \mathbf{FORM}_0 \cup \{(p \circ q), (p \circ r), (q \circ r) \} \cup \{(\neg p), (\neg q)\} $
\item $\mathbf{FORM} = \cup_{i=0}^\infty \mathbf{FORM}_i$
\end{itemize}
where $\circ$ represents a binary connective, such as $\wedge$ and $\vee$.
\begin{example}
$\phi$: $\neg \alpha$
\begin{itemize}
\item $\neg$: primary connective
\item $\alpha$: immediate sub-formula
\end{itemize}
\end{example}
\begin{example}
$\phi$: $\alpha \circ \beta$
\begin{itemize}
\item $\circ$: primary connective
\item $\alpha, \beta$: immediate sub-formulas
\end{itemize}
\end{example}
\begin{example}
$\phi$: $(p \vee q) \wedge r$
\begin{itemize}
\item $\wedge$: primary connective
\item $(p\vee q), r$: immediate sub-formulas
\end{itemize}
\end{example}
\begin{theorem}
For a composite formula $\phi$, there is
\begin{itemize}
\item a unique primary connective
\item a unique set of immediate sub-formulas
\end{itemize}
\end{theorem}
\subsection{Semantics}
To determine whether a proposition is true or false, we define an interpretation that assigns Boolean values to the propositional variables:
$$
\tau: \text{PROP} \rightarrow \{0, 1\}
$$
For instance, if $\text{PROP} = \{p, q\}$, we can define $\tau$ as $\tau(p) = 0$ and $\tau(q)= 1$ or $\tau = \{p \mapsto 0, q\mapsto 1\}$. Equivalently, we can view $\tau$ as a set of variables ($\tau \in 2^{\text{PROP}}$) defining the variables from the set to be true and the others false. Therefore, the aforementioned statement can be rewritten as $\tau = \{q\}$.
Define $\phi$ as a function $\phi: 2^{\text{PROP}} \rightarrow \{0, 1\}$:
$$
\phi(\tau) = \begin{cases}\tau(p) & \phi=p \\ \neg (\alpha (\tau)) & \phi=(\neg \alpha) \\ \circ(\alpha(\tau), \beta(\tau)) & \phi = (\alpha \circ \beta) \\\end{cases}
$$
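To make this recursive definition concrete, the following short Python sketch (ours, not part of the original lecture) evaluates a formula represented as a nested tuple under an interpretation $\tau$ given as the set of true variables:
\begin{verbatim}
# Formulas: a variable is a string; ("not", f) is negation;
# ("and", f, g) and ("or", f, g) are conjunction and disjunction.
def evaluate(phi, tau):
    if isinstance(phi, str):      # atomic formula, e.g. "p"
        return phi in tau         # tau is the set of true variables
    if phi[0] == "not":
        return not evaluate(phi[1], tau)
    if phi[0] == "and":
        return evaluate(phi[1], tau) and evaluate(phi[2], tau)
    if phi[0] == "or":
        return evaluate(phi[1], tau) or evaluate(phi[2], tau)
    raise ValueError("unknown connective")

# With tau = {p -> 1, q -> 1, r -> 0}, i.e. the set {"p", "q"}:
print(evaluate(("or", "p", "r"), {"p", "q"}))   # True, i.e. 1
\end{verbatim}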
\begin{example}
Given
\begin{align*}
& \text{PROP} = \{p, q, r\} \\
& \tau = \{p\mapsto 1, q\mapsto 1, r\mapsto 0\}
\end{align*}
\begin{itemize}
\item $(p) (\tau) = \tau(p) = 1$, because $\tau$ assigns true to proposition $p$.
\item $(p \vee r) (\tau) = \vee(p(\tau), r(\tau)) = \vee(\tau(p), \tau(r)) = \vee(1, 0) = 1$.
\end{itemize}
\end{example}
\begin{example}
Given
\begin{align*}
& \text{PROP} = \{p_1, p_2, \dots, p_{10}\}, \\
& \tau_1 = \{p_1\mapsto 1, p_2\mapsto 0, p_3\mapsto 0, \dots, p_9\mapsto 0, p_{10} \mapsto 0\} \\
& \tau_2 = \{p_1\mapsto 1, p_2\mapsto 0, p_3\mapsto 0, \dots, p_{9} \mapsto 0, p_{10}\mapsto 1\}\\
& \phi = (p_1 \vee p_2)
\end{align*}
\begin{itemize}
\item Both $\phi(\tau_1)$ and $\phi(\tau_2)$ equal $1$, because $\phi=(p_1 \vee p_2)$ contains only $p_1$ and $p_2$, which are assigned the same Boolean values by both $\tau_1$ and $\tau_2$, even though $\tau_1$ and $\tau_2$ are different.
\end{itemize}
\end{example}
\end{document}
\documentclass[9pt,twocolumn,twoside]{../../styles/osajnl}
\usepackage{fancyvrb}
\usepackage{graphicx}
\journal{i524}
\title{An overview of Apache THRIFT and its architecture}
\author[1]{Karthik Anbazhagan}
\affil[1]{School of Informatics and Computing, Bloomington, IN 47408, U.S.A.}
\affil[*]{Corresponding authors: [email protected]}
\dates{\today}
\ociscodes{Cloud, Apache Thrift, cross-language, I524}
% replace this with your url in github/gitlab
\doi{\url{https://github.com/cloudmesh/sp17-i524/tree/master/paper2/S17-IR-2008/report.pdf}}
\begin{abstract}
Thrift is a software framework developed at Facebook to accelerate the development and implementation of efficient and scalable cross-language services. Its primary goal is to enable efficient and reliable communication across programming languages by abstracting the portions of each language that tend to require the most customization into a common library that is implemented in each language. This paper summarizes how Thrift provides flexibility in use by allowing the different layers of its architecture to be chosen separately.
\end{abstract}
\setboolean{displaycopyright}{true}
\begin{document}
\maketitle
\section{Introduction}
Apache Thrift is an Interface Definition Language \cite{www-thrift-idl} (IDL) used to define and create services between numerous languages as a Remote Procedure Call (RPC) framework. Thrift's lightweight framework and its support for cross-language communication make it more robust and efficient than other RPC frameworks like SOA \cite{blog-thrift} (REST/SOAP). It allows you to create services that are usable by numerous languages through a simple and straightforward IDL. Thrift combines a software stack with a code generation engine to build services that work efficiently and seamlessly between $C++$, Java, Python, PHP, Ruby, Erlang, Perl, Haskell, C, Cocoa, JavaScript, Node.js, Smalltalk, and OCaml. In addition to interoperability, Thrift can be very efficient because of a serialization mechanism \cite{git-thrift-serial} which can save both space and time. In other words, Apache Thrift lets you create a service to send and receive data between two or more programs that are written in completely different languages or run on different platforms. \\
Thrift was originally developed at Facebook and is one of the core parts of their infrastructure. The choice of programming language at Facebook \cite{thrift-paper-2013} was based on which language was best suited for the task at hand. This flexibility resulted in difficulties when these applications needed to call one another, and Facebook needed a solution that could meet its needs for interoperability, transport efficiency, and simplicity. Out of this need, they developed efficient protocols and a service infrastructure which became Thrift. Facebook decided to make Thrift open source and finally contributed it to the Apache Software Foundation (ASF) in April 2007 in order to increase usage and development. Thrift was later released under the Apache 2.0 license.
\section{Architecture}
Figure~\ref{fig:thrift_arch} shows the architecture of the Thrift stack. It is essential to understand every component of the architecture to understand how Apache Thrift works. It includes a complete stack for creating clients and servers. The top portion of the stack is the user-generated code from the Thrift client-server definition file. The next layer of the framework consists of the Thrift-generated client and processor code, which also comprises the data structures. The next two important layers are the protocol and transport layers, which are part of the Thrift run-time libraries. This gives Thrift the freedom to define a service and change the protocol and transport without regenerating any code. Thrift includes a server infrastructure to tie the protocols and transports together. There are blocking, non-blocking, single- and multi-threaded servers. The 'Physical' portion of the stack varies from stack to stack based on the language. For example, for Java and Python network I/O, the built-in libraries are leveraged by the Thrift library, while the C++ implementation uses its own custom implementation. Thrift allows users to choose independently between protocol, transport and server. Since Thrift was originally developed in C++, the C++ implementation offers the greatest variation among these options \cite{www-thrift-example}.
\begin{figure}[h]
\centering
\includegraphics[width=3.5in]{images/thrift_arch.png}
\caption{Architecture of Apache Thrift \cite{www-thrift-arch}}
\label{fig:thrift_arch}
\end{figure}
\subsection{Transport Layer}
The transport layer provides a simple abstraction for reading from and writing to the network. Each language must have a common interface to transport bidirectional raw data. The transport layer describes how the data is transmitted. This layer separates the underlying transport from the rest of the system, exposing only the following interface: open, close, isOpen, read, write, and flush.
There are multiple transports supported by Thrift:
\begin{enumerate}
\item \textbf{TSocket}: The TSocket class is implemented across all target languages. It provides a common, simple interface to a TCP/IP stream socket and uses blocking socket I/O for transport.
\item \textbf{TFramedTransport}: The TFramedTransport class transmits data with frame size headers for chunking optimization or non-blocking operation.
\item \textbf{TFileTransport}: The TFileTransport is an abstraction of an on-disk file to a data stream. It can be used to write out a set of incoming Thrift requests to a file on disk.
\item \textbf{TMemoryTransport}: Uses memory for I/O operations. For example, the Java implementation uses a simple ByteArrayOutputStream.
\item \textbf{TZlibTransport}: Performs compression using zlib. It should be used in conjunction with another transport.
\end{enumerate}
\subsection{Protocol Layer}
The second major abstraction in Thrift is the separation of data structure from transport representation. While transporting the data, Thrift enforces a certain messaging structure. That is, it does not matter what encoding method the data is in, as long as the data supports a fixed set of operations that allows it to be read and written by generated code. The Thrift Protocol interface is very straightforward; it supports two things: bidirectional sequenced messaging, and encoding of base types, containers, and structs.
Thrift supports both text and binary protocols. The binary protocols almost always outperform text protocols, but text protocols may sometimes prove useful for debugging. The protocols available for the majority of the Thrift-supported languages are:
\begin{enumerate}
\item \textbf{TBinaryProtocol}: A straightforward binary format encoding that takes numeric values as binary, rather than converting them to text.
\item \textbf{TCompactProtocol}: Very efficient and dense encoding of data. This protocol writes numeric tags for each piece of data. The recipient is expected to properly match these tags with the data.
\item \textbf{TDenseProtocol}: It is similar to TCompactProtocol but strips off the meta information from what is transmitted and adds it back at the receiver side.
\item \textbf{TJSONProtocol}: Uses JSON for data encoding.
\item \textbf{TSimpleJSONProtocol}: A write-only protocol using JSON. Suitable for parsing by scripting languages.
\item \textbf{TDebugProtocol}: Sends data in the form of human-readable text format. It can be well used in debugging applications involving Thrift.
\end{enumerate}
\subsection{Processor Layer}
A processor encapsulates the ability to read data from input streams and write to output streams. The processor layer is the simplest layer. The input and output streams are represented by protocol objects. Service-specific processor implementations are generated by the Thrift compiler, and this generated code makes up the processor layer of the architecture stack. The processor essentially reads data from the wire (using the input protocol), delegates processing to the handler (implemented by the user), and writes the response over the wire (using the output protocol).
\subsection{Server Layer}
A server pulls together all of the various functionalities to complete the Thrift server layer. First, it creates a transport, then specifies input/output protocols for the transport. It then creates a processor based on the I/O protocols and waits for incoming connections. When a connection is made, it hands it off to the processor to handle the processing. Thrift provides a number of servers:
\begin{enumerate}
\item \textbf{TSimpleServer}: A single-threaded server using standard blocking socket I/O. Mainly used for testing purposes.
\item \textbf{TThreadPoolServer}: A multi-threaded server with N worker threads using standard blocking I/O. It generally creates a minimum of five threads in the pool if not specified otherwise.
\item \textbf{TNonBlockingServer}: A multi-threaded server using non-blocking I/O.
\item \textbf{THttpServer}: An HTTP server (for JS clients).
\item \textbf{TForkingServer}: Forks a process for each request to the server.
\end{enumerate}
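To make the layering concrete, the following Python sketch shows how a client stacks a transport, a protocol, and a compiler-generated client class. The \texttt{Calculator} service, its \texttt{add} method and the \texttt{tutorial} module are assumed to come from a user-defined IDL file processed by the Thrift compiler; they are illustrative only and not part of the Thrift library itself:
\begin{verbatim}
# Hypothetical Thrift client in Python
from thrift.transport import TSocket, TTransport
from thrift.protocol import TBinaryProtocol
# Generated by the Thrift compiler from a
# hypothetical calculator IDL file:
from tutorial import Calculator

socket = TSocket.TSocket('localhost', 9090)
transport = TTransport.TBufferedTransport(socket)
protocol = TBinaryProtocol.TBinaryProtocol(transport)
client = Calculator.Client(protocol)

transport.open()
print(client.add(2, 3))   # remote procedure call
transport.close()
\end{verbatim}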
\section{Advantages and Limitations of Thrift}
Thrift is more robust and efficient than other RPC frameworks for several reasons. Thrift leverages cross-language serialization with lower overhead than alternatives such as SOAP due to its use of a binary format. Since Thrift generates the client and server code completely \cite{www-thrift-tutorial}, it leaves the user with the only task of writing the handlers and invoking the client. Everything, including the parameters and returns, is automatically validated and analysed. Thrift is more compact than HTTP and can easily be extended to support things like encryption, compression, non-blocking IO, etc. Since Protocol Buffers \cite{www-protocol-buffers} are implemented in a variety of languages, they make interoperability between multiple programming languages simpler.\\
While there are numerous advantages of Thrift over other RPC frameworks, there are a few limitations. Thrift \cite{pdf-thrift-tutorial} is limited to only one service per server. There can be no cyclic structs: structs can only contain structs that have been declared before them, and a struct cannot contain itself. Important OOP concepts like inheritance and polymorphism are not supported, and neither can Null be returned by a server; instead a wrapper struct or value is expected. No out-of-the-box authentication service is available between server and client, and no bi-directional messaging is available in Thrift.\\
\section{Conclusion}
Thrift provides flexibility in use by allowing the different layers of its architecture to be chosen separately. As mentioned in the advantages section, Thrift's use of cross-language serialization with lower overheads makes it more efficient than other similar technologies. Thrift avoids duplicated work by writing buffering and I/O logic in one place. Thrift has enabled Facebook to build scalable back-end services efficiently. It has been employed in a wide variety of applications at Facebook, including search, logging, mobile, ads, and the developer platform. Application developers can focus on application code without worrying about the sockets layer.
\bibliography{references}
\end{document}
% \iffalse meta-comment
%
% Copyright 2018
% The LaTeX3 Project and any individual authors listed elsewhere
% in this file.
%
% This file is part of the LaTeX base system.
% -------------------------------------------
%
% It may be distributed and/or modified under the
% conditions of the LaTeX Project Public License, either version 1.3c
% of this license or (at your option) any later version.
% The latest version of this license is in
% http://www.latex-project.org/lppl.txt
% and version 1.3c or later is part of all distributions of LaTeX
% version 2005/12/01 or later.
%
% This file has the LPPL maintenance status "maintained".
%
% The list of all files belonging to the LaTeX base distribution is
% given in the file `manifest.txt'. See also `legal.txt' for additional
% information.
%
% The list of derived (unpacked) files belonging to the distribution
% and covered by LPPL is defined by the unpacking scripts (with
% extension .ins) which are part of the distribution.
%
% \fi
% Filename: ltnews29.tex
%
% This is issue 29 of LaTeX News.
\documentclass{ltnews}
\usepackage[T1]{fontenc}
\usepackage{lmodern,url,hologo}
\usepackage{csquotes}
\providecommand\acro[1]{\textsc{#1}}
\providecommand\meta[1]{$\langle$\textit{#1}$\rangle$}
\providecommand\XeTeX{\hologo{XeTeX}}
\providecommand\LuaTeX{\hologo{LuaTeX}}
\providecommand\pdfTeX{\hologo{pdfTeX}}
\newcommand\githubissue[2][]{\ifhmode\unskip\fi
\quad\penalty500\strut\nobreak\hfill
\mbox{\small\slshape(%
\href{https://github.com/latex3/latex2e/issues/\getfirstgithubissue#2 \relax}%
{github issue#1 #2}%
)}%
\par}
% simple solution right now (just link to the first issue if there are more)
\def\getfirstgithubissue#1 #2\relax{#1}
\newcommand\sxissue[1]{\ifhmode\unskip\fi
\quad\penalty500\strut\nobreak\hfill
\mbox{\small\slshape(\url{https://tex.stackexchange.com/#1})}\par}
\let\cls\pkg
\newcommand\env[1]{\texttt{#1}}
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
\iffalse % only for TUB production
\usepackage{graphicx}
\makeatletter
% Xe\TeX{} requires reflecting the first E, hence we complain if the
% graphics package is not present. (For plain documents, this can be
% loaded via Eplain.) Also, at Barbara's suggestion, if the current
% font is slanted, we rotate by 180 instead of reflecting so there is at
% least a chance to look ok. (The magic values here seem more or less
% ok for \texttt{cmsl} and \texttt{cmti}.)
%
% \begin{macrocode}
\def\tubreflect#1{%
\@ifundefined{reflectbox}{%
\TBerror{A graphics package must be loaded for \string\XeTeX}%
}{%
\ifdim \fontdimen1\font>0pt
\raise 1.6ex \hbox{\kern.1em\rotatebox{180}{#1}}\kern-.1em
\else
\reflectbox{#1}%
\fi
}%
}
\def\tubhideheight#1{\setbox0=\hbox{#1}\ht0=0pt \dp0=0pt \box0 }
\def\XekernbeforeE{-.125em}
\def\XekernafterE{-.1667em}
\DeclareRobustCommand{\Xe}{\leavevmode
\tubhideheight{\hbox{X%
\setbox0=\hbox{\TeX}\setbox1=\hbox{E}%
\ifdim \fontdimen1\font>0pt \def\XekernbeforeE{0em}\fi
\lower\dp0\hbox{\raise\dp1\hbox{\kern\XekernbeforeE\tubreflect{E}}}%
\kern\XekernafterE}}}
\def\XeTeX{\Xe\TeX}
\def\XeLaTeX{\Xe{\kern.11em \LaTeX}}
\fi
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
\publicationmonth{December}
\publicationyear{2018}
\publicationissue{29}
\begin{document}
%\addtolength\textheight{4.2pc} % only for TUB
\maketitle
\tableofcontents
\setlength\rightskip{0pt plus 3em}
\bigskip
\section{Introduction}
The December 2018 release of \LaTeX{} is a maintenance release in
which we have fixed a few bugs in the software: some are old, some
newer, and they are mostly rather obscure.
\section[Bug reports for core \LaTeXe{} and packages]
{Bug reports for core \LaTeXe{} and packages maintained by the Project Team}
In Spring 2018 we established a new issue tracking system
(Github issues at \url{https://github.com/latex3/latex2e/issues}) for both
the \LaTeX{} core and the packages maintained by the
\LaTeX{} Project team, with an updated procedure for how to report a
bug or problem.
Initial experience with this system is good, with people who report
problems following the guidelines and including helpful working
examples to show the problem---thanks for doing this.
The detailed requirements and the workflow for reporting a bug in the
core \LaTeX{} software is documented at
\begin{quote}
\url{https://www.latex-project.org/bugs/}
\end{quote}
with further details and discussion in~\cite{29:Mittelbach:TB39-1}.
\section{Changes to the \LaTeX{} kernel}
\subsection{UTF-8:\ updates to the default input encoding}
In the April 2018 release of \LaTeX{} we changed the default encoding
from 7-bit \acro{ascii} to \acro{utf}-8 when using classic \TeX\ or
\hologo{pdfTeX}; see \textit{\LaTeX{} News~28}~\cite{29:ltnews28} for
details.
Now, after half a year of experience with
this new default,
we have
made a small number of adjustments to further improve the user experience.
These include:
\begin{itemize}
\item
Some improvements when displaying error messages about \acro{utf}-8
characters that have not been set up for use with \LaTeX{},
or are invalid for some other reason;
%
\githubissue[s]{60, 62 and 63}
%
\item
The addition of a number of previously missing declarations for
characters that are in fact available with the default fonts, e.g.,
\cs{j} \enquote{\j} (0237), \cs{SS} \enquote{\SS} (1E9E),
\verb=\k{}= \enquote{\k{ }} (02DB) and \verb=\.{}= \enquote{\.{ }}
(02D9);
\item
Correcting the names for \cs{guillemetleft}
\enquote{\guillemotleft} and \cs{guillemetright}
\enquote{\guillemotright} in all encoding files. These correct
names are in addition to the old (but wrong) Adobe names: Adobe
mistakenly called them Guillemot, which is a sea bird.
%
\githubissue{65}
%
\item
Added \cs{Hwithstroke} (\enquote{\Hwithstroke}) and \cs{hwithstroke}
(\enquote{\hwithstroke}) necessary for typesetting Maltese.
%
\sxissue{q/460110}
\end{itemize}
\subsection{Fixed \cs{verb*} and friends in \texorpdfstring{\XeTeX}{XeTeX} and \LuaTeX{}}
The original \cs{verb*} and \texttt{verbatim*} in \LaTeX{} were coded
under the assumption that the position of the space character (i.e.,
\acro{ascii} 32) in a typewriter font contains a visible space glyph
``\verb*= =''. This is correct for \pdfTeX{} with the most used font
encodings \texttt{OT1} and \texttt{T1}. However, this unfortunately
does not work for Unicode engines using the \texttt{TU} encoding since
the space character slot (\acro{ascii} 32) then usually contains a real
(normal) space, which has the effect that \cs{verb*} produces the same
results as \cs{verb}.
The \cs{verb*} code now always uses the newly introduced command
\cs{verbvisiblespace} to produce the visible space character and this
command will get appropriate definitions for use with the different
engines. With \pdfTeX{} it will simply use \cs{asciispace}, which is
a posh name for ``select character 32 in the current font'', but with
Unicode engines the default definition is
\begin{verbatim}
\DeclareRobustCommand\verbvisiblespace
{\leavevmode
{\usefont{OT1}{cmtt}{m}{n}\asciispace}}
\end{verbatim}
which uses the visible space from the font Computer Modern Typewriter,
regardless of the currently chosen typewriter font. Internally the
code ensures that the character used has exactly the same width as the
other characters in the current (monospaced) font; thus, for example,
code displays line up properly.
It is possible to redefine this command to select your own character,
for example
\begin{verbatim}
\DeclareRobustCommand\verbvisiblespace
{\textvisiblespace}
\end{verbatim}
will select the ``official'' visible space character of the
current font. This may look like the natural default, but it wasn't
chosen as our default because many fonts just don't have that Unicode
character, or they have one with a strange shape.
%
\githubissue[s]{69 and 70}
\subsection{Error message corrected}
Trying to redefine an undefined command could in a few cases generate
an error message with a missing space, e.g.,
\verb=\renewcommand\1{...}= gave
\begin{verbatim}
LaTeX Error: \1undefined.
\end{verbatim}
This is now fixed.
%
\githubissue{41}
\subsection{Fixed fatal link error with \pkg{hyperref}}
If an \cs{href} link text gets broken across pages, \pdfTeX{} and
\LuaTeX{} will generate a fatal error unless both parts of the link
are internally at the same boxing level. In two-column mode that was
not the case if one of the pages had spanning top floats. This has now
been changed so that the error is avoided.
%
\githubissue{94}
\subsection{Avoid page breaks caused by invisible commands}
Commands like \cs{label} or \cs{index} could generate a potential page
break in places where a page break was otherwise prohibited, e.g.,
when used between two consecutive headings. This has now been
corrected. If for some reason you really want a break and you relied
on this faulty behavior, you can always add one using \cs{pagebreak},
with or without an optional argument.
%
\githubissue{81}
\subsection{Prevent spurious spaces when reading table of contents data}
When table of contents data is read in from a \texttt{.toc} file, the
new-line character at the end of each line is converted by \TeX{} to a
space. In normal processing this is harmless (as \TeX{} is doing this
input reading whilst in vertical mode and each line in the file
represents a single line (paragraph) in the table of contents. If,
however, this is done in horizontal mode, which is sometimes the case,
then these spaces will appear in the output. If you then omit some of
the input lines (e.g., because you do not display \acro{toc} data below a
certain level), then these spaces accumulate in the typeset output and
you get surprising, and unwanted, gaps inside the text.
The new code now adds a \texttt{\%} sign at the end of problematic
lines in the \texttt{.toc} file so that \TeX{} will not generate such
spaces that may survive to spoil the printed result. As some third
party packages have augmented or changed the core \LaTeX{}
functionality in that area (for example, by adding additional
arguments to the commands in \acro{toc} files) the code uses a conservative
approach and the \texttt{\%} signs are added only when certain
conditions are met. Therefore some packages might require updates if
they want to benefit from this correction, especially if they
unconditionally overwrite \LaTeX{}'s \cs{addcontentsline} definition.
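Schematically, a protected line in the \texttt{.toc} file now ends in a
comment character, e.g.,
\begin{verbatim}
\contentsline {subsection}{Some heading}{7}{section*.4}%
\end{verbatim}
so that the end of the line is not turned into a space when the file is
read back in.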
%
\githubissue{73}
\subsection{Prevent protrusion in table of contents lines}
In \TeX{}'s internal processing model,
paragraph data is one of the
major data structures. As a result,
many things are internally modeled
as paragraphs even if they are not conceptually
``text paragraphs'' in
the traditional sense.
In a few cases this has some surprising effects
that are not always
for the better. One example is
standard \acro{toc} entries,
where you have
heading data followed by some dot leaders and a page
number at the right, produced, for example, from this:
\begin{quote}
\contentsline {subsection}{Error message corrected}{2}{section*.7}
\end{quote}
The space reserved for the page number is of a fixed width, so that
the dots always end in the same place. Well, they did end in the same
place until the advent of protrusion support in the \TeX{} engines.
Now, with the \pkg{microtype} package loaded, it is possible that the
page number will protrude slightly into the margin (even though it’s
typeset inside a box) and as a result this page number box gets
shifted. With enough bad luck this can get you another dot in the
line, sticking out like the proverbial sore thumb, as exhibited in the
question on StackExchange that triggered the correction.
\LaTeX{} now takes care that there will be no protrusion happening on
such lines, even if it is generally enabled for the whole document.
%
\sxissue{q/172785}
\subsection{Start L-R mode for \cs{thinspace} and friends}
In \LaTeX{}, commands that are intended only for paragraph (L-R) mode
are generally careful to start paragraph mode if necessary; thus they
can be used at the start of a paragraph without surprising and
unwanted consequences. This important requirement had been overlooked
for a few horizontal spacing commands, such as \cs{thinspace}
(a.k.a.\ ``\cs{,}''), and for some other support commands such as
\cs{smash} or \cs{phantom}. Thus they ended up adding vertical space
when used at the beginning of a paragraph or, in the case of
\cs{smash}, creating a paragraph of their own. This has now been
corrected, and a corresponding update has been made to the
\pkg{amsmath} package, in which these commands are also defined.
%
\githubissue[s]{49 and 50}
\subsection{Guarding \cs{pfill} in \pkg{doc}}
For presenting index entries pointing to
code fragments and the like,
the \pkg{doc} package has a \cs{pfill} command that
generates within the index a line of dots leading from
the command name to the page or code line numbers.
If necessary it would automatically split the entry
over two lines. That worked well enough for a quarter century, but we
discovered recently that it is broken inside
the \cls{ltugboat} class,
where it sometimes produces
bad spacing within continuation lines.
The reason turned out to be a redefinition of the \LaTeX{} command
\cs{nobreakspace} (\verb=~=) inside the class \cls{ltugboat}, which
removed any preceding space (and thus unfortunately also removed the
dots on the continuation line). While one can argue that this is a
questionable redefinition (if only done by a single class and not generally),
it has been in the class so long that changing it would
certainly break older documents. So instead we now guard against that
removal of space.
%
\githubissue[s]{25 and 75}
\section{Changes to packages in the \pkg{tools} category}
\subsection{Sometimes the \pkg{trace} package turned off too much}
The \pkg{trace} package is a useful little tool for tracing macro
execution: it hides certain lengthy and typically uninteresting
expansions resulting from font changes and similar activities.
However, it had the problem that it also reset other tracing settings
such as \cs{showoutput} in such situations, so that you couldn't use
\cs{showoutput} in the preamble to get symbolic output of all the
pages in the document. This has now been corrected.
\subsection{Update to \pkg{xr}}
The \pkg{xr} package has been updated so that the code that reads the
\texttt{.aux} file has been made more robust. It now correctly
ignores conditionals (added by \pkg{hyperref} and other packages)
rather than generating low level parsing errors.
\sxissue{a/452321}
\subsection{Column data for \env{multicols*} sometimes vanished}
In certain situations involving \env{multicols*}, when there are more
explicit \cs{columnbreak} requests than there are columns on the
current page, data could vanish due to the removal of an internal
penalty marking the end of the environment. This has been corrected by
explicitly reinserting that penalty if necessary.
%
\githubissue{53}
\subsection{Extension to \cs{docolaction} in \pkg{multicol}}
The \cs{docolaction} command can be used to carry out actions
depending on the column you are currently in, i.e., first, any inner
one (if more than two) or last. However, if the action generates text
then there is the question: is this text part of the current column or
the one after? That is, on the next run, do we test before or after it,
to determine in which column we are?
This is now resolved as follows: if you use \cs{docolaction*} any
generated text by the chosen action is considered to be after the test
point. But if you use the command without the star then all the material
it generates will be placed before the test point to determine the
current column, i.e., the text will become part of the current column
and may affect the test result on the next run.
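As a rough sketch (the three branch texts here are just placeholders), the
two variants are used like this:
\begin{verbatim}
  \docolaction {first}{inner}{last}  % text is part of the current column
  \docolaction*{first}{inner}{last}  % text lies after the test point
\end{verbatim}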
\subsection{Prevent color leak in \pkg{array}}
In some cases the color used inside a \env{tabular} cell could ``leak
out'' into the surrounding text. This has been corrected.
%
\githubissue{72}
\subsection{Support fragile commands in \texttt{array} or \texttt{tabular} column templates}
The preamble specifiers \texttt{p}, \texttt{m} and \texttt{b} each receive
a user-supplied argument: the width of the paragraph column. Normally
that is something harmless, like a length or a simple length
expression. But in more complicated settings involving the \pkg{calc}
package it could break with a low-level error message. This has now
been corrected.
%
\sxissue{q/459285}
\section{Changes to packages in the \pkg{amsmath} category}
The changes in the kernel made for \cs{thinspace}, \cs{smash},
etc.\ (see above) have been reflected in the \pkg{amsmath} package
code, so that loading this package doesn't revert them.
%
\githubissue[s]{49 and 50}
\section{Website updates}
\subsection{Publications area reorganized and extended}
To help readers find relevant information more conveniently, the area
of the website covering publications by the
\LaTeX{} Project Team was reorganized and extended (many more
abstracts were added). We now provide the articles, talks and supplementary
data structured both by year and also by major
topics~\cite{29:site-pub}. Feel free to take a look.
\subsection{Japanese translations of the user's guide}
Yukitoshi Fujimura has kindly translated
into Japanese two documents that are
distributed with standard \LaTeX{}.
These are:
\begin{itemize}
\item
\LaTeXe{} for authors;
\item
User's Guide for the \pkg{amsmath}~Package~\cite{29:amsldoc}.
\end{itemize}
They can be found on the website documentation page~\cite{29:site-doc}.
You will now also find there a typeset version of the full \LaTeXe{}
source code (with index etc.\@) and a number of other goodies.
\begin{thebibliography}{9}
\bibitem{29:Mittelbach:TB39-1} Frank Mittelbach:
\emph{New rules for reporting bugs in the \LaTeX{} core software}.
In: TUGboat, 39\#1, 2018.
\url{https://latex-project.org/publications/}
\bibitem{29:ltnews28}
\emph{\LaTeX{} News, Issue 28}.
In: TUGboat, 39\#1, 2018.\\
\rlap{\url{https://latex-project.org/news/latex2e-news/}}
\bibitem{29:site-doc}
\emph{\LaTeX{} documentation on the \LaTeX{} Project Website}.\\
\url{https://latex-project.org/documentation/}
\bibitem{29:site-pub}
\emph{\LaTeX{} Project publications on the \LaTeX{} Project Website}.\\
\url{https://latex-project.org/publications/}
\bibitem{29:amsldoc} American Mathematical Society and The \LaTeX3 Project:
\emph{User's Guide for the \texttt{amsmath} package} (Version 2.1).
April 2018.
Available from
\url{https://www.ctan.org}
and distributed as part of every \LaTeX{} distribution.
\end{thebibliography}
\end{document}
% Created 2021-12-14 Tue 09:07
% Intended LaTeX compiler: pdflatex
\documentclass[letterpaper]{article}
\usepackage[utf8]{inputenc}
\usepackage[T1]{fontenc}
\usepackage{graphicx}
\usepackage{longtable}
\usepackage{wrapfig}
\usepackage{rotating}
\usepackage[normalem]{ulem}
\usepackage{amsmath}
\usepackage{amssymb}
\usepackage{capt-of}
\usepackage{hyperref}
\usepackage{punchagan}
\hypersetup{colorlinks=true, urlcolor={url-gray}}
\usepackage{enumitem}
\setlist{leftmargin=0.25in,nosep}
\author{Sudhanv Apte}
\date{\today}
\title{}
\hypersetup{
pdfauthor={Sudhanv Apte},
pdftitle={},
pdfkeywords={},
pdfsubject={},
pdfcreator={Emacs 29.0.50 (Org mode 9.5)},
pdflang={English}}
\begin{document}
\resheader{ Sudhanv Apte }{ [email protected] }{ +886 905172614 }{ 218, Huanhe S Rd., Wanhua Dist }{ Taipei, Taiwan }
\section{Experience}
\label{sec:org441382b}
\subsection{Intern at \href{http://getpeppermint.co}{Peppermint}}
\label{sec:org693e49f}
\subsubsection{May 2021 - Aug 2021}
\label{sec:org06c423b}
Worked on Object Detection and Tracking for obstacle avoidance in crowded areas.
\subsection{Research Assistant at \href{https://itlab.ee.ntut.edu.tw/}{Intelligent Control Lab}}
\label{sec:org65adc16}
\subsubsection{Jul 2021 - Jan 2021}
\label{sec:orgcb3f734}
\begin{itemize}
\item BLE Module for Elevator Access
\item Implementing Local Planner on Food Delivery Robot
\end{itemize}
\section{Projects}
\label{sec:org2264f8a}
\subsection{\href{https://github.com/sudhanv09/Warehouse-Bot}{Warehouse-Bot}}
\label{sec:org753be5e}
\subsubsection{Python}
\label{sec:org72fc07a}
Warehouse bot is a robot designed to replace AS/RS systems in warehouses. It
operates autonomously with dynamic obstacle avoidance capabilities.
\subsection{\href{https://github.com/sudhanv09/BLE-Advertiser-Listener}{BLE}}
\label{sec:org314c2a3}
\subsubsection{C}
\label{sec:org1da0fac}
A BLE Advertiser/Listener module that allows the robot to access the elevator.
\subsection{Deep Learning}
\label{sec:org4a51829}
\subsubsection{Python}
\label{sec:orgc500d5a}
Implemented DQN and GAN models.
\section{Education}
\label{sec:orge8f8733}
\subsection{NTUT, Taiwan}
\label{sec:org076ddca}
\subsubsection{Sep 2021 - Jul 2023}
\label{sec:org155cf7e}
\emph{M.Sc, Electrical Engineering}
\subsection{SRM IST, Chennai, India}
\label{sec:org3d5c7b3}
\subsubsection{Aug 2017 - Jun 2021}
\label{sec:org086b80f}
\emph{B.Tech, Mechatronics Engineering}, GPA: 7.2/10
\section{Skills}
\label{sec:orgf86a58e}
\begin{description}
\item[{Most comfortable with}] Python, ROS, Git, Linux
\item[{Have worked with}] C/C++, C\#, Torch
\end{description}
\end{document}
% Copyright 2019 by Till Tantau
%
% This file may be distributed and/or modified
%
% 1. under the LaTeX Project Public License and/or
% 2. under the GNU Free Documentation License.
%
% See the file doc/generic/pgf/licenses/LICENSE for more details.
\section{Animations}
\label{section-tikz-animations}
\begin{tikzlibrary}{animations}
This library must be loaded in order to use animations with \tikzname.
\end{tikzlibrary}
\subsection{Introduction}
An \emph{animation} changes the appearance of some part of a graphic over time.
The archetypical animation is, of course, a \emph{movement} of some part of a
picture, but a change of, say, the opacity of a path is also an animation.
\tikzname\ allows you to specify such animations using special keys and
notations.
%
\begin{codeexample}[
preamble={\usetikzlibrary{animations}},
width=8cm,
animation list={0.7,1.4,2.1,2.8},
animation scale=.25,
]
\begin{tikzpicture}[
animate/orbit/.style 2 args = {
myself:shift = {
along = {
(0,0) circle [radius=#1]
} sloped in #2s/10,
repeats }} ]
\node :color = {0s = "orange",
2s = "red",
4s = "orange",
repeats}
{Sun};
\begin{scope}[animate={orbit={2.5cm}{365}}]
\node {Earth};
\node [animate={orbit={1cm}{28}}] {Moon};
\end{scope}
\useasboundingbox (-3.8,-3.8) (3.8,3.8);
\end{tikzpicture}
\end{codeexample}
Adding an animation to a \tikzname\ picture is done as follows:
%
\begin{enumerate}
\item \emph{Before} or \emph{in the options of} the to-be-animated object
you specify the object together with an \emph{attribute} that you wish
to animate. Attributes are things like the fill color or the line width
or the position of the object.
\item You specify \emph{when} this attribute should have \emph{which}
values using a so-called \emph{timeline}. This is just a curve that
specifies for each point in time which value the attribute should have.
\item You can additionally use further options to configure the animation,
for instance you can specify that the animation should repeat or that
it should only start when a certain object is clicked.
\end{enumerate}
As a simple example, let us move a circle within thirty seconds by three
centimeters to the left:
%
\begin{codeexample}[width=2cm,preamble={\usetikzlibrary{animations}}]
\tikz \draw :xshift = {0s = "0cm", 30s = "-3cm", repeats} (0,0) circle (5mm);
\end{codeexample}
As can be seen, a special syntax is used in several places: Entries with a
colon such as |:xshift| specify an attribute, values are specified in quotation
marks. This syntax will be explained in more detail later on.
\subsubsection{Animations Change Attributes}
Before we plunge into the details of how animations are specified, it is
important to understand what \tikzname\ actually does when creating an
animation: It does \emph{not} (as all other animation packages do) precompute a
sequence of pictures that are later somehow displayed in rapid succession.
Neither does it insert an external video into the document. Rather, a
\tikzname\ animation is just an ``annotation'' in the output that a certain
attribute of a certain object should change over time in some specific way when
the object is displayed. It is the job of the document viewer application to
actually compute and display the animation. The big advantage of this approach
is that animations neither increase the output file sizes noticeably nor do
they really slow down \TeX: The hard and complicated calculations are done by
the viewer application. The disadvantage is, of course, that a document viewer
application must understand the annotations and actually compute and display
the animations. The \textsc{svg} format is a format for which this is possible,
the popular \textsc{pdf} format is not. For the \textsc{svg} format, there are
actually different possible ways of ``formulating'' the animations (using
\textsc{smil} or \textsc{css} or JavaScript) and they have different advantages
and disadvantages.
To make a long story short: \tikzname\ animations currently work only with
\textsc{svg} output (and use the \textsc{smil} ``flavor'' of describing
animations). In the future, it may well happen that other ``flavors'' of describing
animations will be added, but it is very unlikely that \textsc{pdf} will ever
support animations in a useful way.
It is, however, possible to create ``snapshots'' of an animation and insert
these into \textsc{pdf} files (or any other kind of file including \textsc{svg}
files), see Section~\ref{section-anim-snap} for details. Snapshots are also
useful for creating ``printed versions'' of animations; indeed, all of the small
sequences of pictures in this manual that show what an animation key does
have been created using snapshots.
\subsubsection{Limitations of the Animation System}
There are certain limitations to the animation system that you should keep in
mind when considering how and when to use it:
%
\begin{enumerate}
\item As pointed out earlier, animations require a specific output format
(currently only \textsc{svg} is supported).
    \item It is extremely difficult to animate ``lines between moving nodes''
        correctly. Consider code like |\draw(a)--(b);| where |a| and |b| are
        nodes. Now, when you animate the position of~|(a)|, the line connecting
        |(a)| and |(b)| will, unfortunately, not ``move along'' automatically
        (but it is easy to move the whole group of |(a)|, |(b)|, and the
        connecting line as a whole; see the sketch after this list). Otherwise,
        you must ``cheat'' and introduce some ``virtual'' nodes, which leads to
        rather complex and bloated code.
\item Animations are taken into consideration for bounding box computations,
but only for shifts, not for rotations, scaling, or skewing and also
possibly not when multiple shifts are active at the same time for the
same object.
\end{enumerate}
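The following is only a minimal sketch of the second point (the node names
and times are arbitrary); moving the whole group, rather than an individual
node, keeps the connecting line attached:
%
\begin{codeexample}[code only]
% Problematic: the line is drawn once and does not follow the moving node (a)
\tikz {
  \node (a) [animate = { myself:xshift = { 0s = "0cm", 2s = "1cm",
                                           begin on = click } }] {A};
  \node (b) at (2,0) {B};
  \draw (a) -- (b);
}

% Easy: animate a scope containing both nodes and the connecting line
\tikz {
  \begin{scope} [animate = { myself:xshift = { 0s = "0cm", 2s = "1cm",
                                               begin on = click } }]
    \node (a)          {A};
    \node (b) at (2,0) {B};
    \draw (a) -- (b);
  \end{scope}
}
\end{codeexample}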
\subsubsection{Concepts: (Graphic) Objects}
During an animation an attribute of a certain ``object'' changes over time. The
term ``object'' is deliberately a bit vague since there are numerous different
``things'' whose attributes can change. In detail, the following objects have
attributes that can be animated:
%
\begin{enumerate}
\item Nodes, which are created by the |\node| command (and, also,
internally by commands such as |\graph|). For nodes, different parts of
the node can be animated separately; for instance, you can animate the
color of the background path, but also the color of the text, and also
the color of the foreground path (though most nodes do not have a
foreground path) and also the color of different text parts (though
        only a few nodes have multiple text parts).
    \item Graphic scopes, which are created by numerous commands, including the
        |{scope}| environment and the |\scoped| command; |\tikz| itself also
        creates a graphic scope, and so does each node and even each path.
\item View boxes, which can only be created using the |views| library.
\item Paths, which you create using the |\path| command or commands like
|\draw| that call |\path| internally. However, the (usually background)
path of a node can also be animated. Note that ``animating the path''
really means that the path itself should change over time; in essence,
you can ``warp'' a path over time.
\end{enumerate}
In all of these cases, you must either specify the animation inside the
object's options using |animate| or use the |name| key to name the object and,
then, refer to it in an |animate|. For nodes you can, of course, use the
|(|\meta{node name}|)| syntax to name the node. Recall that you must
\emph{always} specify the animation \emph{before} the object is created; it is
not possible to animate an already created object.
There is a special syntax for choosing the object of an animation, see
Section~\ref{section-anim-syntax-obj}, but you can also use the |object| key to
choose them directly, see Section~\ref{section-anim-def-obj}.
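For instance, a scope can be named using the |name| key and then be
referenced in an |animate| key given beforehand (a minimal sketch; the name
|g| is arbitrary):
%
\begin{codeexample}[code only]
\tikz [animate = { g:opacity = { 0s = "1", 2s = "0", begin on = click } }] {
  \begin{scope} [name = g]  % the (future) object referenced as "g" above
    \node [fill = blue!20, draw = blue, circle] {Fades away};
  \end{scope}
}
\end{codeexample}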
\subsubsection{Concepts: Attributes}
In addition to the to-be-animated object, you must also choose an
\emph{attribute} that you wish to animate. Attributes are things like the color
of an object, the position, but also things like the line width. The syntax for
choosing attributes and the list of attributes will be explained in detail
later on.
Most attributes correspond directly to attributes that are directly supported
by the backend driver (\textsc{svg}), but this is not always the case. For
instance, for a node, \tikzname\ differentiates between the fill color, the
draw (stroke) color, and the text color, while \textsc{svg} treats the text
color as a special case of the fill color. \tikzname\ will do some internal
mappings to ensure that you can animate the ``\tikzname\ attributes'' even when
they are not directly supported.
The same syntax that is used for specifying objects is also used to specify
attributes, see Section~\ref{section-anim-syntax-obj}, but you can also set
them directly using the |attribute| key, see
Section~\ref{section-anim-def-attr}.
\subsubsection{Concepts: Timelines}
Once an object and an attribute have been chosen, a \emph{timeline} needs to be
established. This is, essentially, a curve that specifies for each ``moment in
time'' which value the attribute should have.
A timeline has a \emph{start} and an \emph{end}, but the start need not be the
``moment zero'' (we will come to that) and may even be negative, while the end
may be at infinity. You specify the timeline by specifying for certain points
in time what the value is at that moment; for all other moments the value is
then interpolated. For instance, if you specify that the attribute |:xshift|
(the ``horizontal position'' of the object) is 0\,mm at time 5\,s and 10\,mm at
time 10\,s, then at 7.5\,s it will be 5\,mm and at 9\,s it will be 8\,mm
(assuming a linear interpolation). The resulting optical effect will be that
the object \emph{smoothly moves} by one centimeter to the right over a period
of five seconds, starting five seconds after ``moment zero''.
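As a minimal sketch, the movement just described could be written as
follows:
%
\begin{codeexample}[code only]
% no shift until 5s, then a smooth move of 10mm to the right, ending at 10s
\tikz \node :xshift = { 5s = "0mm", 10s = "10mm" }
  [fill = blue!20, draw = blue, circle] {Moves};
\end{codeexample}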
Now, what is the ``moment zero'', the ``beginning of an animation''? If nothing
else is specified, an animation starts immediately when the graphic is shown
and this is the moment zero relative to which the timeline is measured.
However, it is also possible to change this. In particular, you can specify
that the moment zero is when a particular \emph{event} occurs such as the user
clicking on another object or another animation ending or starting.
The interpolation of values is not always a straightforward affair. Firstly,
for certain kinds of values it is not clear how an interpolation should be
computed. How does one interpolate between two paths? Between the colors red
and green? Between the values |"true"| and |"false"|? In these cases, one must
define carefully what the interpolation should be. Secondly, you may wish to
use a non-linear interpolation, which is useful for ``easing'' motions: The
visual effect of the movement specified above is that the object sits still
from moment $0$ for five seconds, then there is an ``infinite acceleration''
causing the object to suddenly move at the speed of 2\,mm per second, then
there is no acceleration at all for five seconds, causing the object to move
for one centimeter, followed by an ``infinite negative acceleration'' that
makes the object come to a full stop. As a viewer you experience these infinite
accelerations as ``unrealistic'', spoiling the effect of watching a (virtual)
physical process. Non-linear interpolations allow you to avoid this effect.
Just as for specifying objects and attributes, there is also a special syntax
for specifying times and values.
\subsection{Creating an Animation}
\subsubsection{The Animate Key}
In order to animate a picture, you create timelines for all objects and
attributes that change during the animation. The key |animate| is used for
creating these timelines.
\begin{key}{/tikz/animate=\meta{animation specification}}
You must place all specifications of animations inside uses of |animate|.
You can, and usually should, place the specification of all timelines of a
single picture inside a single use of this key since it will reset the time
and the fork time (explained in Section~\ref{section-anim-def-times}). You
can, however, use this key several times, in principle. Note that if you
animate the same attribute of the same object in two different uses of
|animate|, two separate timelines will result (and complicated rules are
used to determine which one ``wins'' in case they specify conflicting
values for the attribute at different times).
The key can be used at all places where a \tikzname\ key is used; typically
you will use it with a |{scope}| environment, inside the options of a node,
or directly with the |\tikz| command:
%
\begin{codeexample}[
preamble={\usetikzlibrary{animations}},
animation list = {0.5,1,1.5,2},
]
\tikz \node [fill, text = white, animate = {
myself:fill = {0s = "red", 2s = "blue", begin on = click }}] {Click me};
\end{codeexample}
%
\begin{codeexample}[
preamble={\usetikzlibrary{animations}},
animation list = {0.5,1,1.5,2},
]
\tikz [animate = {a node:fill = {0s = "red", 2s = "blue",
begin on = click}}]
\node (a node) [fill, text = white] {Click me};
\end{codeexample}
The details of what, exactly, happens in the \meta{animation specification}
will be described in the rest of this section. However, basically, an
\meta{animation specification} is just a sequence of normal \tikzname\
key--value pairs that get executed with the path prefix |/tikz/animate| and
with some special syntax handlers installed. In particular, you can define
styles for this key path and use them. For instance, we can define a
|shake| animation like this:
%
\begin{codeexample}[width=4cm,preamble={\usetikzlibrary{animations}}]
\tikzset{
animate/shake/.style = {myself:xshift = { begin on=click,
0s = "0mm", 50ms = "#1", 150ms = "-#1", 250ms = "#1", 300ms = "0mm" }}}
\tikz \node [fill = blue!20, draw=blue, very thick, circle,
animate = {shake = 1mm}] {Shake};
\tikz \node [fill = blue!20, draw=blue, very thick, circle,
animate = {shake = 2mm}] {SHAKE};
\end{codeexample}
Note that, as stressed earlier, you can only use the |animate| key to
specify animations for objects that do not yet exist. The node and object
names mentioned in a specification always refer to ``upcoming'' objects;
already existing objects of the same name are not influenced.
You can use the |name| key inside |animate| to ``name'' the animation. Once
named, you can later reference the animation in other animations; for
instance, you can say that another animation should start when the present
animation has ended.
\end{key}
\subsubsection{Timeline Entries}
The ``job'' of the options passed to the |animate| key is to specify the
timelines of the animation of (a part of) a picture. For each object and each
attribute there may or may not be a timeline and, if present, the timeline
consist of sequences of pairs of times and values. Thus, the most basic entity
of an animation specification is a tuple consisting of five parts, which are
selected by five different keys:
%
\begin{itemize}
\item |object| for selecting the object,
\item |attribute| for selecting the attribute,
\item |id| for selecting the timeline id (explained in
Section~\ref{section-anim-def-id}),
\item |time| for selecting a time, and
\item |value| for selecting a value.
\end{itemize}
%
When all of these parts have been set up (using the above keys, which will be
explained in more detail in a moment), you can use the following key to create
an entry:
\begin{key}{/tikz/animate/entry}
Each time this key is used in the options of |animate|, \tikzname\ checks
whether the five keys |object|, |attribute|, |id|, |time|, and |value| are
set. If one of them is not set, nothing happens. (The |id| key is set to
the value |default| by default; all other keys must be set explicitly.)
If all of these keys are set, a \emph{time--value} pair is created and
added to the timeline of attribute of the object. Additionally, all options
starting with |/tikz/animate/options/|, which also influence the timeline
like |begin on|, are also added to the timeline of the object--attribute
pair.
%
\begin{codeexample}[
preamble={\usetikzlibrary{animations}},
animation list={0.5,1,1.5,2},
]
\tikz [animate = {
object = node, attribute = fill, time = 0s, value = red, entry,
object = node, attribute = fill, time = 2s, value = blue, entry,
object = node, attribute = fill, begin on = click, entry}]
\node (node) [fill, text=white] { Click me };
\end{codeexample}
%
In the above example, it would not have been necessary to specify the
object and the attribute in each line; they retain their values unless they
are overwritten. Thus, we could also have written:
%
\begin{codeexample}[
preamble={\usetikzlibrary{animations}},
animation list={0.5,1,1.5,2},
]
\tikz [animate = {
object = node, attribute = fill, time = 0s, value = red, entry,
time = 2s, value = blue, entry,
begin on = click, entry}]
\node (node) [fill, text=white] { Click me };
\end{codeexample}
%
Note, however, that in both examples we actually add the time--value pair
$(2\mathrm{s}, \mathrm{blue})$ twice since the |time| and |value| keys also
retain their settings and, thus, for the third |entry| they have the same
values as before and a new pair is added. While this superfluous pair is
not a problem in the example (it has no visual effect), we will see later
on how such pairs can be avoided by using the |scope| key.
A sequence of calls of |entry| can freely switch between objects and
attributes (that is, between timelines), but the times for any given
timeline must be given in non-decreasing order:
%
\begin{codeexample}[
preamble={\usetikzlibrary{animations}},
animation list={0.5,1,1.5,2},
]
\tikz [animate = {
object = node, attribute = fill, time = 0s, value = red, entry,
object = node2, attribute = draw, entry,
object = node, attribute = fill, time = 2s, value = blue, entry,
object = node2, attribute = draw, entry,
object = node, attribute = fill, begin on = click, entry,
object = node2, attribute = draw, begin on = click, entry}] {
\node (node) [fill, text=white] { Node 1 };
\node (node2) [draw, ultra thick] at (0,-1) { Node 2 };
}
\end{codeexample}
%
In the above example, we could not have exchanged the first two lines of
the |animate| options with the third and fourth line since the values for
time |0s| must come before the values for time |2s|.
\end{key}
In the following, we take a closer look at the five keys that influence the
|entry| key and then look at ways of grouping keys more easily.
\subsubsection{Specifying Objects}
\label{section-anim-def-obj}
You use the |object| key to select the object(s) to which the next use of
|entry| applies. There is also a special syntax for this, which is explained in
Section~\ref{section-anim-syntax-obj}.
\begin{key}{/tikz/animate/object=\meta{list of objects}}
The \meta{list of objects} is a comma-separated list of strings of the form
\meta{object}\opt{|.|\meta{type}}. All of the objects in the list are
selected as to-be-animated objects for the next use of the |entry| key. The
objects referred to by \meta{object} will be the \emph{next} objects with
the |name| key set to \meta{object}. You can apply the |name| key to nodes
(where you can also use the special parentheses-syntax and put the name in
parentheses, it has the same effect), but also to scopes and paths. (The
|name path| key is not the same as |name|; it is an older key from the
intersections package and not related.)
%
\begin{codeexample}[
preamble={\usetikzlibrary{animations}},
animation list={0.5,1,1.5,2},
]
\tikz [animate = { object = b, :fill = {0s = "red", 2s = "blue",
begin on = click }}] {
\node (a) [fill, text = white, minimum width=1.5cm] at (0,1cm) {a};
\node (b) [fill, text = white, minimum width=1.5cm] at (0,5mm) {b};
\node (c) [fill, text = white, minimum width=1.5cm] at (0,0mm) {c}; }
\end{codeexample}
%
\begin{codeexample}[
preamble={\usetikzlibrary{animations}},
animation list={0.5,1,1.5,2},
]
\tikz [animate = { object = b, :fill = {0s = "red", 2s = "blue",
begin on = click },
object = c, :fill = {0s = "green", 2s = "blue",
begin on = click } }] {
\scoped [name = a, yshift=1cm] \fill (0,0) rectangle (1.5cm,2mm);
\scoped [name = b, yshift=5mm] \fill (0,0) rectangle (1.5cm,2mm);
\scoped [name = c, yshift=0mm] \fill (0,0) rectangle (1.5cm,2mm); }
\end{codeexample}
If the \meta{object} name is never used later in the file, no animation is
created.
The \meta{object} may also be the special text |myself|. In this case, the
referenced object is the scope or object to which the |animate| key is
given. If an object is named |myself| (as in |\node (myself) ...|), you
cannot reference this node using the |object| key, |myself| \emph{always}
refers to the object where the |animate| key is given (of course, you can
animate the node named |myself| by placing the |animate| key inside the
options of this node; you only cannot ``remotely'' add an animation to it).
The \meta{object} may be followed by a dot and a \emph{type}. This is needed
in rare cases where you want to animate only a special ``part'' of an
object that is not accessible in other ways. Normally, \tikzname\ takes
care of choosing these types automatically; you only need to set them ``if
you know what you are doing''.
\end{key}
\subsubsection{Specifying Attributes}
\label{section-anim-def-attr}
\begin{key}{/tikz/animate/attribute=\meta{list of attributes}}
The list of attributes must be a comma-separated list of attribute names.
The timelines specified later will apply to all of these attributes (and to
all objects previously selected using |object|). Possible attributes
include colors, positions, line width, but even the paths themselves. The
exact list of possible attributes is documented in
Section~\ref{section-anim-attrs}.
%
\begin{codeexample}[
preamble={\usetikzlibrary{animations}},
animation list={0.5,1,1.5,2},
]
\tikz [animate = {attribute = fill, n: = { 0s = "red", 2s = "blue",
begin on = click } }]
\node (n) [fill, text = white] {The node};
\end{codeexample}
\end{key}
\subsubsection{Specifying IDs}
\label{section-anim-def-id}
\begin{key}{/tikz/animate/id=\meta{id} (initially default)}
Timelines are used to define how the values of an attribute of an object
change over time. In many cases, you will have at most one timeline for
each object--attribute pair, but, sometimes, you may wish to have more than
one timeline for the same object and the same attribute. For instance, you
might have a timeline that specifies a changing |shift| of a node in some
direction and, at the same time, another timeline that specifies an
additional |shift| in some other direction(s). The problem is that there is
only one |shift| attribute and it would be difficult to compute the joint
effect of the two timelines.
For this purpose, timelines are actually identified not only by the
object--attribute pair but, in reality, by the triple consisting of the
object, the attribute, and the value of this key. We can now specify two
separate timelines:
%
\begin{codeexample}[
preamble={\usetikzlibrary{animations}},
animation list={0.5,1,1.5,2},
]
\tikz [animate = {
id = 1, n:shift = { 0s = "{(0,0)}", 2s = "{(0,5mm)}", begin on = click },
id = 2, n:shift = { 0s = "{(0,0)}", 2s = "{(5mm,0)}", begin on = click }
}]
\node (n) [fill = blue!20, draw=blue, very thick] {The node};
\end{codeexample}
The default value of |id| is |default|.
\end{key}
Because of the possibility of creating multiple timelines for the same
attribute, it may happen that there is more than one timeline active that is
``trying to modify'' a given attribute. In this case, the following rules are
used to determine, which timeline ``wins'':
%
\begin{enumerate}
    \item If no animation is active at the current time (all animations have
        either not yet started or have already ended), then the |base| value
given in the animation encountered last in the code is used. (If there
are no base values, the attribute is taken from the surrounding scope
and the animations have ``no effect''.)
\item If there are several active animations, the one that has started last
is used and its value is used.
\item If there are several active animations that have started at the same
time, the one that comes last in the code is used.
\end{enumerate}
Note that these rules do not apply to transformations of the canvas since these
are always additive (or, phrased differently, they are always all active and
the effects accumulate).
\subsubsection{Specifying Times}
\label{section-anim-def-times}
\begin{key}{/tikz/animate/time=\meta{time}\opt{|later|}}
Sets the time for the next time--value pair in a call of |entry| to
\meta{time} plus the current fork time. The text |later| is optional. Both
``fork times'' and the optional |later| will be explained in a moment.
\medskip\textbf{Time Parsing.}
The \meta{time} is parsed using the command |\pgfparsetime|, which is
essentially the same as the usual math parser of \tikzname, and the result
is interpreted as a time in seconds. Thus, a \meta{time} of |2+3| means ``5
seconds'' and a \meta{time} of |2*(2.1)| means ``4.2 seconds''. (You could
even specify silly times like |1in|, which results in the time ``72.27
seconds''. Please do not do that.) The ``essentially'' refers to the fact
that some extras are installed when the time parser is running:
%
\begin{itemize}
\item The postfix operator |s| is added, which has no effect. Thus,
when you write |5s| you get the same results as |5|, which is
exactly 5 seconds as desired.
\item The postfix operator |ms| is added, which divides a number by
1000, so |2ms| equals 0.002s.
\item The postfix operator |min| is added, which multiplies a number by
60.
\item The postfix operator |h| is added, which multiplies a number by
3600.
\item The infix operator |:| is redefined, so that it multiplies its
first argument by 60 and adds the second. This implies that |1:20|
equals 80s and |01:00:00| equals 3600s.
\item The parsing of octal numbers is switched off to allow things like
|01:08| for 68s.
\end{itemize}
Note that you cannot use the colon syntax for times here: something like
|01:20 = "0"| would (falsely) be interpreted as ``For the object named |01|
and its attribute named |20|, do something.'' You can, however, use |01:20|
in arguments to the |time| key, meaning that you would have to write
|time = 1:20, "0"| instead, possibly surrounded by a |scope|.
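As a small sketch (the object name |n| is arbitrary), times written with
these operators might look like this:
%
\begin{codeexample}[code only]
animate = {
  n:xshift = {
    0s = "0mm",
    time = 1:20, "5mm",   % 1 minute and 20 seconds, i.e., 80s
    2min = "1cm",         % the min postfix: 120s
    begin on = click
  }
}
\end{codeexample}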
\medskip\textbf{Relative Times.}
You can suffix a |time| key with ``|later|''. In this case, the \meta{time}
is interpreted as an offset to the time in the previous use of the time
key:
%
\begin{codeexample}[
preamble={\usetikzlibrary{animations}},
animation list={0.5,1,1.5,2},
]
\tikz \node :fill = { begin on = click,
0s = "white",
500ms later = "red",
500ms later = "green", % same as 1s = "-5mm"
500ms later = "blue"} % same as 1.5s = "-2.5mm"
[fill=blue!20, draw=blue, very thick, circle] {Click me};
\end{codeexample}
In reality, the offset is not taken to just any previous use of the |time|
key, but to the most recent use of this key or of the |resume| key in the
current local \TeX\ scope. Here is an example:
%
\begin{codeexample}[code only]
time = 2s,
time = 1s later, % same as time = 3s
time = 500ms later, % same as time = 3.5s
time = 4s,
time = 1s later, % same as time = 5s
scope = { % opens a local scope
time = 1s later, % same as time = 6s
  time = 10s,
time = 1s later % same as time = 11s
}, % closes the scope, most recent time is 5s once more
time = 2s later % same as time = 7s
\end{codeexample}
\medskip\textbf{Fork Times.}
The time meant by the value \meta{time} passed to the |time| key is not
used directly. Rather, \tikzname\ adds the current \emph{fork time} to it,
which is |0s| by default. You can change the fork time using the following
key:
%
\begin{key}{/tikz/animate/fork=\meta{t} (default 0s later)}
Sets the fork time for the local scope to \meta{t} and sets the current
time to |0s|. In this scope, when you use ``absolute'' times like |0s|
        or |2s|, you actually refer to later moments: the fork time \meta{t}
        is added to them.
One application of forks is in the definition of keys that add a
certain part to a longer animation. Consider for instance the
definition of a |highlight| key:
%
\begin{codeexample}[
preamble={\usetikzlibrary{animations}},
animation list={1.05,1.1,1.15,1.2,2.05,2.1,2.15,2.2},
]
\tikz [animate/highlight/.style = {
scope = { fork = #1,
:fill = { 0s = "black", 0.1s = "white", 0.2s = "black"} }
}]
\node [animate = { myself: = {
:fill = { 0s = "black", begin on = click },
highlight = 1s, highlight = 2s } },
fill = blue, text=white, very thick, circle] { Click me };
\end{codeexample}
%
In the above example, we could also have written |0.1s later| instead
of |0.2s| and, indeed, the whole style could have been defined using
only times with |later|, eliminating the need for the |fork| key.
However, using forks you can specify absolute times for things
happening in a conceptual ``subprocess'' and also relative times. The
name |fork| for the key is also borrowed from operating system theory,
where a ``fork'' is the spawning of an independent process.
\end{key}
\medskip\textbf{Remembering and Resuming Times.}
When you have a complicated animation with a long timeline, you will
sometimes wish to start some animation when some other animation has
reached a certain moment; but this moment is only reached through heavy use
of |later| times and/or forks. In such situations, the following keys are
useful:
%
\begin{key}{/tikz/animate/remember=\meta{macroname}}
This key stores the current time (the time of the last use of the
|time| key) globally in the macro \meta{macroname}. This time will
include the offset of the fork time:
%
\begin{codeexample}[code only]
time = 2s,
fork = 2s later, % fork time is now 4s
time = 1s, % local time is 1s, absolute time is 5s (1s + fork time)
time = 1s later, % local time is 2s, absolute time is 6s (2s + fork time)
remember = \mytime % \mytime is now 6s
\end{codeexample}
\end{key}
%
\begin{key}{/tikz/animate/resume=\meta{absolute time}}
The \meta{absolute time} is evaluated using |\pgfparsetime| and, then,
the current time is set to the resulting time minus the fork time. When
the \meta{absolute time} is a macro previously set using |remember|,
the net effect of this is that we return to the exact ``moment'' in the
global time line when |remember| was used.
%
\begin{codeexample}[code only]
fork = 4s,
time = 1s,
remember = \mytime % \mytime is now 5s
fork = 2s, % fork time is now 2s, local time is 0s
resume = \mytime % fork time is still 2s, local time is 3s
\end{codeexample}
%
Using resume you can easily implement a ``join'' operation for forked
times. You simply remember the times at the ends of the forks and then
resume the maximum time of these remembered times:
%
\begin{codeexample}[code only]
scope = {
fork,
time = 1s later,
...
remember = \forka
},
scope = {
fork,
time = 5s later,
...
remember = \forkb
},
scope = {
fork,
time = 2s later,
...
remember = \forkc
},
resume = {max(\forka,\forkb,\forkc)} % "join" the three forks
\end{codeexample}
\end{key}
\end{key}
\subsubsection{Values}
\label{section-anim-def-values}
\begin{key}{/tikz/animate/value=\meta{value}}
This key sets the value of the next time--value pair created by |entry| to
\meta{value}. The syntax of the \meta{value} is not fixed, it depends on
the type of the attribute. For instance, for an attribute like |opacity|
the \meta{value} must be an expression that can be evaluated to a number
between 0 and 1; for the attribute |color| the \meta{value} must, instead,
be a color; and so on. Take care that when a value contains a comma, you
must surround it by braces as in |"{(1,1)}"|.
The allowed texts for the \meta{value} is always the same as the one you
would pass to the \tikzname\ option of the same name. For instance, since
the \tikzname\ option |shift| expects a coordinate, you use coordinates as
\meta{value} with the usual \tikzname\ syntax (including all sorts of
extensions; the animation system calls the standard \tikzname\ parsing
routines). The same is true of dimensions, scalar values, colors, and so
on.
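For example (a sketch; the object name |n| is arbitrary), a
coordinate-valued attribute like |shift| is specified with braces protecting
the comma:
%
\begin{codeexample}[code only]
animate = {
  object = n, attribute = shift,
  time = 0s, value = {(0,0)},   entry,
  time = 2s, value = {(1,5mm)}, entry
}
\end{codeexample}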
In addition to the values normally used for setting the attribute, you can
also (sometimes) use the special text |current value| as \meta{value}. This
means that the value of the point in the timeline should be whatever the
value the attribute has at the beginning of the timeline. For instance,
when you write
%
\begin{codeexample}[code only]
animate = { obj:color = { 0s = "current value", 2s = "white" } }
\end{codeexample}
%
the color of |obj| will change from whatever color it currently has to
white in two seconds. This is especially useful when several animations are
triggered by user events and the current color of |obj| cannot be
determined beforehand.
There are several limitations on the use of the text |current value|, which
had to be imposed partly because of the limited support of this feature in
\textsc{svg}:
%
\begin{itemize}
\item You can use |current value| only with the first time in a
timeline.
\item You can only have two times in a timeline that starts with
|current value|.
\item You cannot use |current value| for timelines of which you wish to
take a snapshot.
\end{itemize}
\end{key}
\subsubsection{Scopes}
\label{section-anim-scopes}
When you specify multiple timelines at the same time, it is often useful and
sometimes even necessary to have keys be set only locally. The following key
makes this easy:
\begin{key}{/tikz/animate/scope=\meta{options}}
Executes the \meta{options} inside a \TeX\ scope. In particular, all
settings made inside the scope have no effect after the end of the |scope|.
%
\begin{codeexample}[
preamble={\usetikzlibrary{animations}},
animation list={0.5,1,1.5,2},
]
\tikz \node [animate = { myself: = { begin on = click,
scope = { attribute = fill, repeats = 3, 0s = "red", 2s = "red!50" },
scope = { attribute = draw, 0s = "red", 2s = "red!50" }
}},
fill=blue!20, draw=blue, very thick, circle] {Click me};
\end{codeexample}
Without the use of the |scope| key, the |repeats| key would also affect the
draw attribute.
\end{key}
While the |scope| key is useful for structuring timeline code, it also keeps
the current time local to the scope, that is, if you use something like
|1s later| after the scope, this will refer to one second after the last use of
|time| \emph{before} the scope. The times set inside the |scope| do not matter.
While this is a desirable effect for forks, you may also sometimes wish to
synchronize the local time after the scope with the last time reached in the
scope. The following key makes this easy:
\begin{key}{/tikz/animate/sync=\meta{options}}
A shorthand for |scope={| \meta{options} |, remember=\temp},resume=\temp|
where |\temp| is actually an internal name. The effect is that after a
|sync| the local time just continues as if the scope were not present --
but regarding everything else the effects are local to the |sync| scope.
\end{key}
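Here is a minimal sketch of the difference: with |sync| the time reached
inside the scope carries over to the following entries, while with |scope|
it would not:
%
\begin{codeexample}[code only]
\tikz \node [animate = { myself:fill = { begin on = click,
      0s = "red",
      sync = { 1s later = "green" }, % local time inside advances to 1s
      1s later = "blue"              % continues at 1s, so this entry is
                                     % at 2s; with scope it would be at 1s
    }},
  fill = red, text = white, circle] {Click me};
\end{codeexample}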
\subsection{Syntactic Simplifications}
\label{section-anim-syntax-animate}
In the previous subsection we saw how timelines can be created by specifying
the individual entries of the timelines sequentially. However, most of the time
you will wish to use a simpler syntax that makes it easier to specify
animations. This syntax is only available inside the |animate| key (it is
switched on at the beginning) and consists of three ``parts'': The colon
syntax, the time syntax, and the quote syntax.
\subsubsection{The Colon Syntax I: Specifying Objects and Attributes}
\label{section-anim-syntax-obj}
Inside the \meta{animation specification} passed to the |animate| key, you can
specify an object and an attribute of this object using the following syntax,
whose use is detected by the presence of a colon inside a key:
%
\begin{quote}
\normalfont
\opt{\meta{object name(s)}}|:|\opt{\meta{attribute(s)}}
|={|\meta{options}|}|
or
\opt{\meta{object
name(s)}}|:|\opt{\meta{attribute(s)}}|_|\opt{\meta{id}}
|={|\meta{options}|}|
\end{quote}
%
In the place to the left of an equal sign, where you would normally use a key,
you can instead place an object name and an attribute separated by a colon.
Additionally, the attribute may be followed by an underscore and an \meta{id},
which identifies the timeline (see Section~\ref{section-anim-def-id}).
Each of these values may be missing, in which case it is not changed from its
previous value.
The effect of the above code is the same as:
%
\begin{quote}
\normalfont
|sync = { object = |\meta{objects}|, attribute = |\meta{attribute}|, id = |\meta{id}|, |\meta{options}|, entry }|
\end{quote}
%
although when the object, the attribute, or the id is left empty in the colon
syntax, the corresponding setting will be missing in the above call of |sync|.
Note that because of the |sync| the last time used inside the \meta{options}
will be available afterwards as the last time. Also note that an |entry| is
added at the end, so any settings of keys like |begin| or |repeats| inside the
\meta{options} will get added to the timeline.
Let us now have a look at some examples. First, we set the \meta{object name}
to |mynode| and |othernode| and the \meta{attribute} to |opacity| and to
|color|:
%
\begin{codeexample}[code only]
animate = {
mynode:opacity = { 0s = "1", 5s = "0" },
mynode:color = { 0s = "red", 5s = "blue" },
othernode:opacity = { 0s = "1", 5s = "0" },
}
\end{codeexample}
Next, we do the same, but ``in two steps'': First, we set the object to
|mynode|, but leave the attribute open and, then, set the attribute, but leave
the object:
%
\begin{codeexample}[code only]
animate = {
mynode: = {
:opacity = { 0s = "1", 5s = "0" },
:color = { 0s = "red", 5s = "blue" }
},
othernode:opacity = { 0s = "1", 5s = "0" },
}
\end{codeexample}
%
Note how both in |mynode:| and in |:opacity| and |:color| you must provide the
colon. Its presence signals that an object--attribute pair is being specified;
only now either the object or the attribute is missing.
We can also do it the other way round:
%
\begin{codeexample}[code only]
animate = {
:opacity = {
mynode: = { 0s = "1", 5s = "0" },
othernode: = { 0s = "1", 5s = "0" }
},
mynode:color = { 0s = "red", 5s = "blue" }
}
\end{codeexample}
%
Finally, if several objects should get the exact same values, we can also group
them:
%
\begin{codeexample}[code only]
animate = {
{mynode,othernode}:opacity = { 0s = "1", 5s = "0" },
mynode:color = { 0s = "red", 5s = "blue" }
}
\end{codeexample}
As mentioned earlier, all references to objects refer to future
objects, never to objects already created. Furthermore, also as mentioned
earlier, \tikzname\ allows you to specify |myself| as \meta{object}, which is
interpreted as the scope or node where the |animate| is given (you cannot
animate a node or scope named |myself|, this special name always refers to the
current node). In order to have all attributes refer to the current object, you
write:
%
\begin{codeexample}[code only]
\begin{scope} [animate = {
myself: = { % Animate the attribute of the scope
:opacity = { ... },
:xshift = { ... }
}
}]
...
\end{scope}
\end{codeexample}
The list of permissible attributes is given in
Section~\ref{section-anim-attrs}.
\subsubsection{The Colon Syntax II: Animating Myself}
A frequent use of the |animate| key is for animating attributes of the current
object |myself|. In these cases, it is a bit lengthy to write
%
\begin{codeexample}[code only]
[animate = { myself: = { :some attribute = {...} } } ]
\end{codeexample}
%
\noindent in the options of a node or a scope. For this reason, \tikzname\
allows you to use a special syntax with nodes and scopes:
%
\begin{enumerate}
\item In a \meta{node specification}, which is everything following a
|node| command up to the content of the node (which is surrounded by
curly braces), you can write
%
\begin{quote}
|:some attribute = {|\meta{options}|}|
\end{quote}
%
and this will have the same effect as if you had written
%
\begin{quote}
|[animate = { myself: = { :some attribute = {|\meta{options}|}}}]|
\end{quote}
%
Note that you can use this syntax repeatedly, but each use creates a
new use of the |animate| key, resulting in a new timeline. In order to
create complex timelines for several objects, use the |animate| key.
\item For the commands |\tikz|, |\scoped| and the environments
|{tikzpicture}| and |{scope}|, when they are followed immediately by
%
\begin{quote}
|:some attribute = {|\meta{options}|}|
\end{quote}
%
then
%
\begin{quote}
|animate = { myself: = { :some attribute = {|\meta{options}|}}}|
\end{quote}
%
is added to the options of the command or scope. Again, you can use the
syntax repeatedly. Note that when an opening square bracket is
encountered, this special parsing stops.
\end{enumerate}
Let us have a look at some examples. First, we use the syntax to set the fill
opacity of a node:
%
\begin{codeexample}[
preamble={\usetikzlibrary{animations}},
animation list={0.5,1,1.5,2},
]
\tikz \node
:fill opacity = { 0s="1", 2s="0", begin on=click }
[fill = blue!20, draw = blue, ultra thick, circle] {Here!};
\end{codeexample}
%
Next, we additionally rotate the node:
%
\begin{codeexample}[
preamble={\usetikzlibrary{animations}},
animation list={0.5,1,1.5,2},
]
\tikz \node
:fill opacity = { 0s="1", 2s="0", begin on=click }
:rotate = { 0s="0", 2s="90", begin on=click }
[fill = blue!20, draw = blue, ultra thick, circle] {Here!};
\end{codeexample}
%
Note that there is no comma between consecutive uses of the colon syntax in
this case. We could have exchanged the order of the options and the uses of the
colon syntax:
%
\begin{codeexample}[
preamble={\usetikzlibrary{animations}},
animation list={0.5,1,1.5,2},
]
\tikz \node
:fill opacity = { 0s="1", 2s="0", begin on=click }
[fill = blue!20, draw = blue, ultra thick, circle]
:rotate = { 0s="0", 2s="90", begin on=click } {Here!};
\end{codeexample}
We can also use the special syntax with the |\tikz| command itself:
%
\begin{codeexample}[
preamble={\usetikzlibrary{animations}},
animation list={0.5,1,1.5,2},
]
\tikz :fill opacity = { 0s="1", 2s="0", begin on=click }
:rotate = { 0s="0", 2s="90", begin on=click }
[ultra thick]
\node [fill = blue!20, draw = blue, circle] {Here!};
\end{codeexample}
Note that we could \emph{not} have moved the |[ultra thick]| options before
|:rotate| since the options in square brackets end the special parsing.
\subsubsection{The Time Syntax: Specifying Times}
For each object--attribute pair you must specify the \emph{timeline} of the
attribute. This is a curve that specifies for each ``moment in time'' which
value the attribute should have. In the simplest case, you specify such a
time--value pair as follows:
%
\begin{quote}
\normalfont
\meta{time} |="|\meta{value}|"|
\end{quote}
When you specify time--value pairs, you must specify the times in chronological
order (so earlier times come first), but you may specify the same time several
times (this is useful in situations where you have a ``jump'' from one value to
another at a certain moment in time: you first specify the value ``from which
the attribute jumps'' and then you specify the value ``to which the attribute
jumps'' for the same moment).
The above syntax is just a special case of a more general situation. Let us
start with the times. The general syntax for specifying times is as follows:
%
\begin{quote}
\normalfont
\meta{time} \opt{|=| \meta{options}}
\end{quote}
Here, \meta{time} is a text that ``looks like a time'', which means that:
%
\begin{enumerate}
\item It is not a key and does not contain a colon and does not start with
a quotation mark.
    \item It starts with a digit, a plus or minus sign, a dot, or a
parenthesis.
\end{enumerate}
If these two things are the case, the above code is transformed to the
following call:
%
\begin{quote}
\normalfont
|sync = {time = |\meta{time}|, |\meta{options}|, entry}|
\end{quote}
\subsubsection{The Quote Syntax: Specifying Values}
We saw already in several examples that values are put in quotation marks
(similar to the way this is done in \textsc{xml}). This quote syntax is as
follows:
%
\begin{quote}
\normalfont
|"|\meta{value}|"| \opt{|base|} \opt{|=| \meta{options}}
\end{quote}
This syntax is triggered whenever a key starts with a quotation mark%
\footnote{Of catcode 12 for those knowledgeable of such things.} (and note that
when the \meta{value} contains a comma, you have to surround it by curly braces
\emph{inside} the quotation marks as in |"{(1,1)}"|). Then, the following code
is executed:
%
\begin{quote}
\normalfont
|sync = {value = |\meta{value}|, |\meta{options}|, entry}|
\end{quote}
This means that when you write |1s = "red"|, what actually happens is that
\tikzname\ executes the following:
%
\begin{codeexample}[code only]
sync = { time = 1s, sync = { value = red, entry }, entry }
\end{codeexample}
%
Note that the second entry has no effect since no value is specified and the
|entry| key only ``takes action'' when both a time and a value have been
specified. Thus, only the innermost |entry| does, indeed, create a time--value
pair as desired.
In addition to the above, if you have added |base| after the closing quote, the
following gets executed before the above |sync|:
%
\begin{quote}
\normalfont
|base = {value = |\meta{value}|}|
\end{quote}
This makes it easy to specify base values for timelines.
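A minimal sketch: here, the node is blue except while the click-triggered
animation is running:
%
\begin{codeexample}[code only]
\tikz \node :fill = { 0s = "red", 2s = "green", begin on = click,
                      "blue" base }
  [fill, text = white, circle] {Click me};
\end{codeexample}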
Interestingly, instead of |1s="red"| you can also write |"red"=1s|. Let us now
have a look at situations where this can be useful.
\subsubsection{Timesheets}
Using the |sync| key or using the three different syntactic constructs
introduced earlier (the colon syntax, the time syntax, the quote syntax), you
can organize the specification of an animation in different ways. Basically,
the two most useful ways are the following:
%
\begin{enumerate}
\item You first select an object and an attribute for which you wish to
establish a timeline and then provide the time--value pairs in a
sequence:
%
\begin{codeexample}[code only]
animate = {
obj:color = {
0s = "red",
2s = "blue",
1s later = "green",
1s later = "green!50!black",
10s = "black"
}
}
\end{codeexample}
%
When you specify timelines for several attributes of the same object,
you can group these together:
%
\begin{codeexample}[code only]
animate = {
obj: = {
:color = { 0s = "red", 2s = "green" },
:opacity = { 0s = "1", 2s = "0" }
}
}
\end{codeexample}
%
In this way of specifying animations the ``object comes first''.
\item Alternatively, you can also group the animation by time and, for each
``moment'' (known as \emph{keyframes}) you specify which values the
attributes of the object(s) have:
%
\begin{codeexample}[code only]
animate = {
0s = {
obj:color = "red",
obj:opacity = "1"
},
2s = {
obj:color = "green",
obj:opacity = "0"
}
}
\end{codeexample}
%
Naturally, in this case it would have been better to ``move the object
outside'':
%
\begin{codeexample}[code only]
animate = {
obj: = {
0s = {
:color = "red",
:opacity = "1"
},
2s = {
:color = "green",
:opacity = "0"
}
}
}
\end{codeexample}
%
When there are several objects involved, we can mix all of these
approaches:
%
\begin{codeexample}[code only]
animate = {
0s = {
obj: = {
:color = "red",
:opacity = "1"
},
main node: = {
:color = "black"
}
},
2s = {
obj: = {
:color = "green",
:opacity = "0"
},
main node: = {
:color = "white"
}
}
}
\end{codeexample}
%
\end{enumerate}
\subsection{The Attributes That Can Be Animated}
\label{section-anim-attrs}
The following \meta{attributes} are permissible (actually, the attribute names
do not include a colon, but since they will almost always be used with the
colon syntax, it makes it easier to identify them):
%
\begin{itemize}
\itemsep0pt
\item |:dash phase|
\item |:dash pattern|
\item |:dash|
\item |:draw opacity|
\item |:draw|
\item |:fill opacity|
\item |:fill|
\item |:line width|
\item |:opacity|
\item |:position|
\item |:path|
\item |:rotate|
\item |:scale|
\item |:stage|
\item |:text opacity|
\item |:text|
\item |:translate|
\item |:view|
\item |:visible|
\item |:xscale|
\item |:xshift|
\item |:xskew|
\item |:xslant|
\item |:yscale|
\item |:yshift|
\item |:yskew|
\item |:yslant|
\end{itemize}
These attributes are detailed in the following sections, but here is a quick
overview of those that do not have a \tikzname\ key of the same name (and which
thus do not just animate the attribute set using this key):
%
\begin{itemize}
\item |:shift| allows you to add an animated shifting of the canvas, just
like \tikzname's |shift| key. However, in conjunction with the |along|
key, you can also specify the shifting along a path rather than via a
timeline of coordinates.
    \item |:position| works similarly to |:shift|, only the coordinates are not
relative movements (no ``shifts''), but refer to ``absolute positions''
in the picture.
\item |:path| allows you to animate a path (it will morph). The ``values''
are now paths themselves.
\item |:view| allows you to animate the view box of a view.
\item |:visible| decides whether an object is visible at all.
\item |:stage| is identical to |:visible|, but when the object is not
animated, it will be hidden by default.
\end{itemize}
\subsubsection{Animating Color, Opacity, and Visibility}
\label{section-animation-painting}
You can animate the color of the target object of an animation using the
attributes |fill|, |draw|, and |text|. When the target of a color animation is
a scope, you animate the color ``used in this scope'' for filling or stroking.
However, when an object inside the scope has its color set explicitly, this
color overrules the color of the scope.
\begin{tikzanimateattribute}{fill, draw}
\begin{codeexample}[
preamble={\usetikzlibrary{animations}},
animation list={0.5,1,1.5,2},
]
\tikz :fill = {0s = "red", 2s = "blue", begin on = click}
[text = white, fill = orange ] {
\node [fill] at (0mm,0) {A};
\node [fill] at (5mm,0) {B};
\node [fill = green!50!black ] at (1cm,0) {C};
}
\end{codeexample}
\end{tikzanimateattribute}
\begin{tikzanimateattribute}{text}
The |text| attribute only applies to nodes and you need to directly animate
the |text| attribute of each node individually.
%
\begin{codeexample}[
preamble={\usetikzlibrary{animations}},
animation list={0.5,1,1.5,2},
]
\tikz [my anim/.style={ animate = {
myself:text = {0s = "red", 2s = "blue", begin on = click}}},
text = white, fill = orange ] {
\node [fill, my anim] at (0,0) {A};
\node [fill, my anim] at (1,0) {B};
}
\end{codeexample}
%
Unlike the |fill| and |draw| colors, you cannot animate the |text| color
for scopes:
%
\begin{codeexample}[
preamble={\usetikzlibrary{animations}},
animation list={0.5,1,1.5,2},
]
\tikz [animate = {myself:text = {0s = "red", 2s = "blue",
begin on = click}},
text = white, fill = orange ] {
\node [fill] at (0,0) {A};
\node [fill] at (1,0) {B};
}
\end{codeexample}
%
\end{tikzanimateattribute}
\begin{tikzanimateattribute}{color}
The |color| attribute is not really an attribute. Rather, it is a shorthand
for |{draw,fill,text}|. This means that |color| does not start a separate
timeline, but continues the |draw| timeline, the |fill| timeline, and the
|text| timeline.
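For instance, a specification along the following lines (shown here as code
only, with arbitrary example values) should continue all three timelines at
once:
%
\begin{codeexample}[code only]
% animates the draw, fill, and text colors together
\tikz \node :color = { 0s = "red", 2s = "blue", begin on = click }
  [fill = orange, text = white, circle] {Click me!};
\end{codeexample}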
\end{tikzanimateattribute}
\begin{tikzanimateattribute}{opacity, fill opacity, stroke opacity}
Similarly to the color, you can also set the opacity used for filling and
for drawing using the attributes |fill opacity| and |draw opacity|, which
are exactly the same as the usual \tikzname\ keys of the same names.
%
\begin{codeexample}[
preamble={\usetikzlibrary{animations}},
animation list={0.5,1,1.5,2},
]
\tikz \node :fill opacity = { 0s="1", 2s="0", begin on=click }
[fill = blue!20, draw = blue, ultra thick, circle] {Click me!};
\end{codeexample}
%
Unlike colors, where there is no joint attribute for filling and stroking,
there is a single |opacity| attribute in addition to the above two
attributes. If supported by the driver, it treats the graphic object to
which it is applied as a transparency group. In essence, ``this attribute
does what you want'' at least in most situations.
%
\begin{codeexample}[
preamble={\usetikzlibrary{animations}},
animation list={0.5,1,1.5,2},
]
\tikz \node :opacity = { 0s="1", 2s="0", begin on=click }
[fill = blue!20, draw = blue, ultra thick, circle] {Click me!};
\end{codeexample}
%
\end{tikzanimateattribute}
\begin{tikzanimateattribute}{visible, stage}
The difference between the |visible| attribute and an opacity of |0| is
that an invisible object cannot be clicked and does not need to be
rendered. The (only) two possible values for this attribute are |false| and
|true|.
%
\begin{codeexample}[
preamble={\usetikzlibrary{animations}},
animation list={1,2,3,4},
]
\tikz :visible = {begin on=click, 0s="false", 2s="false"}
\node (node) [fill = blue!20, draw = blue, very thick, circle] {Click me!};
\end{codeexample}
The |stage| attribute is the same as the |visible| attribute, only
|base="false"| is set by default. This means that the object is \emph{only}
visible during the time the entries are set to |true|.
The idea behind the name ``stage'' is that the object is normally ``off
stage'' and when you explicitly set the ``stage attribute'' to |true| the
object ``enters'' the stage and ``leaves'' once more when it is no longer
``on stage''.
%
\begin{codeexample}[
preamble={\usetikzlibrary{animations}},
animation list={-1,0,1,2,3},
animation bb={(1.3,-0.7) rectangle (2.7,0.7)},
]
\tikz [animate = {example:stage = {
begin on = {click, of next=node},
0s="true", 2s="true" }}] {
\node (node) [fill = blue!20, draw = blue, very thick, circle] {Click me!};
\node at (2,0) (example) [fill = blue!20, circle] {Effect};
}
\end{codeexample}
\end{tikzanimateattribute}
\subsubsection{Animating Paths and their Rendering}
\label{section-animation-paths}
The attributes of the appearance of a path that you can animate include the
line width and the dash pattern, the path itself, as well as the arrow tips
attached to the paths. Animating the line width and the dash pattern is easy
since the animation attributes simply have the same names as the properties
that they animate, and the syntax for setting the values is also the same:
\begin{tikzanimateattribute}{line width}
\begin{codeexample}[
preamble={\usetikzlibrary{animations}},
animation list={0.5,1,1.5,2},
]
\tikz \node :line width = { 0s="1pt", 2s="5mm", begin on=click}
[fill = blue!20, draw = blue, ultra thick, circle] {Click me!};
\end{codeexample}
%
Note that you must specify numbers (or expressions that evaluate to numbers)
as values; you cannot say |thin| or |thick| (these are styles, internally,
and you also cannot say |line width=thick|).
\end{tikzanimateattribute}
\begin{tikzanimateattribute}{dash, dash pattern, dash phase}
The values for an animation of the dashing are specifications (see the
|dash| key for details) consisting of a sequence of |on| and |off| numbers.
In each value of the animation the length of these sequences \emph{must} be
identical. The interpolation of the values is done for each position of the
sequences individually, and also on the phase.
%
\begin{codeexample}[
preamble={\usetikzlibrary{animations}},
animation list={0.5,1,1.5,2},
]
\tikz \node :dash = { 0s="on 10pt off 1pt phase 0pt",
2s="on 1pt off 10pt phase 0pt", begin on=click}
[fill = blue!20, draw = blue, ultra thick, circle] {Click me!};
\end{codeexample}
%
\begin{codeexample}[
preamble={\usetikzlibrary{animations}},
animation list={0.5,1,1.5,2},
]
\tikz \node :dash = { 0s="on 1cm off 1pt phase 0pt",
2s="on 1cm off 1pt phase 1cm", begin on=click}
[fill = blue!20, draw = blue, ultra thick, circle] {Click me!};
\end{codeexample}
The |dash phase| key allows you to animate the dash phase only. However,
due to the way dashing is handled by certain drivers, the dash pattern is
also set, namely to the current dash pattern that is in force when the
animation is created.
%
\begin{codeexample}[
preamble={\usetikzlibrary{animations}},
animation list={0.5,1,1.5,2},
]
\tikz \node :dash phase = { 0s="0pt", 2s="1cm", begin on=click}
[fill = blue!20, draw = blue, ultra thick, circle, dashed] {Click me!};
\end{codeexample}
%
\end{tikzanimateattribute}
The above attributes ``only'' influence how the path is rendered. You can,
however, also animate the path itself:
\begin{tikzanimateattribute}{path}
When you animate a path, the values are, of course, paths themselves:
%
\begin{codeexample}[
preamble={\usetikzlibrary{animations}},
animation list={0.5,1,1.5,2},
]
\tikz \node :path = {
0s = "{(0,-1) .. controls (0,0) and (0,0) .. (0,1) -- (1,1)}",
2s = "{(0,-1) .. controls (-1,0) and (-1,0) .. (-1,1) -- (.5,-1)}",
begin on=click }
[fill = blue!20, draw = blue, ultra thick, circle] {Click me!};
\end{codeexample}
%
There are a number of things to keep in mind when you animate a path:
%
\begin{itemize}
\item The path ``values'' are parsed and executed in an especially
protected scope to ensure that they have as few side effects as possible,
but you should not do ``fancy things'' on these paths.
\item As for the dash pattern, you must ensure that all paths in the
timeline have the same structure (same sequence of path
construction commands); only the coordinates may differ. In
particular, you cannot say that the path at |1s| is a rectangle
using |rectangle| and at |2s| is a circle using |circle|. Instead,
you would have to ensure that at both times the path consists of
appropriate Bézier curves (which is cumbersome as the following
example shows, where we used the fact that a circle consists of
four Bézier curves):
%
\begin{codeexample}[
preamble={\usetikzlibrary{animations}},
animation list={0.5,1,1.5,2},
]
\tikz \node :path = {
0s = "{(0,0) circle [radius=1cm]}",
2s = "{(0,0)
(1,0) .. controls +(0,0) and +(0,0) .. (0,1)
.. controls +(0,0) and +(0,0) .. (-1,0)
.. controls +(0,0) and +(0,0) .. (0,-1)
.. controls +(0,0) and +(0,0) .. (1,0)
-- cycle (0,0)}",
begin on=click}
[fill = blue!20, draw = blue, ultra thick, circle] {Click me!};
\end{codeexample}
%
\item You must specify arrow tips for an animated path in a special
way, namely using the |arrows| key for \emph{animations}, not the
normal |arrows| key (see below).
\end{itemize}
\end{tikzanimateattribute}
\begin{key}{/tikz/animate/arrows=\meta{arrow spec}}
This key only has an effect on |:path| animations. It causes the arrow tips
specified in \meta{arrow spec} to be added to the path during the animation
(the syntax is the same as for the normal |arrows| key). If you have
several different animations for a path, these may contain different arrow
tips, but each animation must stick to one kind of arrow tip.
What happens internally when this key is used is the following: The
specified arrow tips are rendered internally as so-called \emph{markers,}
which are small graphics that can be placed at the beginning and ends of
paths and which ``rotate along'' as a path changes. Note that these markers
are used \emph{only} in the context of animated paths; the arrow tips of
normal, ``static'' paths are drawn without the use of markers. Normally,
there is no visual difference between an arrow tip drawn using markers or
those drawn for static paths, but in rare cases there may be differences.
You should only add arrows to open paths consisting of a single segment with
sufficiently long first and last segments (so that \tikzname\ can shorten
these segments correctly when necessary).
As pointed out earlier, the only way to add arrow tips to a path that is
animated is using this key; you can \emph{not} say something like
%
\begin{codeexample}[code only]
\draw :path = { 1s = "{(0,0) -- (1,0)}", 2s = "{(0,1) -- (1,0)}" }
[->] (0,0) -- (1,0);
\end{codeexample}
%
This will raise an error since you try to animate a path (|:path = ...|)
that has normal arrow tips attached (|[->]|).
Instead, you must specify the arrow tips inside the animation command:
%
\begin{codeexample}[code only]
\draw :path = { 1s = "{(0,0) -- (1,0)}", 2s = "{(0,1) -- (1,0)}", arrows = -> }
(0,0) -- (1,0);
\end{codeexample}
However, the above code now has a big shortcoming: While the animation is
\emph{not} running, \emph{no} arrow tip is shown (the |arrows| key only
applies to the animation).
The trick is to use the |base| key. It allows you to install a path as the
``base'' path that is used when no animation is running and the arrows
specified for the animation will also be used for the base. All told, the
``correct'' way to specify the animation is the following (note that no
static path is specified, any specified path would be overruled by the
|base| path anyway):
%
\begin{codeexample}[code only]
\draw :path = { 1s = "{(0,0) -- (1,0)}" base, 2s = "{(0,1) -- (1,0)}", arrows = -> };
\end{codeexample}
Here is an example:
%
\begin{codeexample}[
preamble={\usetikzlibrary{animations}},
animation list={0.5,1,1.5,2},
animation bb={(-0.1,-0.1) rectangle (1.1,1.1)},
]
\tikz [very thick] {
\node (node) at (-2,0)
[fill = blue!20, draw = blue, very thick, circle] {Click me!};
\draw :path = {
0s = "{(0,0) to[out=90, in=180] (.5,1) to[out=0, in=90] (.5,.5)}" base,
2s = "{(1,0) to[out=180, in=180] (.25,.5) to[out=0, in=180] (1,.5)}",
arrows = <.<->, begin on = {click, of=node} }; }
\end{codeexample}
%
\end{key}
\begin{key}{/tikz/animate/shorten < = \meta{dimension}}
\end{key}
\begin{key}{/tikz/animate/shorten > = \meta{dimension}}
For animated paths, just as the key |arrows| has to be passed to the
animation (to |:path|) instead of to the static path, the keys |shorten >|
and |shorten <| also have to be passed to the |:path| key.
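For instance, a sketch along the following lines (code only, with an
arbitrary shortening distance) should work:
%
\begin{codeexample}[code only]
% shorten the animated path at its end so the arrow tip fits
\draw :path = { 1s = "{(0,0) -- (1,0)}" base, 2s = "{(0,1) -- (1,0)}",
                arrows = ->, shorten > = 2pt };
\end{codeexample}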
\end{key}
\subsubsection{Animating Transformations: Relative Transformations}
In order to animate the canvas transformation matrix, you do not animate an
attribute called ``|:transform|''. Rather, there are several attributes that
all manipulate the canvas transformation matrix in different ways. These keys,
taken in appropriate combination, allow you to achieve any particular canvas
transformation matrix. All keys that animate the transformation matrix always
accumulate.
Let us start with the ``standard'' attributes that are also available as keys
in \tikzname:
\begin{tikzanimateattribute}{scale, xscale, yscale}
\begin{codeexample}[
preamble={\usetikzlibrary{animations}},
animation list={0.5,1,1.5,2},
]
\tikz \node :scale = { 0s="1", 2s="0.2", begin on=click}
[fill = blue!20, draw = blue, ultra thick, circle] {Click me!};
\end{codeexample}
\end{tikzanimateattribute}
\begin{tikzanimateattribute}{rotate}
The |rotate| key adds an animation of the rotation:
%
\begin{codeexample}[
preamble={\usetikzlibrary{animations}},
animation list={0.5,1,1.5,2},
]
\tikz \node :rotate = { 0s="45", 2s="90", begin on=click}
[fill = blue!20, draw = blue, ultra thick, circle] {Click me!};
\end{codeexample}
%
Note that there is no |rotate around| attribute, but you can use the
|origin| key to change the origin of the rotation.
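For instance, a sketch like the following (code only, with an arbitrary
origin) should rotate the node around a point one centimeter to the right of
its center; the |origin| key is described later in this section:
%
\begin{codeexample}[code only]
% rotate around a shifted origin instead of the node center
\tikz \node :rotate = { 0s = "0", 2s = "90", origin = {(1,0)},
                        begin on = click }
  [fill = blue!20, draw = blue, ultra thick, circle] {Click me!};
\end{codeexample}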
\end{tikzanimateattribute}
\begin{tikzanimateattribute}{xskew, yskew, xslant, yslant}
These keys add an animation of the skew (given in degrees) or slant (given as
in the |xslant| and |yslant| keys):
%
\begin{codeexample}[
preamble={\usetikzlibrary{animations}},
animation list={0.5,1,1.5,2},
]
\tikz \node :xskew = { 0s="0", 2s="45", begin on=click}
[fill = blue!20, draw = blue, ultra thick, circle] {Click me!};
\end{codeexample}
%
\begin{codeexample}[
preamble={\usetikzlibrary{animations}},
animation list={0.5,1,1.5,2},
]
\tikz \node :xslant = { 0s="-1", 2s="1", begin on=click}
[fill = blue!20, draw = blue, ultra thick, circle] {Click me!};
\end{codeexample}
%
\end{tikzanimateattribute}
\begin{tikzanimateattribute}{xshift, yshift}
\begin{codeexample}[
preamble={\usetikzlibrary{animations}},
animation list={0.5,1,1.5,2},
]
\tikz \node :xshift = { 0s="0pt", 2s="5mm", begin on=click}
[fill = blue!20, draw = blue, ultra thick, circle] {Click me!};
\end{codeexample}
%
\end{tikzanimateattribute}
\begin{tikzanimateattribute}{shift}
The |:shift| attribute can be animated in two ways. First, you can simply
specify a sequence of coordinates in the same way as you would use the
|shift| key in \tikzname:
%
\begin{codeexample}[
preamble={\usetikzlibrary{animations}},
animation list={0.5,1,1.5,2},
]
\tikz \node :shift = { 0s = "{(0,0)}", 2s = "{(5mm,-5mm)}",
begin on = click }
[fill = blue!20, draw = blue, ultra thick, circle] {Click me!};
\end{codeexample}
However, you can also specify the sequence of positions along which the
shift should occur in a different way, namely by \emph{specifying a path
along which the object should be moved.} This is often not only more
natural to do, but also allows you to specify movements along curves.
\begin{key}{/tikz/animate/options/along=\marg{path}\meta{|sloped| or
|upright|}\opt{| in|\meta{time}}%
}
Use this key with a |:shift| (or a |:position|) to make \tikzname\
shift the object by the coordinates along the \meta{path}. When this
key is used, the values may no longer be coordinates, but must be
fractions of the distance along the path. A value of |"0"| refers to
the beginning of the path and |"1"| refers to the end:
%
\begin{codeexample}[
preamble={\usetikzlibrary{animations}},
animation list={0.5,1,1.5,2},
]
\tikz {
\draw [help lines] (-0.2,-0.2) grid (2.2,1.2);
\draw (1,.5) circle [radius=1mm];
\node :shift = {
along = {(0,0) circle[radius=5mm]} upright,
0s="0", 2s=".25", begin on=click }
at (1,.5) [fill = blue, opacity=.5, circle] {Click};
}
\end{codeexample}
\end{key}
Following the \meta{path}, which must be put in braces, you must either
specify |upright| or |sloped|. In the first case, the to-be-animated object
is moved along the path normally (and stays ``upright''), whereas when you
use |sloped|, the object will be continuously rotated so that it always
points along the path.
%
\begin{codeexample}[
preamble={\usetikzlibrary{animations}},
animation list={0.5,1,1.5,2},
]
\tikz {
\draw [help lines] (-0.2,-0.2) grid (2.2,1.2);
\draw (1,.5) circle [radius=1mm];
\node :shift = {
along = {(0,0) circle[radius=5mm]} sloped,
0s="0", 2s=".25", begin on=click }
at (1,.5) [fill = blue, opacity=.5, circle] {Click};
}
\end{codeexample}
In most motion animations that use |along|, you will set the value for |0s|
to |"0"| and the value for some specific \meta{time} to |"1"|. Because of
this, you can add |in| \meta{time} after the path, to achieve exactly this
effect.
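For instance, a full traversal of the circle from the first example over two
seconds could presumably be written as follows (code only), without giving
explicit time--value pairs:
%
\begin{codeexample}[code only]
% "in 2s" stands for the pair 0s="0", 2s="1"
\tikz \node :shift = {
    along = {(0,0) circle [radius=5mm]} upright in 2s,
    begin on = click }
  at (1,.5) [fill = blue, opacity = .5, circle] {Click};
\end{codeexample}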
\end{tikzanimateattribute}
For the above attributes, it is not immediately clear which coordinate system
should be used for the animation. That is, when you move an object 1cm ``to the
right'', where is ``the right''? By default, movements and transformations like
|:shift| or |:scale| are relative to the \emph{animation coordinate system,}
which defaults to the local coordinate system of the to-be-animated object.
Consider the following example:
%
\begin{codeexample}[
preamble={\usetikzlibrary{animations}},
animation list={0.5,1,1.5,2},
]
\tikz {
\draw [help lines] (-0.2,-0.2) grid (2.2,2.2);
\node :rotate = { 0s="0", 2s="45", begin on=click}
at (1,1) [fill = blue!20, draw = blue, ultra thick] {Click me};
}
\end{codeexample}
%
Note how the node rotates around its center even though this center is at
position |(1,1)| in the picture's coordinate system. This is because |at (1,1)|
actually only does a shift of the coordinate system and the node is then drawn
at the origin of this shifted coordinate system. Since this shifted coordinate
system becomes the animation coordinate system, the rotation ``around the
origin'' is actually a rotation around the origin of the animation coordinate
system, which is at |(1,1)| in the picture's coordinate system.
Let us, by comparison, do a rotation of a scope surrounding the node where the
origin is not (yet) shifted:
%
\begin{codeexample}[
preamble={\usetikzlibrary{animations}},
animation list={0.5,1,1.5,2},
]
\tikz {
\draw [help lines] (-0.2,-0.2) grid (2.2,2.2);
\scoped :rotate = { 0s="0", 2s="45", begin on={click, of next=n} }
\node (n) at (1,1) [fill = blue!20, draw = blue, ultra thick] {Click me};
}
\end{codeexample}
%
Now the rotation is really around the origin of the picture.
Most of the time the animation coordinate system will be set up in the way ``you
expect'', but you can modify it using the following keys:
\begin{key}{/tikz/animate/options/origin=\meta{coordinate}}
Shifts the animation coordinate system by \meta{coordinate}. This has the
effect that the ``origin'' for scalings and rotations gets shifted by this
amount. In the following example, the point around which the rotation is
done is the right border at |(2,1)| since the origin of the animation is at
|(1,1)| relative to the picture's origin and the |origin| key shifts it one
centimeter to the right.
%
\begin{codeexample}[
preamble={\usetikzlibrary{animations}},
animation list={0.5,1,1.5,2},
]
\tikz {
\draw [help lines] (-0.2,-0.2) grid (2.2,2.2);
\node :rotate = { 0s="0", 2s="45", begin on=click,
origin = {(1,0)}}
at (1,1) [fill = blue!20, draw = blue, ultra thick] {Click me};
}
\end{codeexample}
%
\end{key}
\begin{key}{/tikz/animate/options/transform=\meta{transformation keys}}
While the |origin| key does only a shift, the |transform| key allows you to
add an arbitrary transformation to the animation coordinate system using
keys like |shift|, |rotate| or even |reset cm| and |cm|. In particular,
|origin=|\meta{c} has the same effect as |transform| |=|
|{shift=|\meta{c}|}|. Note that the transformation only influences the
animation, not the object itself.
As an example, when you say |transform={scale=2}|, an |:xshift| with a
value of |"1cm"| will actually shift the object by 2cm. Similarly, after
you say |transform={rotate=90,scale=2}|, the same |:xshift| of |"1cm"| will
actually shift the object by 2cm upwards.
Note that, internally, \tikzname\ has to invert the transformation matrix
resulting from the \meta{transformation keys} (plus the original animation
transformation matrix), which can be numerically unstable when you use
ill-conditioned transformations.
%
\begin{codeexample}[
preamble={\usetikzlibrary{animations}},
animation list={0.5,1,1.5,2},
]
\tikz {
\draw [help lines] (-0.2,-0.2) grid (2.2,2.2);
\node :xshift = { 0s="0cm", 2s="5mm", begin on=click,
transform = {rotate=-90} }
at (1,1) [fill = blue!20, draw = blue, ultra thick] {Click me};
}
\end{codeexample}
%
\begin{codeexample}[
preamble={\usetikzlibrary{animations}},
animation list={0.5,1,1.5,2},
]
\tikz {
\draw [help lines] (-0.2,-0.2) grid (2.2,2.2);
\node :xshift = { 0s="0cm", 2s="5mm", begin on=click,
transform = {rotate=-45, scale=2} }
at (1,1) [fill = blue!20, draw = blue, ultra thick] {Click me};
}
\end{codeexample}
%
\end{key}
\subsubsection{Animating Transformations: Positioning}
The attributes for specifying transformations and, in particular, the |:shift|
attribute are always expressed in the local animation coordinate system. This
makes it easy to ``shift around a node a little bit'', but makes it hard to
move a node ``from one position to another'' since coordinates need to be
expressed relative to the node's coordinate system, which leads to all sorts of
problems: Suppose you wish to have a node move from $(1,1)$ to $(2,1)$ and then
to $(2,0)$. Now, if the node has already been placed at $(1,1)$ in the usual
manner using |at|, then from the ``node's point of view'' you need to move the
node to $(0,0)$, $(1,0)$, and $(1,-1)$. To make matters worse, when you use
named coordinates as in
%
\begin{codeexample}[code only]
\coordinate(A) at (1,1);
\coordinate(B) at (2,1);
\coordinate(C) at (2,0);
\end{codeexample}
%
and then say that the movement should be from |(A)| to |(B)| to |(C)|, what
should you expect? On the one hand, |(A)| and |(1,1)| should normally be
interchangeable; on the other hand, |(A)| is a specific point in the plane, no
matter from which coordinate system we look at it. It turns out that \tikzname\
will stick to the second interpretation and actually turn |(A)| into |(0,0)|
when it is parsed in the local coordinate system of a node starting at |(A)| --
while |(1,1)| will stay the same.
Because of all these confusing effects, there is another attribute |:position|,
which is similar to a |:shift|, but the coordinates are not interpreted in the
local coordinate system of the node, but in the coordinate system that is in
force when the |animate| key is used. For a node, this is \emph{prior} to the
setup of the node's coordinate system and, thus, usually the picture's
coordinate system.
\begin{tikzanimateattribute}{position}
Compare the two animations, one with |:position|, one with |:shift|.
%
\begin{codeexample}[
preamble={\usetikzlibrary{animations}},
animation list={0.5,1,1.5,2},
]
\tikz {
\draw [help lines] (-0.2,-0.2) grid (2.2,1.2);
\draw (1,.5) circle [radius=1mm] (1.5,0) circle [radius=1mm];
\node :position = { 0s="{(1,.5)}", 2s="{(1.5,0)}", begin on=click }
at (1,.5) [fill = blue, opacity=.5, circle] {Click};
}
\end{codeexample}
%
Compare this to a shift:
%
\begin{codeexample}[
preamble={\usetikzlibrary{animations}},
animation list={0.5,1,1.5,2},
]
\tikz {
\draw [help lines] (-0.2,-0.2) grid (2.2,1.2);
\draw (1,.5) circle [radius=1mm] (1.5,0) circle [radius=1mm];
\node :shift = { 0s="{(1,.5)}", 2s="{(1.5,0)}", begin on=click }
at (1,.5) [fill = blue, opacity=.5, circle] {Click};
}
\end{codeexample}
%
You can use the |along| key with |:position| in the same way as with
|:shift|, which is especially useful for specifying that a node ``travels''
between positions of the canvas:
%
\begin{codeexample}[
preamble={\usetikzlibrary{animations}},
animation list={0.5,1,1.5,2},
]
\tikz {
\draw [help lines] (-0.2,-0.2) grid (2.2,1.2);
\draw (1,1) circle [radius=1mm] (1.5,0) circle [radius=1mm];
\node :position = {
along = {(1,1) to[bend left] (1.5,0)} sloped in 2s,
begin on = click }
at (1,1) [fill = blue, opacity=.5, circle] {Click};
}
\end{codeexample}
%
\end{tikzanimateattribute}
\subsubsection{Animating Transformations: Views}
\label{section-animation-views}
The final method of changing the transformation matrix is to animate a
\emph{view}.
\begin{tikzanimateattribute}{view}
A view is a canvas transformation that shifts and scales the canvas in such
a way that a certain rectangle ``matches'' another rectangle: The idea is
that you ``look through'' a ``window'' (the view) and ``see'' a certain
area of the canvas. View animations do not allow you to do anything that
cannot also be done using the |shift| and |scale| keys in combination, but
it is often much more natural to animate which area of a graphic you wish to
see than to compute and animate a scaling and shift explicitly.
In order to use a view, you first need to create a view, which is done
using the |meet| or |slice| keys from the |views| library, see
Section~\ref{section-library-views}. You can then animate the view using
the |view| attribute. The values passed to the |entry| key follow the same
syntax as the views in the |views| library (though you only animate the
to-be-viewed rectangle).
%
\begin{codeexample}[
preamble={\usetikzlibrary{animations,views}},
animation list={0.5,1,1.5,2},
animation bb={(1.1,-0.9) rectangle (2.9,0.9)},
]
\tikz [very thick] {
\node (node) [fill = blue!20, draw = blue, very thick, circle] {Click me!};
\draw [green!50!black] (1.2,-0.8) rectangle (2.7,0.8);
\begin{scope}[view = {(0,0) (2,2) at (1.2,-0.8) (2.7,0.8)},
animate = {myself:view = {
begin on = {click, of=node},
0s = "{(0,0) (2,2)}",
2s = "{(1,1) (1.5,1.5)}" }}]
\draw [red] (10mm,10mm) rectangle (15mm,15mm);
\node at (10mm,10mm) [circle, fill=red, text=white, font=\tiny] {red};
\end{scope}
}
\end{codeexample}
%
\end{tikzanimateattribute}
\subsection{Controlling the Timeline}
\label{section-anim-timeline}
We can already specify timelines by giving a sequence of times in
non-decreasing order along with corresponding values. In this section we have a
look at further options that allow us to extend or control the timeline.
\subsubsection{Before and After the Timeline: Value Filling}
When you specify the timeline, you specify it for a certain interval
$[t_1,t_2]$. By default, outside this interval the animation has no effect on
the to-be-animated attribute. The following keys allow you to change this:
\begin{key}{/tikz/animate/base=\meta{options}}
A ``base'' value is a value that is used for the attribute whenever the
timeline is \emph{not} active:
%
\begin{codeexample}[
preamble={\usetikzlibrary{animations}},
animation list = {0.5,1,1.5,2,2.5},
]
\tikz \node [fill = green, text = white] :fill =
{ 1s = "red", 2s = "blue", base = "orange", begin on = click }
{Click me};
\end{codeexample}
Syntactically, the |base| key works much like special time syntax: It sets
up a local |sync| scope and executes the \meta{options} in it and creates
an |entry|. However, instead of setting the |time| attribute to a time, it
sets it to a special value that tells \tikzname\ that when the entry is
created, the current \meta{value} should be used as the |base| value.
This means that you can write |base = "orange"| as in the above example to
set the base. However, you can also use the |base| key in other ways; most
noticeably, you can use it \emph{after} some value:
%
\begin{codeexample}[
preamble={\usetikzlibrary{animations}},
animation list = {0.5,1,1.5,2,2.5},
]
\tikz \node [fill = green, text = white] :fill =
{ 1s = {"red" = base}, 2s = "blue", begin on = click }
{Click me};
\end{codeexample}
Instead of using |base| as a key, you can also add |base| directly after
the quotes of a value. This is particularly useful for setting up a base
value that is also used in a timeline:
%
\begin{codeexample}[
preamble={\usetikzlibrary{animations}},
animation list = {0.5,1,1.5,2,2.5},
]
\tikz \node [fill = green, text = white] :fill =
{ 1s = "red" base, 2s = "blue", begin on = click }
{Click me};
\end{codeexample}
%
\end{key}
\begin{key}{/tikz/animate/options/forever}
This key causes the timeline to continue ``forever'' after the last time
with the last value. You can also think of this as having the animation
``freeze'' at the end.
%
\begin{codeexample}[
preamble={\usetikzlibrary{animations}},
animation list = {0.5,1,1.5,2,2.5},
]
\tikz \node :fill = { 1s="red", 2s="blue", forever, begin on=click}
[fill = green!50!black, text = white] {Click me};
\end{codeexample}
%
\begin{codeexample}[
preamble={\usetikzlibrary{animations}},
animation list = {0.5,1,1.5,2,2.5},
]
\tikz \node [fill = green!50!black, text = white]
:fill = { 1s = "red", 2s = "blue", begin on = click }
{Click me};
\end{codeexample}
%
\end{key}
\begin{key}{/tikz/animate/options/freeze}
An alias for |forever|.
\end{key}
\subsubsection{Beginning and Ending Timelines}
\label{section-anim-begin-end}
The \meta{time} used with the first use of the |entry| key in a timeline is the
start time and the \meta{time} in the last |entry| key is the stop time.
However, this leaves open the question of when the whole timeline is to be
started: The moment the document is opened? When the page is displayed? When
the user scrolls to the to-be-animated object? When some other object is
clicked? The key |begin|, and also the key |end|, allow you to specify answers
to these questions.
\begin{key}{/tikz/animate/options/begin=\meta{time}}
This key specifies when the ``moment |0s|'' should be relative to the
moment when the current graphic is first displayed. You can use this key
multiple times, in this case the timeline is restarted for each of the
times specified (if it is already running, it will be reset). If no |begin|
key is given at all, the effect is the same as if |begin=0s| had been
specified.
It is permissible to set \meta{time} to a negative value.
Note that this key has no effect for snapshots.
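For instance, a sketch along the following lines (code only, with arbitrary
times) should start the fill timeline one second after the graphic is
displayed and restart it four seconds later:
%
\begin{codeexample}[code only]
% two possible beginnings: at 1s and again at 5s after display
\tikz \node :fill = { 0s = "red", 2s = "blue", begin = 1s, begin = 5s }
  [fill = red, text = white, circle] {Hi!};
\end{codeexample}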
\end{key}
\begin{key}{/tikz/animate/options/end=\meta{time}}
This key will truncate the timeline so that it ends \meta{time} after the
display of the graphic, provided the timeline begins before the specified
end time. For instance, if you specify a timeline starting at 2\,s and
ending at 5\,s and you set |begin| to 1\,s and |end| to 4\,s, the timeline
will run, relative to the moment when the graphic is displayed, from 3\,s to
4\,s.
%
\begin{codeexample}[preamble={\usetikzlibrary{animations}}]
\tikz \node [fill = green!50!black, text = white]
:rotate = { 1s = "0", 5s = "90", begin = 2s, end = 4s }
{Click me};
\end{codeexample}
%
\end{key}
Instead of specifying the beginning of the timeline relative to the moment the
to-be-animated graphic is displayed, you can also set the ``moment |0s|'' to
the moment a specific \emph{event} happens using the following key:
\begin{key}{/tikz/animate/options/begin on=\meta{options}}
The \meta{options} will be executed with the path |/pgf/animation/events|
and will cause a new beginning to be added to the list of possible
beginnings for the timeline (so the uses of this key accumulate). Each
``beginning'' is just another possible ``moment |0s|'' for the timeline.
For instance, when the \meta{options} are set to |click|, then each time
the graphic is clicked a moment |0s| starts for the timeline.
Most events are ``caused'' or ``happen to'' some object. For instance, the
|click| event happens when you click on a certain object. In order to
specify this object, use the following two keys inside the \meta{options}:
|of| and |of next|. If neither of these keys are given, the to-be-animated
object is used.
\begin{key}{/pgf/animation/events/of=\meta{id}\opt{|.|\meta{type}}}
This specifies a graphic object id in the same way as the |whom| key,
also with an optional \meta{type}. This is the object that ``causes''
the event to happen.
Unlike the |whom| key, which always refers to a not-yet-existing
object, this key always refers to an already existing object, namely to
the most recent use of the \meta{id}. In the following example, the
referenced object is the node with the label |2| since it is the most
recently referenced node with \meta{id} |X|.
%
\begin{codeexample}[width=3cm,preamble={\usetikzlibrary{animations}}]
\tikz [very thick] {
\node (X) at (1,1.2) [fill = blue!20, draw = blue, circle] {1};
\node (X) at (1,0.4) [fill = orange!20, draw = orange, circle] {2};
\node (node) :rotate = {0s="0", 2s="90", begin on = {click, of = X}}
[fill = red!20, draw = red, rectangle] {Anim};
\node (X) at (1,-0.4) [fill = blue!20, draw = blue, circle] {3};
\node (X) at (1,-1.2) [fill = blue!20, draw = blue, circle] {4}; }
\end{codeexample}
\end{key}
\begin{key}{/pgf/animation/events/of next=\meta{id}\opt{|.|\meta{type}}}
This key works like the |of| key, only it refers to a future (actually,
the next) object with the given \meta{id}, not to a previous one. Thus,
in the next example, the referenced node is the one with label |3|.
%
\begin{codeexample}[width=3cm,preamble={\usetikzlibrary{animations}}]
\tikz [very thick] {
\node (X) at (1,1.2) [fill = blue!20, draw = blue, circle] {1};
\node (X) at (1,0.4) [fill = blue!20, draw = blue, circle] {2};
\node (node) :rotate = {
0s="0", 2s="90", begin on = {click, of next = X}}
[fill = red!20, draw = red, rectangle] {Anim};
\node (X) at (1,-0.4) [fill = orange!20, draw = orange, circle] {3};
\node (X) at (1,-1.2) [fill = blue!20, draw = blue, circle] {4}; }
\end{codeexample}
\end{key}
The following key allows you to specify the event that should cause the
animation to start:
%
\begin{key}{/pgf/animation/events/event=\meta{event name}}
Specifies the name of the event whose occurrence should start the
timeline. Which events are supported depends on the device on which the
animation is displayed, the output format (\textsc{svg} or some other
format), and the setup of scripts, but here is a list of events
supported by ``plain \textsc{svg}'': |click|, |focusin|, |focusout|,
|mousedown|, |mouseup|, |mouseover|, |mousemove|, |mouseout|, |begin|,
|end|. However, the following keys make using these events simpler:
%
\begin{key}{/pgf/animation/events/click}
This is a shorthand for |event=click|. This event gets triggered
when the user clicks on the triggering object with a mouse (or
something equivalent).
%
\begin{codeexample}[width=2cm,preamble={\usetikzlibrary{animations}}]
\tikz \node :rotate = { 0s="0", 2s="90", begin on = {click}}
[fill = blue!20, draw = blue, circle, ultra thick] {Here!};
\end{codeexample}
\end{key}
%
\begin{key}{/pgf/animation/events/mouse down}
Shorthand for |event=mousedown|. The event gets triggered when the
user presses a mouse button down on the object.
%
\begin{codeexample}[width=2cm,preamble={\usetikzlibrary{animations}}]
\tikz \node :rotate = { 0s="0", 2s="90", begin on = {mouse down}}
[fill = blue!20, draw = blue, circle, ultra thick] {Here!};
\end{codeexample}
\end{key}
%
\begin{key}{/pgf/animation/events/mouse up}
Shorthand for |event=mouseup| and gets triggered, of course, when a
pressed button is released on the object.
%
\begin{codeexample}[width=2cm,preamble={\usetikzlibrary{animations}}]
\tikz \node :rotate = { 0s="0", 2s="90", begin on = {mouse up} }
[fill = blue!20, draw = blue, circle, ultra thick] {Here!};
\end{codeexample}
\end{key}
%
\begin{key}{/pgf/animation/events/mouse over}
Shorthand for |event=mouseover|. The event gets triggered the
moment the mouse cursor moves over the object.
%
\begin{codeexample}[width=2cm,preamble={\usetikzlibrary{animations}}]
\tikz \node :rotate = { 0s="0", 2s="90", begin on = {mouse over} }
[fill = blue!20, draw = blue, circle, ultra thick] {Here!};
\end{codeexample}
\end{key}
%
\begin{key}{/pgf/animation/events/mouse move}
Shorthand for |event=mousemove|. The event gets triggered lots of
times, namely each time the mouse moves while being ``over'' the
object.
%
\begin{codeexample}[width=2cm,preamble={\usetikzlibrary{animations}}]
\tikz \node :rotate = { 0s="0", 2s="90", begin on = {mouse move} }
[fill = blue!20, draw = blue, circle, ultra thick] {Here!};
\end{codeexample}
\end{key}
%
\begin{key}{/pgf/animation/events/mouse out}
Shorthand for |event=mouseout|. The opposite of |mouse over|:
triggered when the mouse leaves the object.
%
\begin{codeexample}[width=2cm,preamble={\usetikzlibrary{animations}}]
\tikz \node :rotate = { 0s="0", 2s="90", begin on = {mouse out} }
[fill = blue!20, draw = blue, circle, ultra thick] {Here!};
\end{codeexample}
\end{key}
%
\begin{key}{/pgf/animation/events/begin}
Shorthand for |event=begin|. The ``begin'' refers to the beginning
of another animation, namely the one referenced by |of| or
|of next|. This means that the current animation will begin when
some other animation begins.
%
\begin{codeexample}[width=2cm,preamble={\usetikzlibrary{animations}}]
\tikz \node [animate = {
myself:rotate = { 0s="0", 2s="90", begin on = {begin, of next=anim}},
myself:xshift = { 0s="0mm", 2s="5mm", begin on = {click}, name=anim}
},
fill = blue!20, draw = blue, circle, ultra thick] {Here!};
\end{codeexample}
\end{key}
%
\begin{key}{/pgf/animation/events/end}
Shorthand for |event=end|. Again, the ``end'' refers to the end of
another animation, namely the one referenced by |of| or |of next|.
This means that the current animation will \emph{begin} when some
other animation \emph{ends}.
%
\begin{codeexample}[width=2cm,preamble={\usetikzlibrary{animations}}]
\tikz \node [animate = {
myself:rotate = { 0s="0", 2s="90", begin on = {end, of next=anim}},
myself:xshift = { 0s="0mm", 2s="5mm", begin on = {click}, name=anim }
},
fill = blue!20, draw = blue, circle, ultra thick] {Here!};
\end{codeexample}
\end{key}
%
\begin{key}{/pgf/animation/events/focus in}
This is a shorthand for |event=focusin|. This event gets triggered
when the graphic object gets the focus (this usually makes sense
only for text input fields).
\end{key}
%
\begin{key}{/pgf/animation/events/focus out}
This is a shorthand for |event=focusout|.
\end{key}
\end{key}
In addition to the events specified using the generic |event| key, there
are two further events that take a parameter:
%
\begin{key}{/pgf/animation/events/repeat=\meta{number}}
The event is triggered when a repeating animation has been repeated
\meta{number} times.
%
\begin{codeexample}[
preamble={\usetikzlibrary{animations}},
animation list={
0.333/\frac{1}{3},0.666/\frac{2}{3},1,
1.333/1\frac{1}{3},1.666/1\frac{2}{3},2,
2.333/2\frac{1}{3},2.666/2\frac{2}{3},3,
3.333/3\frac{1}{3},3.666/3\frac{2}{3},4%
},
]
\tikz
\node [animate = { myself: = {
:rotate = { 0s="0", 2s="90", begin on = {repeat = 2, of next = anim },
begin snapshot = 2 },
:xshift = { 0s="0mm", 2s="5mm", begin on=click, name=anim, repeats=4 }}},
fill = blue!20, draw = blue, circle, ultra thick] {Here!};
\end{codeexample}
\end{key}
\begin{key}{/pgf/animation/events/key=\meta{key}}
The event is triggered when the keyboard key \meta{key} has been
pressed. For security reasons, a viewer may suppress this.
\end{key}
Having specified the event, you can also specify a delay relative to this
event:
\begin{key}{/pgf/animation/events/delay=\meta{time}}
Specifies that the timeline should not start with the event, but,
rather, be delayed by \meta{time}.
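For instance, a sketch like the following (code only, with an arbitrary
delay) should start the rotation half a second after the click:
%
\begin{codeexample}[code only]
% the timeline's moment 0s is 500ms after the click
\tikz \node :rotate = { 0s = "0", 2s = "90",
                        begin on = {click, delay = 500ms} }
  [fill = blue!20, draw = blue, circle, ultra thick] {Click me!};
\end{codeexample}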
\end{key}
\end{key}
When you use |begin on| to start an animation when a certain event is
triggered, it is not clear what should happen when the event is triggered
\emph{again}. Should this be ignored completely? Should it only be ignored
while the animation is running? The following key allows you to specify what
should happen:
\begin{key}{/tikz/animate/options/restart=\meta{choice} (default true)}
You can set \meta{choice} to one of the following:
%
\begin{itemize}
\item |true| means that the animation will restart each time the event
is triggered. If the animation is already running, it will be reset
to its beginning.
\item |false| means that once the animation has started once, it will
never be restarted.
%
\begin{codeexample}[width=2cm,preamble={\usetikzlibrary{animations}}]
\tikz \node :rotate = { 0s="0", 2s="90",
restart = false, begin on = {click}}
[fill = blue!20, draw = blue, circle, ultra thick] {Here!};
\end{codeexample}
%
\item |never| means the same as |false|.
\item |when not active| means that the animation will restart when the
event is triggered, but \emph{not} while the animation is running.
%
\begin{codeexample}[width=2cm,preamble={\usetikzlibrary{animations}}]
\tikz \node :rotate = { 0s="0", 2s="90",
restart = when not active, begin on = {click}}
[fill = blue!20, draw = blue, circle, ultra thick] {Here!};
\end{codeexample}
\end{itemize}
\end{key}
Just like |begin on| specifies when a timeline begins relative to some event,
the |end on| key allows you to stop it early when some event happens:
\begin{key}{/tikz/animate/options/end on=\meta{options}}
Works exactly like |begin on|, only the \meta{options} now specify one
possible end of the timeline.
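For instance, in the following sketch (code only, with made-up node names),
the rotation starts when the |Start| node is clicked and one possible end is
a click on the |stop| node:
%
\begin{codeexample}[code only]
% begin on a click on the animated node, end on a click on "stop"
\tikz {
  \node (stop) at (2,0) [fill = red!20, draw = red, circle] {Stop};
  \node :rotate = { 0s = "0", 10s = "360", begin on = click,
                    end on = {click, of = stop} }
    [fill = blue!20, draw = blue, ultra thick, circle] {Start};
}
\end{codeexample}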
\end{key}
\subsubsection{Repeating Timelines and Accumulation}
\begin{key}{/tikz/animate/options/repeats=\meta{specification}}
Use this key to specify that the timeline animation should repeat at the
end. The \meta{specification} must consist of two parts, each of which may
be empty. The first part is one of the following:
%
\begin{itemize}
\item Empty, in which case the timeline repeats forever.
%
\begin{codeexample}[
preamble={\usetikzlibrary{animations}},
animation list={1,2,3,4,5},
]
\tikz \node :rotate = { 0s = "0", 2s = "90",
repeats, begin on = click }
[fill = blue!20, draw = blue, ultra thick, circle] {Click me!};
\end{codeexample}
%
\item A \meta{number} (like |2| or |3.25|), in which case the timeline
repeats \meta{number} times.
%
\begin{codeexample}[
preamble={\usetikzlibrary{animations}},
animation list={1,2,3,4,5},
]
\tikz \node :rotate = { 0s = "0", 2s = "90",
repeats = 1.75, begin on = click }
[fill = blue!20, draw = blue, ultra thick, circle] {Click me!};
\end{codeexample}
%
\item The text ``|for| \meta{time}'' (like |for 2s| or |for 300ms|), in
which case the timeline repeats however often necessary so that it
stops exactly after \meta{time}.
%
\begin{codeexample}[
preamble={\usetikzlibrary{animations}},
animation list={1,2,3,4,5},
]
\tikz \node :rotate = { 0s = "0", 2s = "90",
repeats = for 3.5s, begin on = click }
[fill = blue!20, draw = blue, ultra thick, circle] {Click me!};
\end{codeexample}
\end{itemize}
%
The second part of the specification must be one of the following:
%
\begin{itemize}
\item Empty, in which case each time the timeline is restarted, the
attribute's value undergoes the same series of values it did
previously.
\item The text |accumulating|. This has the effect that each time the
timeline is restarted, the last value specified by the timeline is
\emph{added} to the value from the previous iteration(s). A typical
example is an animation that shifts a scope by, say, 1\,cm over a
time of 1\,s. Now, if you repeat this five times, normally the
scope will shift 1\,cm for 1\,s then ``jump back'', shift again,
jump back, and so on for five times. In contrast, when the repeats
are accumulating, the scope will move by 5\,cm over 5\,s in total.
%
\begin{codeexample}[
preamble={\usetikzlibrary{animations}},
animation list={1,2,3,4,5},
]
\tikz \node :rotate = { 0s = "0", 2s = "90", begin on = click,
repeats = accumulating }
[fill = blue!20, draw = blue, ultra thick, circle] {Click me!};
\end{codeexample}
%
\begin{codeexample}[
preamble={\usetikzlibrary{animations}},
animation list={1,2,3,4,5},
]
\tikz \node :rotate = { 0s = "0", 2s = "90", begin on = click,
repeats = for 4s accumulating }
[fill = blue!20, draw = blue, ultra thick, circle] {Click me!};
\end{codeexample}
\end{itemize}
\end{key}
\begin{key}{/tikz/animate/options/repeat=\meta{specification}}
An alias for |repeats|.
\end{key}
\subsubsection{Smoothing and Jumping Timelines}
\label{section-anim-smooth}
Your specification of the timeline will consist of a sequence of times along
with values that the attribute should have at these ``key times''. Between
these key times, the attribute's value needs to be interpolated.
Suppose that an animation is supposed to interpolate an attribute's value
between the two values |50| and |100| over a time of 10\,s. The simplest way of
doing so is to do a linear interpolation, where the value at, say, 1\,s is 55,
at 2\,s it is 60, and so on. Unfortunately, the linear interpolation does not
``look'' nice in many cases since the acceleration of a linear interpolation is
zero during the animation, but infinite at the beginning and at the end, which
looks ``jerky''.
To avoid this, you can specify that the time--attribute curve should not be a
straight line, but rather a curve. You specify this curve using a spline. The
most logical ``coordinate rectangle'' used for this spline in our example would
be |(0s,50)| and |(10s,100)| and we would like to specify something like
%
\begin{codeexample}[code only]
(0s,50) .. controls (5s,50) and (9s,100) .. (10s,100)
\end{codeexample}
%
This would result in a time--attribute curve where the attribute at |50|
changes slowly at 0\,s and also arrives slowly at |100| at 10\,s, but speeds up
between these values.
We call the first control point |(5s,50)| the ``exit control'' and call
|(9s,100)| the ``entry control'': The first control dictates how quickly or
slowly a time point is left, the second dictates how quickly or slowly we enter
the next one.
The control points are, however, not specified in the coordinate system
indicated above. Rather, the rectangle |(0s,50)| to |(10s, 100)| gets
normalized to |(0,0)| to |(1,1)|. The control point |(5s,50)| would thus become
|(0.5,0)| and |(9s,100)| becomes |(0.9,1)|.
\begin{key}{/tikz/animate/options/exit control=\marg{time fraction}\marg{value fraction}}
Specifies an exit control using two values as above. The spline from above
would be specified as follows:
%
\begin{codeexample}[code only]
exit control={0.5}{0},
entry control={0.9}{1},
0s = "50",
10s = "100"
\end{codeexample}
Note that the curve specified using exit and entry controls must be
``well-behaved'' in the sense that exactly one value must be specified for
each point in time in the time interval.
In the next three examples, we first specify a ``smooth'' exit from the
start position, then a smooth arrival at the end position, and, finally,
both.
%
\begin{codeexample}[
preamble={\usetikzlibrary{animations}},
animation list={0.333/\frac{1}{3},0.666/\frac{2}{3},1,1.333/1\frac{1}{3},1.666/1\frac{2}{3}},
]
\tikz {
\foreach \i in {0,0.1,...,1} \draw (-0.9,.9-\i) -- ++(1.8,0);
\node :yshift = { begin on = click,
0s = { exit control = {1}{0}, "0cm" },
1s = "-5mm",
2s = "-10mm" }
[fill = blue!20, draw = blue, very thick, circle] {Click me!};
}
\end{codeexample}
\begin{codeexample}[
preamble={\usetikzlibrary{animations}},
animation list={0.333/\frac{1}{3},0.666/\frac{2}{3},1,1.333/1\frac{1}{3},1.666/1\frac{2}{3}},
]
\tikz {
\foreach \i in {0,0.1,...,1} \draw (-0.9,.9-\i) -- ++(1.8,0);
\node :yshift = { begin on = click,
0s = "0cm",
1s = "-5mm",
2s = { entry control = {0}{1}, "-10mm" } }
[fill = blue!20, draw = blue, very thick, circle] {Click me!};
}
\end{codeexample}
\begin{codeexample}[
preamble={\usetikzlibrary{animations}},
animation list={0.333/\frac{1}{3},0.666/\frac{2}{3},1,1.333/1\frac{1}{3},1.666/1\frac{2}{3}},
]
\tikz {
\foreach \i in {0,0.1,...,1} \draw (-0.9,.9-\i) -- ++(1.8,0);
\node :yshift = { begin on = click,
0s = { exit control = {1}{0}, "0cm" },
1s = "-5mm",
2s = { entry control = {0}{1}, "-10mm" } }
[fill = blue!20, draw = blue, very thick, circle] {Click me!};
}
\end{codeexample}
%
\end{key}
\begin{key}{/tikz/animate/options/entry control=\marg{time fraction}\marg{value fraction}}
Works like |exit control|.
\end{key}
\begin{key}{/tikz/animate/options/ease in=\marg{fraction} (default 0.5)}
A shorthand for |entry control={1-|\meta{fraction}|}{1}|.
\end{key}
\begin{key}{/tikz/animate/options/ease out=\marg{fraction} (default 0.5)}
A shorthand for |exit control={|\meta{fraction}|}{0}|.
\end{key}
\begin{key}{/tikz/animate/options/ease=\marg{fraction} (default 0.5)}
A shorthand for |ease in=|\meta{fraction}|, ease out=|\meta{fraction}.
Note that since for the first time the entry control is ignored and,
similarly, for the last time the exit control is ignored, using the |ease|
key with an animation having only two times is particularly easy, since we
only need to set |ease| once:
%
\begin{codeexample}[
preamble={\usetikzlibrary{animations}},
animation list={0.333/\frac{1}{3},0.666/\frac{2}{3},1,1.333/1\frac{1}{3},1.666/1\frac{2}{3}},
]
\tikz {
\foreach \i in {0,0.1,...,1} \draw (-0.9,.9-\i) -- ++(1.8,0);
\node :yshift = { begin on = click, ease, 0s = "0cm", 2s = "-10mm" }
[fill = blue!20, draw = blue, very thick, circle] {Click me!};
}
\end{codeexample}
%
\end{key}
The opposite of having a smooth curve between two values is to have a ``jump''
from one value to the next. There are two keys for this:
\begin{key}{/tikz/animate/options/stay}
Specifies that inside the time interval the value ``stays put'' at the
first value till the end of the interval, where it will jump to the second
value. This is similar to an exit control where the curve is ``infinitely
flat''.
%
\begin{codeexample}[
preamble={\usetikzlibrary{animations}},
animation list={0.5,1,1.5,2,2.5},
]
\tikz {
\foreach \i in {0,0.1,...,1} \draw (-0.9,.9-\i) -- ++(1.8,0);
\node :yshift = { begin on = click,
0s = "0cm",
1s = {stay, "-5mm"},
2s = "-10mm" }
[fill = blue!20, draw = blue, very thick, circle] {Click me!};
}
\end{codeexample}
%
\end{key}
\begin{key}{/tikz/animate/options/jump}
Works like the |stay| key, but will cause the value to ``jump to'' the new
value right at the beginning of the time interval. It is similar to an
entry control specifying a ``flat'' curve.
%
\begin{codeexample}[
preamble={\usetikzlibrary{animations}},
animation list={0.5,1,1.5,2,2.5},
]
\tikz {
\foreach \i in {0,0.1,...,1} \draw (-0.9,.9-\i) -- ++(1.8,0);
\node :yshift = { begin on = click,
0s = "0cm",
1s = {jump, "-5mm"},
2s = "-10mm" }
[fill = blue!20, draw = blue, very thick, circle] {Click me!};
}
\end{codeexample}
%
\end{key}
\subsection{Snapshots}
\label{section-anim-snap}
Snapshots are a way of taking a ``photographic snapshot'' of an animation at a
certain time and then inserting these into \textsc{pdf} files (or, for that
matter, Postscript files or files in any other format, including \textsc{svg}):
You specify a time like |2s| and then \tikzname\ will compute what the
animation ``would look like after 2s'' and insert the necessary graphics
commands for rendering the graphic objects in the correct way. Since this
computation is done by \tikzname\ and since only ``normal'' graphics commands
are inserted into the output, snapshots work with all output formats.
Apart from providing a fallback for \textsc{pdf}, snapshots are very useful by
themselves: They make it easy to ``show'' how an animation unfolds on paper.
For this, you simply typeset the same picture with the same animation several
times (using a simple |\foreach| loop), but each time you set a different
snapshot time. This will result in a sequence of pictures that depict the
animation at different points in time and which can then be inserted alongside
each other into the printed document. This approach has been used with the
examples of animations in this manual.
%
\begin{codeexample}[preamble={\usetikzlibrary{animations}}]
\foreach \t in {0.5, 1, 1.5, 2}
\tikz [make snapshot of = \t]
\fill :fill = {0s="black", 2s="red"} (0,0) circle [radius = 5mm];
\end{codeexample}
Creating snapshots is done using the following key:
\begin{key}{/tikz/make snapshot of=\meta{time}}
When this key is used in a \TeX\ scope, animation commands given in the
scope do not add animation code to the output. Instead, \tikzname\ computes
the values the attributes of the animation would have at the specified
\meta{time} and inserts the necessary system layer command to set the
attribute to the computed values (some care has been taken to make this
computation match the computations done by viewer applications as best as
possible).
%
\begin{codeexample}[preamble={\usetikzlibrary{animations}}]
\tikz [make snapshot of = 1s] {
\fill :fill = { 0s = "black", 2s = "white" } (0,0) rectangle ++(1,1);
\fill :fill = { 1s = "black", 3s = "white" } (2,0) rectangle ++(1,1);
}
\end{codeexample}
The moment \meta{time} is best thought of as \meta{time} seconds after the
``moment zero'' where all timelines start by default. Now, ``real''
animations may start at a different time through user interaction, which
clearly makes no sense for snapshots. Nevertheless, you will sometimes wish
to have more control over when a timeline starts for the purposes of taking
snapshots. You can use the following key for this:
\begin{key}{/tikz/animate/options/begin snapshot=\meta{start time}}
Use this key on a timeline to specify that, only for purposes of taking
snapshots, the timeline starts at \meta{start time} rather than at
``moment zero''. (Think of this as saying that the animation starts
when a virtual user clicks on the animation and this click occurs
\meta{start time} seconds after the general ``moment zero'', causing
the animation to ``lag behind'' by this amount of time.)
Computationally, for the timeline the \meta{start time} is subtracted
from the snapshot's \meta{time} when the value needs to be determined:
%
\begin{codeexample}[preamble={\usetikzlibrary{animations}}]
\tikz [make snapshot of = 1s] {
\fill :fill = { 0s = "black", 2s = "white",
begin snapshot = 1s } (0,0) rectangle ++(1,1);
\fill :fill = { 1s = "black", 3s = "white" } (2,0) rectangle ++(1,1);
}
\end{codeexample}
\end{key}
The computations of the values the animation ``would have'' are done
entirely by \tikzname, which has the big advantage that no support from
the viewer application or the output format is needed -- snapshots work
with all output formats, not just with \textsc{svg}. However, computations
done by \tikzname\ are not always very precise and can be slow because of
\TeX's limitations. In addition, there are some further limitations when it
comes to \tikzname's computation of snapshot values:
%
\begin{itemize}
\item As mentioned above, except for |begin snapshot|, other commands
for specifying the beginning or end of a timeline based on user
interaction make no sense for snapshots: The keys |begin|,
|begin on|, |end|, and |end on| are silently ignored.
\item The value |current value| for a value is forbidden since this
value is almost impossible to compute by \tikzname.
\item Accumulating repeats of a motion are (currently) not supported,
but you should not rely on this.
\end{itemize}
When \meta{time} is empty, ``snapshot taking'' is switched off and
animation commands are inserted once more.
\end{key}
\begin{key}{/tikz/make snapshot after=\meta{time}}
Works exactly like |make snapshot of|, only the \meta{time} is interpreted
as $\meta{time} + \epsilon$. This only makes a difference at the end of a
timeline and when there are two or more values specified for the same time:
When there are several values specified for time~$t$, a normal snapshot for
time~$t$ uses the first value given for the attribute. In contrast, this
command would use the last one given. Similarly, when an animation timeline
ends at time $t$, a normal snapshot of time $t$ would use the last value of
the timeline, while this key would not apply the animation at all (it has
already ended at time $t + \epsilon$).
%
\begin{codeexample}[preamble={\usetikzlibrary{animations}}]
\tikz [make snapshot of = 2s]
\fill :fill = { 0s = "green", 2s = "red" } (0,0) rectangle ++(1,1);
\tikz [make snapshot after = 2s]
\fill :fill = { 0s = "green", 2s = "red" } (0,0) rectangle ++(1,1);
\end{codeexample}
%
\end{key}
\begin{key}{/tikz/make snapshot if necessary=\meta{time} (default 0s)}
This key makes a snapshot of \meta{time} only when the output format does
not provide support for animations; if the output format supports
animations (like \textsc{svg}), then the command has no effect and
animations are created normally.
This manual is typeset with the following being set once and for all in
the preamble:
%
\begin{codeexample}[code only]
\tikzset{make snapshot if necessary}
\end{codeexample}
Because of this setting, in the \textsc{pdf} version of this document, all
animations are shown at the value they would have at moment~$0s$. In
contrast, in the \textsc{svg} version, the animations are created normally.
In both versions, the smaller pictures showing how the animation proceeds
over time are created using |make snapshot of| for the indicated times.
\end{key}
%%% Local Variables:
%%% mode: latex
%%% TeX-master: "pgfmanual"
%%% End:
\chapter[The verb complex]{The verb complex}\label{chap:7}
\hypertarget{RefHeading1212081525720847}{}
Moloko does not have a simple verb word. Rather, \citet{FriesenMamalis2008} named this structure the ‘verb complex’ since affixes and extensions attach to the verb stem, forming a close phonological unit that is not always a single phonological word. The verb complex may be made up of one to three phonological words, as defined by prosody spread and word-final allophones (\sectref{sec:2.6.1} and \sectref{sec:2.6.2}).
There are two fundamental aspects of Moloko grammar that are expressed in the verb complex. The first is the concept of the point of reference. The point of reference involves both place and time. Actions in Moloko are usually placed with respect to a set locational point of reference, which in normal speech is usually the speaker. In a narrative or other discourse, the speaker can set the point of reference. Verbs are aligned with respect to the locational point of reference by means of directional\is{Directionals} verbal extensions (\sectref{sec:7.5.2}). These extensions determine the direction of the event with respect to the point of reference, and can be towards the speaker, away from the speaker, or back and forth. Directionals are different from adpositionals (\sectref{sec:7.5.1}), since adpositionals\is{Adpositionals} align the action with respect to other elements in the immediate context. The temporal point of reference\is{Tense, mood, and aspect!Perfect} is set in Moloko by mood and the Perfect. Mood involves what is real or not yet experienced in the world shared by the speaker and his or her audience (realis and irrealis, \sectref{sec:7.4.3}). The speaker and audience are, as it were, walking backwards into the future.\footnote{I first heard this image at a First Nations languages conference in Canada in 2011 to express an Indigenous view of time. } What has happened and is happening is ‘visible’ to them (realis) and they move together into the ‘invisible’ world behind them (irrealis). The point of reference will be the time of communication in normal speech. However, again in a narrative or other type of discourse, the speaker can set the point of reference (usually to the time the events took place). The Perfect extension is employed whenever the speaker needs to make sure that the hearer understands that an event is already completed before the point of reference, with ongoing effects to that point.
\largerpage
Another fundamental concept in Moloko verbs expressed in the verb complex is expectation, accomplished through mood\is{Tense, mood, and aspect!Irrealis mood|(}. The realis world is the realm of the visible or real; it includes the past and what is present as it happens before the speaker and audience and what is shared knowledge or expectations about the world and how it works. It is presented by the speaker as being real or known -- events and states that happened, are happening, or which are part of the expected ‘frame’ of a situation. Within the realis world, the distinctions coded in verbs are for events that are complete/accomplished (Perfective, \sectref{sec:7.4.1}), incomplete/unachieved (Imperfective\is{Tense, mood, and aspect!Imperfective aspect}, \sectref{sec:7.4.2}), in progress (\sectref{sec:8.2.1}), repeated (three types, \sectref{sec:7.4.4}, \sectref{sec:7.4.5}, \sectref{sec:7.5.2}). The irrealis world is the realm of desire and will and the unknown world of the future. Within that world, verbs in Moloko are marked as to the degree of desire and perhaps the control the speaker has over the accomplishment of the event.\is{Tense, mood, and aspect!Irrealis mood|)}
There is no system of tense as such in Moloko \citep{FriesenMamalis2008}.\footnote{\citet{Bow1997c} considered tense and mood.} Perfective versus Imperfective aspect is expressed through\is{Tense, mood, and aspect!Perfective aspect} changes in the tone of the subject prefix (\sectref{sec:7.4.1} and \sectref{sec:7.4.2}). Irrealis mood is differentiated from realis mood by vowel changes in the subject prefix (\sectref{sec:7.4.3}). For the imperative (\sectref{sec:7.4.2}), the subject prefix is absent.
The verb stem as defined in \chapref{chap:6} can take up to two prefixes and only one suffix. Morphemes on the stem include the subject pronominal affixes (a prefix and a suffix for {\oneP} and {\twoP} subjects, \sectref{sec:7.3.1}) and an indirect object pronominal enclitic (\sectref{sec:7.3.1.1}). Two prefixes are derivational -- one prefix nominalises the verb (\sectref{sec:7.6}) and the other subordinates the entire clause in which it occurs (\sectref{sec:7.7}).
Another noteworthy feature is that Moloko has three ways to indicate repeated actions. Reduplication in the root is one of the ways that pluractionals are formed in other Chadic languages \citep{Newman1990}. Contrary to many Chadic languages, Moloko does not have a productive pluractional. Only a few verb stems take the pluractional extension (used for actions that are made up of repetitive motions, \sectref{sec:7.5.2}).\footnote{The only stems which take the pluractional which we have so far identified are \textit{a-h=aya} ‘he/she grinds,’ \textit{a-s=əya} ‘he/she cuts,’ and \textit{d=əya} ‘take many’.} However, two kinds of reduplication of the verb stem in Moloko express iterative aspect. Reduplication of a consonant in the stem indicates an iterative action that is habitual (\sectref{sec:7.4.4}) and reduplication of the entire verb word indicates an iterative action that is intermittent (\sectref{sec:7.4.5}).
\largerpage
The verbal extensions, which include locational and directional information and Perfect aspect, are also described in this chapter (\sectref{sec:7.5}). They and the indirect object pronominal enclitic are discussed as part of the verb complex because they form a close phonological unit with the verb stem, even though they may sometimes be part of a separate phonological word.
\section{The phonological structure of the verb word}\label{sec:7.1}%%\is{Clitics!Verb clitics}
\hypertarget{RefHeading1212101525720847}{}
The phonological structure of the Moloko verb word is interesting in that, although its elements can each be part of a phonological unit with the verb stem, combinations of different elements can cause the complex to be broken into as many as three phonological words. Its complexity lies especially in the post-verbal elements of the verb complex. The subject prefix and verb stem are the only necessary parts of the basic inflected verb complex.\footnote{The structure of the nominalised or dependent forms of the verb is similar. The derivational prefixes are in the same location as the subject prefix. All other affixes and extensions are possible with the exception of the Perfect extension. } All other affixes and extensions are structurally optional and are determined by the context and the lexical requirements of the particular verb.
\citet{FriesenMamalis2008} discovered that Moloko has three types of verb complexes. The first type of verb complex is one phonological word (\figref{fig:7.1}), and occurs when there is no plural suffix (see \sectref{sec:7.3.1}), no indirect object pronominal enclitic (see \sectref{sec:7.3.1.1}), and no direct object pronominal (see \sectref{sec:7.3.2}). In this case, the extensions (see \sectref{sec:7.5}) cliticise directly to the verb stem.
\begin{figure}
\resizebox{\textwidth}{!}{%
\begin{tikzpicture}[baseline]
\node at (0,0) [draw] (stem) {\strut Verb stem};
\node[above=.25\baselineskip of stem] (word) {\strut Verb word};
\node[right=1em of stem] (adpo) {\strut =adpositional\is{Adpositionals}};
\node[right=1em of adpo] (dir) {\strut =directional\is{Directionals}};
\node[right=1em of dir] (perf) {\strut =Perfect\is{Tense, mood, and aspect!Perfect}};
\node[left=1em of stem] (irr) {\strut Irrealis--\is{Tense, mood, and aspect!Irrealis mood}};
\node[left=1em of irr] (subj) {\strut subject+\textsc{aspect}--};
\node[above left=.25\baselineskip and .1mm of subj] (left) {\strut};
\node[above right=.25\baselineskip and .1mm of perf] (right) {\strut};
\draw[thick, -{Stealth}] (word) -- (left);
\draw[thick, -{Stealth}] (word) -- (right);
\end{tikzpicture}}
\caption{\label{fig:7.1}One phonological word verb complex}
\end{figure}
In the examples, the verb word is delineated by square brackets.
\ea\label{ex:7:1}
Gaka ala.\\
\gll {}[g=aka=ala ]\\
do[{\twoS}.{\IMP}]=on=to\\
\glt ‘Put some more on!’\footnote{Note that the verb stem is /g -j\textsuperscript{e}/. The palatalisation drops with the extensions.} (lit. do on towards)
\z
\ea\label{ex:7:2}
Alala va.\\
\gll {}[à-l=ala=va ]\\
\oldstylenums{3}\textsc{s}+{\PFV}-go=to={\PRF}\\
\glt ‘He came back.’
\z
The second type necessitates two phonological words -- a verb word and an ‘extension word’ -- because of the presence of either a direct or indirect object pronominal (or both). The verb word may have either a subject suffix or an indirect object pronominal enclitic (but not both). The structure of this second verb complex is illustrated in \figref{fig:7.2}.
\begin{figure}
\resizebox{\textwidth}{!}{%
\begin{tabular}{llcl|llll}
\tikzmark{Fig2L} & & \tikzmark{Fig2wordL} Verb word \tikzmark{Fig2wordR} & \multicolumn{1}{r|}{\tikzmark{Fig2C}} & \multicolumn{2}{l}{Extension word \tikzmark{Fig2extR}} & & \multicolumn{1}{r}{\tikzmark{Fig2R}}\\
\multicolumn{4}{r|}{}\\
subject+\textsc{aspect}- & Irrealis- & \begin{tikzpicture}[remember picture, baseline=(stem.base)] \node [draw] (stem) {Verb stem};\end{tikzpicture} & -\oneP\slash{\twoP} subject\# & direct object & =adpositional & =directional & =Perfect\\
derivational prefix- & & & {}-indirect object & pronominal & & & \\
& & & pronominal\# & \begin{tikzpicture}[remember picture]
\draw [overlay,thick, -{Stealth}] (Fig2wordL.north) -- (Fig2L.north east);
\draw [overlay,thick, -{Stealth}] (Fig2wordR.north) -- (Fig2C.north west);
\draw [overlay,thick, -{Stealth}] (Fig2extR.north) -- (Fig2R.north east);
\end{tikzpicture}\\
\end{tabular}}
\caption{Two phonological word verb complex\label{fig:7.2}\label{fig:12}}
\end{figure}
Both the direct and the indirect object pronominals initiate a word break, so whenever either is present the verb complex consists of more than one phonological word. The word break after the \oldstylenums{3}\textsc{s} indirect object pronominal enclitic is indicated by word-final changes in /n/; in slow speech the \oldstylenums{3}\textsc{s} indirect object pronominal enclitic /=\textit{an}/ is pronounced [aŋ] (showing word-final changes) even when there are other clitics following the verb word (\ref{ex:7:3}, see \sectref{sec:7.3.1.1}). The word break before the \oldstylenums{3}\textsc{s} \DO pronominal is indicated by the fact that the \oldstylenums{3}\textsc{s} \DO pronominal does not neutralise the prosody on the verb stem, and does not cause the /-j/ suffix to drop (\ref{ex:7:4}--\ref{ex:7:5}, see \sectref{sec:7.3.2}).\footnote{The first line in each example is the orthographic form. The second is the phonetic form (slow speech) with morpheme breaks.}
\ea\label{ex:7:3}
Ambaɗan aka alay.\\
verb word \hspace{50pt} ‘extension word’\\
\gll [à-mbaɗ=aŋ] \hspace{5pt} [=aka=alaj]\\
\oldstylenums{3}\textsc{s}+{\PFV}-change=\oldstylenums{3}\textsc{s}.{\IO} \hspace{5pt} =on=away\\
\glt ‘He/she replied.’ (lit. he changed on away)
\z
\ea\label{ex:7:4}
Aslay na.\\
\gll {}[à-ɬ{}-aj] \hspace{35pt} [na]\\
\oldstylenums{3}\textsc{s}+{\PFV}-slay-{\CL} {} \oldstylenums{3}\textsc{s}.{\DO}\\
\glt ‘He killed it.’
\z
\ea\label{ex:7:5}
Ege na.\\
\gll {}[\`ɛ-g-ɛ] \hspace{45pt} [na]\\
\oldstylenums{3}\textsc{s}+{\PFV}-do -{\CL}{} \oldstylenums{3}\textsc{s}.{\DO}\\
\glt ‘He did it.’
\z
When there is no indirect object pronominal enclitic, the extensions cliticise to the direct object pronominal \REF{ex:7:6}. When both direct and indirect object pronominals are present, again the extensions cliticise to the direct object pronominal \REF{ex:7:7}. When there is an indirect object pronominal enclitic but no direct object pronominal, the extensions form a separate phonological word in and of themselves (\ref{ex:7:8}, see also \ref{ex:7:3}).
\ea\label{ex:7:6}
Abək ta aya va məlama ahan ahay jəyga.\\
verb word \hspace{9pt} ‘extension word’\\
\gll {}[a-bək] \hspace{15pt} [ta=aja=va] \hspace{10pt} məlama=ahaŋ=ahaj dzijga\\
\oldstylenums{3}\textsc{s}-invite \hspace{15pt} \oldstylenums{3}\textsc{p}.{\DO}={\PLU}={\PRF} {} brothers=\oldstylenums{3}\textsc{p}.{\POSS}=Pl all\\
\glt ‘He had already invited all of his brothers.’
\z
\ea\label{ex:7:7}
Akaɗaw na va. \\
verb word \hspace{36pt} ‘extension word’\\
\gll\relax [à-kaɗ=aw] {} [na=va]\\
\oldstylenums{3}\textsc{s}+{\PFV}-club ={\oneS}.{\IO} \oldstylenums{3}\textsc{s}.{\DO}={\PRF}\\
\glt ‘He/she has killed it for me.’
\z
\ea\label{ex:7:8}
Hor agaw aka ala.\\
\hspace{30pt} verb word \hspace{28pt} ‘extension word’\\
\gll hʷɔr [à-g=aw] [=aka=ala]\\
woman \oldstylenums{3}\textsc{s}+{\PFV}-do=\oldstylenums{3}\textsc{s}.{\IO} =on=to\\
\glt ‘The woman liked me [as I liked her].’ (lit. she did to me on toward)
\z
The third type of verb complex consists of three phonological words (a verb word, an ‘indirect object word,’ and an ‘extension word’). This type occurs when the verb complex has both a subject suffix and an indirect object pronominal enclitic. Phonological rules do not allow two morphemes to be suffixed or cliticised to the verb; nor can the indirect object pronominal enclitic commence another word. So, the morpheme \textit{an} is inserted and the indirect object pronominal clitic attaches to the inserted morpheme. The overall structure is then as shown in \figref{fig:7.3}.
\begin{figure}
\resizebox{\textwidth}{!}{%
\begin{tabular}{llll|l|llll}
\tikzmark{Fig3L} & \multicolumn{2}{c}{\tikzmark{Fig3wordL} Verb word \tikzmark{Fig3wordR}} \tikz[remember picture] \draw[overlay, thick, {Stealth}-] (Fig3L.north west) -- (Fig3wordL.north); & \multicolumn{1}{r|}{\tikzmark{Fig3VWR}\tikz[remember picture] \draw[overlay, thick, {Stealth}-] (Fig3VWR.north east) -- (Fig3wordR.north);} & Indirect object word & \multicolumn{3}{l}{Extension word \tikzmark{Fig3Ext}} & \multicolumn{1}{r}{\tikzmark{Fig3R}} \tikz[remember picture] \draw[overlay, thick, {Stealth}-] (Fig3R.north east) -- (Fig3Ext.north);\\
% & \multicolumn{2}{c}{} & & & \\
\multicolumn{4}{r|}{} & & \\
subject & Irrealis- & \tikz[baseline=(verbstem.base)] \node [draw] (verbstem) {\strut Verb stem}; & -\oneP\slash{\twoP} & an=indirect & direct object & =adpositional & =directional & =Perfect\\
+\textsc{aspect}- & & & subject\# & object pronominal\# & pronominal & & & \\
\end{tabular}}
\caption{Three phonological word verb complex\label{fig:7.3}}
\end{figure}
In \REF{ex:7:9} and \REF{ex:7:10}, the verb \textit{kəslom} has the {\twoP} imperative suffix attached (\textit{-om}). The indirect object pronominal enclitic therefore attaches to the inserted morpheme \textit{an}. Any other extensions must form a third phonological word since there is a word break following the indirect object pronominal enclitic.
\ea\label{ex:7:9}
Kəslom anan na aka awak.\\
verb word \hspace{10pt} ‘indirect object word’ \hspace{10pt} ‘extension word’\\
\gll [kʊ-ɬ-ɔm] \hspace{15pt} [an=aŋ] \hspace{60pt} [na=aka] \hspace{30pt} awak\\
2-slay-{\twoP} \hspace{15pt} {\DAT}=\oldstylenums{3}\textsc{s}.{\IO} \hspace{60pt} \oldstylenums{3}\textsc{s}.{\DO}=on \hspace{30pt} goat\\
\glt ‘You (p) kill another goat for him.’ (lit. you slay a goat for him on top of [another time a goat was slain])
\z
\ea\label{ex:7:10}
Kəslom anan aka awak.\\
verb word \hspace{10pt} ‘indirect object word’ \hspace{10pt} ‘extension word’\\
\gll {}[kʊ-ɬ-ɔm] \hspace{15pt} [an=aŋ] \hspace{60pt} [=aka] \hspace{50pt} awak\\
2-kill-{\twoP} \hspace{15pt} {\DAT}=\oldstylenums{3}\textsc{s}.{\IO} \hspace{60pt} =on \hspace{50pt} goat\\
\glt ‘You kill another goat for him.’
\z
The three types of verb complexes seen in Moloko are shown in \figref{tab:53}.
\begin{figure}
\resizebox{\textwidth}{!}{\begin{tabular}{llllllllll}
1 & \tikzmark{Fig4-1L} & \multicolumn{4}{c}{\tikzmark{Fig4-1VWL} Verb word \tikzmark{Fig4-1VWR}} & \multicolumn{1}{r}{\tikzmark{Fig4-1R}}
\begin{tikzpicture}[remember picture]
\draw[overlay, thick, {Stealth}-] (Fig4-1L.north west) -- (Fig4-1VWL.north);
\draw[overlay, thick, -{Stealth}] (Fig4-1VWR.north) -- (Fig4-1R.north east);
\end{tikzpicture}\\
\\
& subject+\textsc{aspect}- & Irrealis- & \begin{tikzpicture}[remember picture, baseline=(stem.base)] \node [draw] (stem) {Verb stem};\end{tikzpicture} & =adpositional & =directional & =Perfect\\
\\
2 & \tikzmark{Fig4-2L} & & \tikzmark{Fig4-2wordL} Verb word \tikzmark{Fig4-2wordR} & \multicolumn{1}{r|}{\tikzmark{Fig4-2C}} & \multicolumn{2}{l}{Extension word \tikzmark{Fig4-2extR}} & & \multicolumn{1}{r}{\tikzmark{Fig4-2R}}\\
& \multicolumn{4}{r|}{}\\
& subject+\textsc{aspect}- & Irrealis- & \begin{tikzpicture}[remember picture, baseline=(stem.base)] \node [draw] (stem) {Verb stem};\end{tikzpicture} & \multicolumn{1}{l|}{-\oneP\slash{\twoP} subject\#} & direct object & =adpositional & =directional & =Perfect\\
& derivational prefix- & & & \multicolumn{1}{l|}{-indirect object\#} & pronominal & & & \\
\begin{tikzpicture}[remember picture]
\draw[overlay, thick, -{Stealth}] (Fig4-2wordL.north) -- (Fig4-2L.north west);
\draw[overlay, thick, -{Stealth}] (Fig4-2wordR.north) -- (Fig4-2C.north east);
\draw[overlay, thick, -{Stealth}] (Fig4-2extR.north) -- (Fig4-2R.north east);
\end{tikzpicture}\\
3 &\tikzmark{Fig4-3L} & \multicolumn{2}{c}{\tikzmark{Fig4-3wordL} Verb word \tikzmark{Fig4-3wordR}} \tikz[remember picture] \draw[overlay, thick, {Stealth}-] (Fig4-3L.north west) -- (Fig4-3wordL.north); & \multicolumn{1}{r|} {\tikzmark{Fig4-3VWR}\tikz[remember picture] \draw[overlay, thick, {Stealth}-] (Fig4-3VWR.north east) -- (Fig4-3wordR.north);} & \multicolumn{1}{l|}{Indirect object word} & \multicolumn{3}{l}{Extension word \tikzmark{Fig4-3Ext}} & \multicolumn{1}{r}{\tikzmark{Fig4-3R}} \tikz[remember picture] \draw[overlay, thick, {Stealth}-] (Fig4-3R.north east) -- (Fig4-3Ext.north);\\
% & \multicolumn{2}{c}{} & & & \\
& \multicolumn{4}{r|}{} & \multicolumn{1}{r|}{}& \\
& subject +\textsc{aspect}- & Irrealis- & \tikz[baseline=(verbstem.base)] \node [draw] (verbstem) {\strut Verb stem}; & \multicolumn{1}{l|}{-\oneP\slash{\twoP} subject\#} & \multicolumn{1}{l|}{an=indirect} & direct object & =adpositional & =directional & =Perfect\\
& & & & \multicolumn{1}{r|}{} & \multicolumn{1}{l|}{object pronominal\#} & pronominal & & & \\
\end{tabular}}
\caption{Three types of verb complexes\label{tab:53}}
\end{figure}
\section{Imperative}\label{sec:7.2}
\hypertarget{RefHeading1212121525720847}{}
The {\twoS} imperative form is the basic citation form of the verb, as it gives the clearest presentation of the verb stem. The imperative occurs in {\twoS}, \oldstylenums{1}\textsc{Pin} and {\twoP} forms. The {\twoS} form is simply the verb stem. The plural\is{Plurality!Verb plurals|(} forms carry suffixes which correspond to their respective subject pronominal suffixes in indicative verb stems (see \sectref{sec:7.3.1}). The singular and plural imperative forms are shown in \tabref{tab:54} (from \citealt{FriesenMamalis2008}).
\begin{table}
\begin{tabular}{lll}
\lsptoprule
{\twoS} form & {\oneP inclusive form} & {{\twoP} form}\\\midrule
\textit{faɗ} & \textit{fəɗ{}-ok} & \textit{fəɗ{}-om}\\
‘Put! ({\twoS})’ & ‘Let’s put! (\oldstylenums{1}\textsc{Pin})’ & ‘Put! ({\twoP})’\\\midrule
\textit{zom}~ & \textit{zəm-ok} & \textit{zəm-om}\\
‘Eat! ({\twoS})’ & ‘Let’s eat! (\oldstylenums{1}\textsc{Pin})’ & ‘Eat! ({\twoP})’\\\midrule
\textit{s{}-e} & \textit{s-ok} & \textit{s-om}\\
‘Drink! ({\twoS})’ & ‘Let’s drink! (\oldstylenums{1}\textsc{Pin})’ & ‘Drink! ({\twoP})’\\\midrule
\textit{fat-ay} & \textit{fot-ok} & \textit{fot-om}\\
‘Descend! ({\twoS})’ & ‘Let’s descend! (\oldstylenums{1}\textsc{Pin})’ & ‘Descend! ({\twoP})’\\
\lspbottomrule
\end{tabular}
\caption{Singular and plural imperative forms\label{tab:54}}
\end{table}
\section{Verb complex pronominals}\label{sec:7.3}
\hypertarget{RefHeading1212141525720847}{}
\citet{FriesenMamalis2008} showed that the verb complex can carry pronominals that indicate the subject, direct object, and indirect object. These markers in the verb complex are all bound forms. They are called pronominals and not just agreement markers because all of them can be the only indication of their referent in the clause. Because the pronominals are present, there is no need for a noun phrase or free pronoun in the clause. Participants are tracked\is{Cohesion!Participant tracking} in discourse solely by pronominals, and free pronouns and noun phrases only occur in discourse to introduce a participant or to switch the referent.
\tabref{tab:55} lists all the pronominals. Subject is indicated by a verbal prefix for singular subjects and third person plural. Plural subjects for first and second person are indicated by a combination of a prefix and a suffix. These subject pronominals (discussed in \sectref{sec:7.3.1}) are given in their underlying form because the surface vowel and tone on the prefix are determined by mood and aspect, respectively. Also, the underlying form is given to show the prosody, because the labialisation prosody in the plural subject suffixes will spread over the entire verb stem. The direct object pronominal (\sectref{sec:7.3.2}) only occurs for third person singular and plural. The indirect object pronominal (\sectref{sec:7.3.1.1}) cliticises to the right edge of the verb stem and the direct object pronominal follows it. In \tabref{tab:55}, the independent pronouns are also given for comparison since there are similarities between the free pronoun and its corresponding pronominal.
\begin{table}
\resizebox{\textwidth}{!}{\begin{tabular}{lllll}
\lsptoprule
{Person} & {Pronominal} & {Indirect object} & {Third person} & {Independent}\\
& {subject affixes} & {pronominal } & {direct object} & {pronouns}\\
& & {enclitics} & {pronominals} & \\\midrule
{{\oneS}} & \textit{n-} & =\textit{aw} & & \textit{ne}\\
{{\twoS}} & \textit{k-} & =\textit{ok} & & \textit{nok}\\
{\oldstylenums{3}\textsc{s}} & \textit{a-} / \textit{ma-}\footnote{The third person Hortative subject pronominal, see \tabref{tab:65} in \sectref{sec:7.4.3}.} & \textit{=an} & \textit{na} & \textit{ndahan}\\
{\oneP inclusive\footnote{i.e. speaker (+others) + hearer} } & \textit{m-}…-\textit{ok} & \textit{=aloko} & & \textit{loko}\\
{\oneP exclusive\footnote{i.e. speaker + others}} & \textit{n-}…-\textit{om} & \textit{=aləme} & & \textit{ləme}\\
{{\twoP}} & \textit{k-}…-\textit{om} & \textit{=aləkwəye} & & \textit{ləkwəye}\\
{\oldstylenums{3}\textsc{p}} & \textit{t-} & \textit{=ata} & \textit{ta} & \textit{təta}\\
\lspbottomrule
\end{tabular}}
\caption{Pronominals}\label{tab:55}
\end{table}
\subsection{Subject pronominal affixes}\label{sec:7.3.1}
\hypertarget{RefHeading1212161525720847}{}
The subject is always marked on the finite form of the verb, regardless of whether there is a free subject phrase in the clause.\footnote{The presence of both subject pronominal and corresponding noun phrase occurs for pragmatic reasons.} In fact, the subject pronominal marker in the verb can be the only indication of subject in the entire clause.\footnote{In a non-finite verb form, the subject pronominal is absent and the subject of the clause is either understood from the context or indicated by a free pronoun or noun phrase in the clause (Sections \ref{sec:7.6.2}, \ref{sec:7.7}, and \ref{sec:8.2.3}).} As noted in \tabref{tab:56} and \tabref{tab:57} (adapted from \citealt{FriesenMamalis2008}), subject is marked by a prefix or combination of prefix and suffix. In the examples below, the pronominal affixes are bolded. The prefix carries aspectual tone (see \sectref{sec:7.4}), and the vowel quality is influenced by the prosody on the verb stem (see \sectref{sec:6.6}), the presence of the /a-/ prefix (see \sectref{sec:6.5}), and the mood of the verb (see \sectref{sec:7.4.3}). The \oneP and {\twoP} suffixes are labialised. This prosody will spread over the entire verb stem.
\begin{table}
\begin{tabular}{lll}
\lsptoprule
{Person} & {Singular} & {Plural}\\\midrule
{1} & \textit{\textbf{nə-}mənjar awak} & \textbf{\textit{mə-}}\textit{mənjor}\textbf{\textit{{}-ok}} \textit{awak}\\
& ‘I saw a goat’ & ‘we (inclusive) saw a goat’\\
& & \textbf{\textit{nə-}}\textit{mənjor}\textbf{\textit{{}-om}} \textit{awak}\\
& & ‘we (exclusive) saw a goat’\\\midrule
{2} & \textbf{\textit{kə-}}\textit{mənjar awak} & \textbf{\textit{kə-}}\textit{mənjor-}\textbf{\textit{om}} \textit{awak}\\
& ‘you saw a goat’ & ‘you (plural) saw a goat’\\\midrule
{3} & \textbf{\textit{a-}}\textit{mənjar awak} & \textbf{\textit{tə-}}\textit{mənjar awak}\\
& ‘he/she saw a goat’ & ‘they saw a goat’\\
\lspbottomrule
\end{tabular}
\caption{Conjugations with subject pronominal affixes for /m nzar/ ‘see’}\label{tab:56}
\end{table}
\begin{table}
\begin{tabular}{lll}
\lsptoprule
{Person} & {Singular} & {Plural}\\\midrule
{1} & \textbf{\textit{nə-}}\textit{həm-ay} & \textbf{\textit{mə{}-}}\textit{həm}\textbf{\textit{{}-ok}} \\
& ‘I ran’ & ‘we (inclusive) ran’\\
& & \textbf{\textit{nə{}-}}\textit{həm}\textbf{\textit{{}-om}} \\
& & ‘we (exclusive) ran’\\\midrule
{2} & \textbf{\textit{kə-}}\textit{həm-ay} & \textbf{\textit{kə{}-}}\textit{həm}\textbf{\textit{{}-om}}\\
& ‘you ran’ & ‘you (plural) ran’\\\midrule
{3} & \textbf{\textit{a-}}\textit{həm-ay} & \textbf{\textit{tə-}}\textit{həm-ay}\\
& ‘he/she ran’ & ‘they ran’\\
\lspbottomrule
\end{tabular}
\caption {\label{tab:57} Conjugations with subject pronominal affixes for /h m-j/ ‘run’}
\end{table}
\largerpage \citet{Bow1997c} found that a prosody on the verb stem will spread leftwards from the verb stem over the singular subject prefixes. The fact that palatalisation and labialisation spread over the subject prefixes indicates that the subject markers are fully bound to the verb stem and are not separate words. \REF{ex:7:11} presents the palatalised verb /g\textsuperscript{ e}/ ‘do,’ and \REF{ex:7:12} presents the labialised verb /l\textsuperscript{o}/ ‘go.’
\ea\label{ex:7:11}
Nege.\\
\gll {}[nɛ\textbf{{}-}g-ɛ]\\
{\oneS}-do-{\CL}\\
\glt ‘I did.’
\z
\ea\label{ex:7:12}
Olo.\\
\gll {}[ɔ\textbf{{}-}lɔ]\\
\oldstylenums{3}\textsc{s}-go\\
\glt ‘he/she went.’
\z
\citet{Bow1997c} also discovered that labialisation on the {\oneP} and {\twoP} subject suffixes will spread leftwards from the suffix onto the entire verb word. This fact indicates that these morphemes are fully bound to the verb stem and are not separate words. The verb /ts k-j\textsuperscript{e}/ ‘stand’, shown in example \REF{ex:7:13} in its {\oneS} form, loses its palatalisation and becomes labialised when the (labialised) plural\is{Plurality!Verb plurals|)} suffixes are added \REF{ex:7:14}:
\ea\label{ex:7:13}
Necəke.\\
\gll nɛ\textbf{{}-}tʃɪk-ɛ\\
{\oneS}-stand-{\CL} \\
\glt ‘I stand.’
\z
\ea\label{ex:7:14}
Nəcəkom. \\
\gll nʊ\textbf{{}-}tsʊkʷ{}-ɔm\\
{\oneS}-stand-\oldstylenums{1}\textsc{Pex}\\
\glt ‘We (exclusive) stand.’
\z
\citet{Bow1997c} also determined that the subject pronominal prefixes in Moloko appear to be toneless. The aspect of the verbal construction will allocate tone to the pronoun. In the Imperfective aspect\is{Tense, mood, and aspect!Imperfective aspect}, the pronoun always takes high tone (see \sectref{sec:7.4.2}). In the Perfective aspect\is{Tense, mood, and aspect!Perfective aspect}, the pronoun copies the first tone of the root if it is low or mid. If the first tone of the root is high, the pronoun takes on mid tone.
\subsection{Indirect object pronominal enclitic}\label{sec:7.3.1.1}
An indirect object pronominal enclitic can attach to the verb word to express the indirect object, which is a core argument of the verb. The indirect object in Moloko is the participant that represents the place where the direct object is directed to – the recipient or beneficiary of the action.\footnote{Employing the Agent-Theme-Location analysis developed by \citet{DeLancey1991}, the indirect object in Moloko expresses the semantic \LOC (see \chapref{chap:9}). The direct object pronominal expresses the semantic Theme -- the participant that changes position or state (see \sectref{sec:7.3.2}).} In \REF{ex:7:15}, the verb /dz -j/ ‘help’ takes the indirect object. The indirect object represents the participant who receives the help.
\ea\label{ex:7:15}
Ajənaw.\\
\gll a-dzən=aw\\
\oldstylenums{3}\textsc{s}-help={\oneS}.{\IO}\\
\glt ‘He/she helped me.’
\z
The indirect object pronominal enclitic allows the core indirect object argument to be expressed in a prepositional phrase \textit{ana Mana} ‘to Mana’ \REF{ex:7:16}.
\ea\label{ex:7:16}
Ajənan ana Mana.\\
\gll a-dzən=aŋ ana Mana\\
\oldstylenums{3}\textsc{s}-help=\oldstylenums{3}\textsc{s}.{\IO} {\DAT} Mana\\
\glt ‘He/she helped Mana.’
\z
The indirect object pronominal enclitic can also stand in the place of the prepositional phrase \REF{ex:7:17}.
\ea\label{ex:7:17}
Ajən\textbf{an}.\\
\gll a-dzən\textbf{=aŋ}\\
\oldstylenums{3}\textsc{s}-help=\oldstylenums{3}\textsc{s}.{\IO}\\
\glt ‘He/she helped him.’
\z
\tabref{tab:7.58} (adapted from \citealt{FriesenMamalis2008}) shows the verb /v l/ ‘give’ conjugated for the indirect object argument. The indirect object expresses the recipient.
\begin{table}
\begin{tabular}{lll}
\lsptoprule
{Person} & {Singular} & {Plural}\\\midrule
{1} & \textit{a-vəl\textbf{=aw}} & \textit{a-vəl}\textbf{\textit{=aloko}}\\
& ‘he/she gave to me’ & ‘he/she gave to us (inclusive)’\\
& & \textit{a-vəl}\textbf{\textit{=aləme}}\\
& & ‘he/she gave to us (exclusive)’\\\midrule
{2} & \textit{a-vəl}\textbf{\textit{=ok}} & \textit{a-vəl}\textbf{\textit{=aləkwəye}}\\
& ‘he/she gave to you’ & ‘he/she gave to you (plural)’\\\midrule
{3} & \textit{a-vəl}\textbf{\textit{=an}} & \textit{a-vəl}\textbf{\textit{=ata}}\\
& ‘he/she gave to him/her’ & ‘he/she gave to them’\\
\lspbottomrule
\end{tabular}
\caption{Verb /v l/ ‘give’ conjugated for indirect object pronominal enclitic\label{tab:7.58}}
\end{table}
The indirect object pronominal enclitics are phonologically bound to the verb stem and do not comprise separate words. When an indirect object pronominal cliticises to the verb stem, there are no word-final alternations in the verb stem. Compare the following pairs of examples showing verb stems with and without indirect object pronominal enclitics. When the indirect object pronominal enclitic is attached \REF{ex:7:19}, there is no word-final alternation of /h/ → [x] / \_\#.\footnote{See \sectref{sec:2.6.1}, c.f. \REF{ex:7:18}. Likewise, we do not see the word-final process of n → [ŋ] / \_\# between the verb stem and the indirect object pronominal.}
\ea\label{ex:7:18}
Aɓah zana.\\
\gll a-ɓax zana\\
\oldstylenums{3}\textsc{s}-sew clothing\\
\glt ‘He/she sews clothing.’
\z
\ea\label{ex:7:19}
Aɓahaw zana. \\
\gll a-ɓah=aw zana \\
\oldstylenums{3}\textsc{s}{}-sew={\oneS}.{\IO} clothing \\
\glt ‘He/she sews clothing for me.’
\z
Similarly, the example pairs \REF{ex:7:20} and \REF{ex:7:21} illustrate that the /-j/ suffix is dropped when the indirect object pronominal is present \REF{ex:7:21}, indicating that the pronominal is phonologically bound to the stem (see \sectref{sec:6.3}).
\ea\label{ex:7:20}
Ajay.\\
\gll a-dz-aj\\
\oldstylenums{3}\textsc{s}-speak{}-{\CL}\\
\glt ‘He/she speaks.’
\z
\ea\label{ex:7:21}
Ajan.\\
\gll a-dz=aŋ\\
\oldstylenums{3}\textsc{s}-speak=\oldstylenums{3}\textsc{s}.{\IO}\\
\glt ‘He/she speaks to him/her.’
\z
The indirect object pronominal enclitic is not phonologically a true suffix, because the prosody of the indirect object pronominal enclitic does not affect the prosody on the verb stem. Compare \REF{ex:7:22} and \REF{ex:7:23} which illustrate the verb stem /s/ conjugated with second person singular and plural indirect objects. If the prosody of the indirect object pronominal enclitic affected the verb stem, one would expect that the /s/ in example \REF{ex:7:23} would be affected by the palatalisation prosody of the plural indirect object pronominal enclitic and be expressed as [ʃ].
\ea\label{ex:7:22}
Asok aka ɗaf.\\
\gll a-s=ɔk =aka ɗaf\\
\oldstylenums{3}\textsc{s}-please={\twoS}.{\IO} =on {millet loaf}\\
\glt ‘You want to have more millet loaves.’ (lit. millet loaf is pleasing to you)
\z
\ea\label{ex:7:23}
Asaləkwəye aka ɗaf.\\
\gll a-s=alʊkʷøjɛ =aka ɗaf\\
\oldstylenums{3}\textsc{s}-please={\twoP}.{\IO} =on {millet loaf}\\
\glt ‘You want to have more millet loaves.’ (lit. millet loaf is pleasing to you)
\z
The fact that the indirect object pronominal can attach to verb stems as well as other particles confirms that it is in fact a clitic pronoun. Normally, the indirect object pronominal enclitic attaches directly to the verb stem \REF{ex:7:24}. However, if the plural subject pronominal suffix is required on the verb \REF{ex:7:25}, the indirect object pronominal can no longer attach to the verb, because the verb stem can take only one suffix (see \sectref{sec:7.1}). Instead, the indirect object pronominal cliticises to the particle \textit{an}. This particle may be related to \textit{ana}, the dative preposition ‘to.’
\ea\label{ex:7:24}
Kasl\textbf{an} awak.\\
\gll ka-ɬ\textbf{=aŋ} awak\\
{\twoS}-slay=\oldstylenums{3}\textsc{s}.{\IO} goat\\
\glt ‘You slay the goat for him.’
\z
\ea\label{ex:7:25}
Kəslom \textbf{anan} awak.\\
\gll kə-ɬ{}-ɔm \textbf{an=aŋ} awak\\
\oldstylenums{2}-slay-{\twoP} to=\oldstylenums{3}\textsc{s}.{\IO} goat\\
\glt ‘You (plural) slay the goat for him.’
\z
There is a word break after the indirect object pronominal enclitic (the phonological words are indicated by square brackets in the examples immediately below). The word break is indicated by the fact that the \oldstylenums{3}\textsc{s} indirect object pronominal enclitic /=\textit{an}/ in slow speech is pronounced [aŋ] even when there are other clitics following the verb word (see \ref{ex:7:26}--\ref{ex:7:27}). The word-final [ŋ] will delete in fast speech (see \sectref{sec:2.5.2}). These clitics (e.g., the adpositional clitics in these examples, see \sectref{sec:7.5.1}) would otherwise attach to the verb (compare with example \ref{ex:7:28}):
\ea\label{ex:7:26}
As\textbf{an} \textbf{aka} ɗaf.\\
\gll {}[a-s\textbf{=aŋ}] [\textbf{=aka}] ɗaf\\
\oldstylenums{3}\textsc{s}-please=\oldstylenums{3}\textsc{s}.{\IO} =on {millet loaf}\\
\glt ‘He/she wants to have more millet loaves.’ (lit. millet loaf is pleasing to him)
\z
\ea\label{ex:7:27}
Ad\textbf{an} \textbf{aka} ɗaf.\\
\gll {}[a-d\textbf{=aŋ}] [\textbf{=aka}] ɗaf\\
\oldstylenums{3}\textsc{s}-prepare=\oldstylenums{3}\textsc{s}.{\IO} =on {millet loaf}\\
\glt ‘She made more loaves of millet for him.’
\z
\ea\label{ex:7:28}
Ad\textbf{aka} ɗaf.\\
\gll {}[a-d\textbf{=aka}] ɗaf\\
\oldstylenums{3}\textsc{s}-prepare=on {millet loaf}\\
\glt ‘She made more loaves of millet.’
\z
\subsection{Third person direct object pronominal}\label{sec:7.3.2}
\hypertarget{RefHeading1212181525720847}{}
\tabref{tab:55} (from \citealt{FriesenMamalis2008}) shows the direct object (\DO) pronominals. The third person \DO pronominals replace or double a full noun phrase in a discourse -- the \textit{na} (\oldstylenums{3}\textsc{s}.{\DO}) or \textit{ta} (\oldstylenums{3}\textsc{p}.{\DO}) refer back to something in the immediately preceding context. Examples \REF{ex:7:29} and \REF{ex:7:30} show two clauses that might occur in a discourse. In \REF{ex:7:30} the \textit{na} refers back to \textit{sla} ‘cow’ in \REF{ex:7:29}.
\ea\label{ex:7:29}
Kaslay sla. \\
\gll kà-ɬ{}-aj ɬa \\
{\twoS}+{\PFV}-slay{}-{\CL} cow \\
\glt ‘You slew the cow.’
\z
\ea\label{ex:7:30}
Kaslay \textbf{na}\textbf{.}\\
\gll kà-ɬ{}-aj \textbf{na}\\
{\twoS}+{\PFV}-slay{}-{\CL} \oldstylenums{3}\textsc{s}.{\DO} \\
\glt ‘You slew it.’
\z
A third person \DO pronominal can be the only expression of direct object in a clause if its identity is known in the discourse (\ref{ex:7:30}, \ref{ex:7:32}, and \ref{ex:7:36}). The only time that a clause will contain both a third person \DO pronominal and a noun phrase that co-refer to the direct object in the clause is when a special focus on the direct object is required (‘all his brothers’ in \ref{ex:7:31}, ‘that fruit-bearing tree’ in \ref{ex:7:38}).
\ea\label{ex:7:31}\corpussource{Race Story (\citealt{Friesen2003}).}\\
Moktonok na, abək \textbf{ta} aya va məlama ahan ahay jəyga.\\
\gll mɔkʷtɔnɔkʷ na a-bək \textbf{ta}=aja=va məlama=ahaŋ=ahaj dzijga\\
toad {\PSP} \oldstylenums{3}\textsc{s}-invite \oldstylenums{3}\textsc{p}={\PLU}={\PRF} brothers=\oldstylenums{3}\textsc{p}.{\POSS}=Pl all\\
\glt ‘The toad, he had already invited all of his brothers.’
\z
We know that the third person \DO pronominals are phonologically separate words (not clitics like the other verbal extensions) because the /-j/ suffix does not drop when the \DO pronominal is added to a clause \REF{ex:7:32}. Normally the /-j/ suffix drops off when extensions or suffixes are added to the clause (\ref{ex:7:33}, see also \sectref{sec:6.3}).
\ea\label{ex:7:32}
Apaɗay \textbf{na}. \\
\gll a-paɗ-aj \textbf{na} \\
\oldstylenums{3}\textsc{s}-crunch{}-{\CL} \oldstylenums{3}\textsc{s}.{\DO}\\
\glt ‘He/she crunches it.’
\z
\ea\label{ex:7:33}
Apaɗaka.\\
\gll a-paɗ=aka\\
\oldstylenums{3}\textsc{s}-crunch=on\\
\glt ‘He/she crunches on.’
\z
Another indication that the \DO pronominal is phonologically a separate word is that the neutral prosody on the \DO pronominal does not affect the prosody of the verb word. Compare \REF{ex:7:34} and \REF{ex:7:35}. In both examples the verb complex is palatalised in spite of the addition of the \DO pronominal. This situation is in contrast to what happens with the Perfect enclitic (see \sectref{sec:7.5.3}).
\ea\label{ex:7:34}
Nese.\\
\gll nɛ-ʃ{}-ɛ\\
{\oneS}-drink{}-{\CL}\\
\glt ‘I drink.’
\z
\ea\label{ex:7:35}
Nese na.\\
\gll nɛ-ʃ{}-ɛ na\\
{\oneS}-drink{}-{\CL} \oldstylenums{3}\textsc{s}.{\DO}\\
\glt ‘I drink it.’
\z
A third indication is that word-final changes (like word-final /n/ being realised as [ŋ], see \sectref{sec:2.6.1} and example \ref{ex:7:36}) are preserved when followed by \textit{na} or \textit{ta}.
\ea\label{ex:7:36}
Nəvəlan \textbf{na}. \\
\gll nə-vəl=aŋ \textbf{na} \\
{\oneS}-give=\oldstylenums{3}\textsc{s}.{\IO} \oldstylenums{3}\textsc{s}.{\DO}\\
\glt ‘I gave it to him.’
\z
The normal slot for the \DO pronominal is within the verb complex between the verb stem and the directional extension. In each example below, the verb complex is delineated by square brackets and the third person \DO pronominal is bolded.
\ea\label{ex:7:37}
Baba ango avəlan \textbf{na} alay ana məze.\\
\gll baba=aŋgʷɔ [a-vəl=aŋ \textbf{na}=alaj] ana mɪʒɛ\\
father={\twoS}.{\POSS} \oldstylenums{3}\textsc{s}-give=\oldstylenums{3}\textsc{s}.{\IO} \oldstylenums{3}\textsc{s}.{\DO}=away {\DAT} person\\
\glt ‘Your father gave it to that person.’
\z
Any further verbal extensions will cliticise to a third person \DO pronominal. In example \REF{ex:7:38}, the directional extension \textit{=ala} ‘toward’ cliticises to \textit{na} and the vowels elide, resulting in the pronunciation [nala]. See also example \REF{ex:7:31}, where the pluractional and Perfect extensions \textit{=aya} and \textit{=va} cliticise to the \DO pronominal \textit{ta}, resulting in the pronunciation [tajava].
\ea\label{ex:7:38}\corpussource{Cicada, S. 12}\\
Tolo [təmənjar \textbf{na} ala] mama agwazla nəndəye.\\
\gll tɔ{}-lɔ [tə-mənzar \textbf{na}=ala] mama agʷaɮa nɪndijɛ\\
\oldstylenums{3}\textsc{p}-go \oldstylenums{3}\textsc{p}-see \oldstylenums{3}\textsc{s}.{\DO}=to mother {spp. of tree} {\DEM}\\
\glt ‘They went and saw that fruit-bearing tree.’
\z
\largerpage
The first and second person direct objects are expressed by free pronouns (see \sectref{sec:3.1.1.1}) or noun phrases. The free pronouns are distributionally and phonologically distinct from the third person direct object pronominals. The free pronouns occur after the verb complex. Note that they occur after the directional extensions in \REF{ex:7:39} and \REF{ex:7:40}. In each example, the verb complex is delineated by square brackets and the first or second person independent pronoun is bolded.
\ea\label{ex:7:39}
[Kazalay] \textbf{ne} a kosoko ava ɗaw?\\
\gll {}[ka-z=alaj] \textbf{nɛ} a kɔsɔkʷɔ ava ɗaw\\
{\twoS}-take=away {\oneS} at market in {\textsc{q}}\\
\glt ‘Will you take me to the market?’
\z
\ea\label{ex:7:40}
Baba ango [avəlata] \textbf{nok} va a ahar ata ava\\
\gll baba=aŋgʷɔ [à-vəl=ata] \textbf{nɔkʷ}=va a ahar=atəta ava\\
father={\twoS}.{\POSS} \oldstylenums{3}\textsc{s}-give=\oldstylenums{3}\textsc{p}.{\IO} {\twoS}={\PRF} at hand=\oldstylenums{3}\textsc{p}.{\POSS} in\\
\glt ‘Your father gave you to them’ (lit. your father gave you into their hands)
\medskip
waya aməmbeɗe hor ata.\\
\gll waja amɪ-mbɛɗ-ɛ hʷɔr=atəta\\
because {\DEP}-change-{\CL} woman=\oldstylenums{3}\textsc{p}.{\POSS}\\
\glt ‘to become a wife [for their relative].’ (lit. because to change their woman)
\z
The \oldstylenums{3}\textsc{s} pronominal is employed in discourse to track participants\is{Cohesion!Participant tracking} (along with the subject and indirect object pronominals, see Sections~\ref{sec:7.3.1} and \ref{sec:7.3.1.1}, respectively). Examples \REF{ex:7:41} and \REF{ex:7:42} are from the Snake story (see \sectref{sec:1.4}). The snake is introduced with a noun phrase \textit{gogolvan} ‘snake’ \REF{ex:7:41}. Further on in the narrative, the snake is referred to by the \oldstylenums{3}\textsc{s} \DO pronominal \textit{na} \REF{ex:7:42}.
\ea\label{ex:7:41}\corpussource{Snake story, S. 4}\\
Alala na, gogolvan na, olo alay.\\
\gll a-l=la na gʷɔgʷɔlvaŋ na \`{ɔ}-lɔ=alaj\\
\oldstylenums{3}\textsc{s}-go=to {\PSP} snake {\PSP} \oldstylenums{3}\textsc{s}+{\PFV}-go =away\\
\glt ‘Some time later, the snake went.’
\z
\ea\label{ex:7:42}\corpussource{Snake story, S. 18}\\
Ne dəyday məkəɗe na aka.\\
\gll nɛ {dijdaj} mɪ-kɪɗ-ɛ na=aka \\
{\oneS} \textsc{id}:approximately {\NOM}{}-kill-{\CL} \oldstylenums{3}\textsc{s}.{\DO}=on\\
\glt ‘I clubbed it to death (approximately).’
\z
In a clause where the referent is clear, the \oldstylenums{3}\textsc{s} \DO pronominal \textit{na} can sometimes be left out. Four consecutive lines from a narrative not illustrated in this work are shown in \REF{ex:7:43}. In the narrative, the head of the household brings home some things he bought at the market. He tells his workers to carry the things into the house. In his instructions \textit{horom alay ayva} ‘carry [all the things] into the house,’ there is no grammatical indication of ‘those things.’ The absence of the \DO pronominal is indicated in the clause by the symbol Ø. In this case, the referent is clear and is not required in the clause.\footnote{The \DO pronominal in Moloko does not function in the way Frajzyngier has postulated for some Chadic languages. \citet{FrajzyngierShay2008} say that the \DO pronoun codes the definiteness of the referent in some Chadic languages. While it is true in Moloko that when the \DO pronominal (or any other pronoun) is used, then the referent is definite, the converse is not true. For example, the referent in \REF{ex:7:43} is definite yet there is no \DO pronominal. }
\ea\label{ex:7:43}
Bahay a hay olo a kosoko ava.\\
\gll bahaj a haj ɔ{}-lɔ a kɔsɔkʷɔ ava \\
chief {\GEN} house \oldstylenums{3}\textsc{s}-go at market in\\
\glt ‘The head of the house went to the market.’\\
\medskip
Askomala ele ahay gam.\\
\gll a-sʊkʷɔm=ala ɛlɛ=ahaj gam\\
\oldstylenums{3}\textsc{s}-buy=to thing=Pl many\\
\glt ‘He bought many things.’\\
\medskip
Awəɗakata ele ngəndəye ana ndam slərele ahan ahay, awəy,\\
\gll a-wuɗak=ata ɛlɛ ŋgɛndijɛ ana ndam ɬɪrɛlɛ=ahaŋ=ahaj awij\\
\oldstylenums{3}\textsc{s}-divide=\oldstylenums{3}\textsc{p}.{\IO} thing {\DEM} {\DAT} people work=\oldstylenums{3}\textsc{s}.{\POSS}=Pl said\\
\glt ‘[When he got home], he divided the things among his workmen, saying,’\\
\medskip
“Horom alay ayva!”\\
\gll hʷɔr-ɔm Ø =alaj ajva\\
carry[{\IMP}]-{\twoP} {} =away {inside house}\\
\glt ‘“Carry [all the things] into the house.”’
\z
Likewise, in the Cicada story, the direct object (the tree that the chief wanted by his door) is not grammatically indicated in the clause in S. 16 \REF{ex:7:44}. Although the referent is definite, there is no grammatical reference to it in the clause.
\ea\label{ex:7:44}\corpussource{Cicada, S. 16}\\
Taazala təta bay. \\
\gll tàà-z=ala Ø təta baj \\
\oldstylenums{3}\textsc{p}+{\HOR}-take=to {} ability {\NEG}\\
\glt ‘They were not able to bring [the tree].’
\z
Participants can be made prominent in a clause by doubling the reference to them. In \REF{ex:7:45} from S. 20 of the Cicada story, the tree that the chief desired is indicated twice in a clause, both by the presence of a noun phrase \textit{memele ga ndana} ‘that tree that you spoke of’ and also the \oldstylenums{3}\textsc{s} \DO pronominal (both are bolded in \ref{ex:7:45}). The effect is prominence.
\ea\label{ex:7:45}\corpussource{Cicada, S. 20}\\
Náamənjar \textbf{na} alay \textbf{memele ga ndana} əwɗe.\\
\gll náá-mənzar \textbf{na}=alaj \textbf{mɛmɛlɛ} \textbf{ga} \textbf{ndana} uwɗɛ\\
{\oneS}+{\POT}-see \oldstylenums{3}\textsc{s}.{\DO}=away tree {\ADJ} {\DEM} first\\
\glt ‘“First I want to see the tree that you spoke of.”’
\z
\section{Aspect and mood}\label{sec:7.4}
\hypertarget{RefHeading1212201525720847}{}
\citet{FriesenMamalis2008} showed that Moloko does not mark verb stems for tense, but uses an aspectual system, looking at realis events as complete (Perfective, see \sectref{sec:7.4.1}) or incomplete (Imperfective, see \sectref{sec:7.4.2}). The vowel in the prefix expresses realis or irrealis mood (see \sectref{sec:7.4.3}). The tonal melody on the subject prefix expresses realis events as Perfective or Imperfective aspect, and expresses the various kinds of irrealis events. Reduplication of a consonant in the verb stem indicates habitual iterative aspect (see \sectref{sec:7.4.4}). Reduplication of the entire verb stem indicates the intermittent iterative aspect -- the intermittent repetition of the same action, possibly by the same actor, over a period of time (see \sectref{sec:7.4.5}).\footnote{Another repeated aspect is the pluractional. The pluractional extension in Moloko indicates an action is back and forth, for example \textit{s=əya} ‘sawing’ or \textit{h=aya} ‘grinding’ (\sectref{sec:7.5.2}).}
\subsection{Perfective}\label{sec:7.4.1}\is{Tense, mood, and aspect!Perfective aspect|(}
\hypertarget{RefHeading1212221525720847}{}
The Perfective (\textsc{{\PFV}}) aspect in Moloko is the aspect that presents a realis event as completed (\citealt{FriesenMamalis2008}).\footnote{Usually, the term ‘Perfective’ is used to refer to a situation as a whole, whether it is completed at the time of speaking or not. The situation is viewed in its entirety for Perfective, whereas in Imperfective aspect, the situation is viewed ‘from inside,’ as an ongoing process (\citealt[3--4]{Comrie1976}; \citealt[239]{Payne1997}). \citet{Dixon2012} refers to verbs expressing completed actions as ‘perfect’ and those expressing incomplete actions as ‘imperfect.’ We have used the term ‘Perfective’ for completed actions in Moloko because there is also a morpheme representing Perfect in Moloko (\sectref{sec:7.5.3}) which collocates with both of these other aspects.} The Perfective aspect\is{Tense, mood, and aspect!Perfective aspect} is indicated by a phonetic low or mid tone on the subject prefix. Verb stems with underlyingly low tone or toneless verb stems have a phonetic low tone if the verb stem begins with a depressor consonant (see \sectref{sec:6.7.1}), and phonetic mid tone otherwise. Verb stems with underlyingly high tone are unaffected by depressor consonants and so the phonetic tone of the subject prefix is mid. \tabref{tab:59} (from \citealt{FriesenMamalis2008}) shows an example from each tone class.
\begin{table}
\resizebox{\textwidth}{!}{\begin{tabular}{llll}
\lsptoprule
{Underlying verb} & {Underlying tone } & {Phonetic tone of } & {Gloss}\\
{stem} & {of verb stem} & {Perfective verb word} & \\\midrule
/nz a k -j/ & H & [n\=ə-nzák-áj] & ‘I found’\\
/a-p a s/ & L, no depressor consonants & [n\={a}-p\={a}s-áj] & ‘I spread (something) out’\\
/a-d-a r -j/ & L, with depressor consonants & [nà-dàr-\={a}j] & ‘I planted’\\
/ɮ w -j/ & Toneless & [n\`ə-ɮ\`əw-\={a}j] & ‘I feared’\\
\lspbottomrule
\end{tabular}}
\caption{\label{tab:59}Perfective tone}
\end{table}
The default verbal aspect for the main event line in a narrative is Perfective. Perfective verb forms are found in the main event line clauses expressing the events immediately following the setting sections of narratives. This is seen in the following examples drawn from three different narratives: \REF{ex:7:46} is from lines 4-6 of the Snake story, \REF{ex:7:47} is from a story not illustrated in this work, and \REF{ex:7:48} is from line 6 of the Cicada story. In the examples, Perfective verb forms are bolded. The low tone is marked on the subject pronominal prefix.
\ea\label{ex:7:46}\corpussource{Snake, S. 4-6}\\
Alala na, gogolvan na, \textbf{olo alay}.\\
\gll a-l=ala na gʷɔgʷɔlvaŋ na \textbf{\`{ɔ}-lɔ=alaj}\\
\oldstylenums{3}\textsc{s}-go=to {\PSP} snake {\PSP} {\oldstylenums{3}\textsc{s}+{\PFV}-go=away}\\
\glt ‘Some time later, the snake went.’\\
\medskip
\textbf{Acar} a hay kəre ava fo fo fo.\\
\gll \textbf{à-tsar} a haj kɪrɛ ava {fɔ fɔ fɔ}\\
\oldstylenums{3}\textsc{s}+{\PFV}-climb at house beams in {\textsc{id}:sound of snake}\\
\glt ‘It climbed into the roof of the house \textit{fo fo fo}.’\\
\medskip
Sen ala na, okfom \textbf{adəɗala} ɓav. \\
\gll ʃɛŋ =ala na ɔkʷfɔm \textbf{à-dəɗ}\textbf{=ala} ɓav\\
\textsc{id}:go =to {\PSP} mouse \oldstylenums{3}\textsc{s}+{\PFV}-fall=to {\textsc{id}:sound of falling} \\
\glt ‘And walking, a mouse fell \textit{ɓav}!’
\z
\ea\label{ex:7:47}
Kəlen na, zar ahan na, \textbf{enjé} ele ahan ametele.\\
\gll kɪlɛŋ na zar=ahaŋ na \textbf{\`ɛ{}-nʒ-ɛ} ɛlɛ=ahaŋ amɛ-tɛl-ɛ\\
next {\PSP} man=\oldstylenums{3}\textsc{s}.{\POSS} {\PSP} \oldstylenums{3}\textsc{s}+{\PFV}-leave-{\CL} thing=\oldstylenums{3}\textsc{s}.{\POSS} {\DEP}-walk-{\CL} \\
\glt ‘Then, her husband went away to walk;’\\
\medskip
\clearpage
\textbf{Enjé} kə delmete aka a slam enen.\\
\gll \textbf{\`ɛ-nʒ-ɛ} kə dɛlmɛtɛ aka a ɬam ɛnɛŋ\\
\oldstylenums{3}\textsc{s}+{\PFV}-leave-{\CL} on place on at place another\\
\glt ‘he left for some place.’
\z
\ea\label{ex:7:48}\corpussource{Cicada, S. 6}\\
Albaya ahay ndana kəlen \textbf{təngala} ala ma ana bahay.\\
\gll albaja=ahaj ndana kɪlɛŋ \textbf{t\`ə-ŋgala}=ala ma ana bahaj\\
{young man}=Pl {\DEM} then \oldstylenums{3}\textsc{p}+{\PFV}-return=to word {\DAT} chief\\
\glt ‘The above-mentioned young men then took the word (response) to the chief.’
\z
\is{Tense, mood, and aspect!Perfective aspect|)}
\subsection{Imperfective}\label{sec:7.4.2}\is{Tense, mood, and aspect!Imperfective aspect|(}
\hypertarget{RefHeading1212241525720847}{}
In contrast with the Perfective, the Imperfective aspect (\textsc{{\IFV}}) can refer to a realis event that is incomplete and in the process of happening or to an event that is just about to begin.\footnote{‘Imperfective aspect’ %%\is{Tense, mood, and aspect!Imperfective aspect}
usually refers to a situation ‘from the inside’ and is concerned with the internal structure of the situation \citep[4]{Comrie1976}. Perhaps ‘incomplete’ would be a better name for this aspect in Moloko; however it does not correspond with imperfect as described by \citet{Dixon2012} in that the action need not begin before the present and be continuing, as \citet[31]{Dixon2012} notes.} The subject prefix for the Imperfective form is always high tone and the tone over the verb stem varies according to the underlying tone of the verb stem. \citet{Bow1997c} noted that the high tone on the prefix spreads to the first syllable of an underlyingly low tone verb. In the examples, the high tone of the Imperfective and low tone of Perfective are marked on the subject pronominal prefix. Examples (\ref{ex:7:49}--\ref{ex:7:56}) are in pairs to show contrast between the tone of the Imperfective (the first of each pair) and the Perfective (the second of each pair). Compare \REF{ex:7:49} (Imperfective) and \REF{ex:7:50} (Perfective). Example \REF{ex:7:49} refers to an event in process of happening (going to the market; already en route).\footnote{There is also a progressive aspect expressed by a complex verb construction (see \sectref{sec:8.2.1}), but the Imperfective verb form alone can give the idea of an action in progress.}
\ea\label{ex:7:49}
K\textbf{ó}lo amtamay?\\
\gll k\textbf{\'ɔ}{}-lɔ amtamaj\\
{\twoS}+{\IFV}-go where\\
\glt ‘Where are you going?’
\z
\clearpage
\ea\label{ex:7:50}
K\textbf{o}lo amtamay?\\
\gll k\textbf{\`ɔ}{}-l\=ɔ amtamaj\\
{\twoS}+{\PFV}-go where\\
\glt ‘Where were you?’
\z
\REF{ex:7:51} and \REF{ex:7:52} illustrate another Imperfective/Perfective pair. The Imperfective in this case refers to an event in process.
\ea\label{ex:7:51}
N\textbf{á}kaɗ bərek cəcəngehe.\\
\gll n\textbf{á}-kàɗ bɪrɛk tʃɪtʃɪŋgɛhɛ\\
{\oneS}+{\IFV}-kill brick now\\
\glt ‘I am making bricks (now).’
\z
\ea\label{ex:7:52}
Nakaɗ bərek cəcəngehe.\\
\gll nà-kàɗ bɪrɛk tʃɪtʃɪŋgɛhɛ\\
{\oneS}+{\PFV}-kill brick now\\
\glt ‘I made bricks just now.’
\z
\REF{ex:7:53} is an Imperfective that marks an event about to begin (compare with the Perfective in \ref{ex:7:54}).
\ea\label{ex:7:53}
N\textbf{á}pasay agaban. \\
\gll n\textbf{á-}pàs-\={a}j agabaŋ \\
{{\oneS}+{\IFV}-take away-{\CL}} sesame\\
\glt ‘I’m about to take away the sesame seeds.’
\z
\ea\label{ex:7:54}
N\textbf{a}pasay agaban. \\
\gll n\textbf{à-}pàs-\={a}j agabaŋ \\
{{\oneS}+{\PFV}-take away-{\CL}} sesame\\
\glt ‘I took away the sesame seeds.’
\z
Likewise, the Imperfective in \REF{ex:7:55} illustrates an event about to begin (compared with the Perfective in \ref{ex:7:56}).
\ea\label{ex:7:55}
Cəcəngehe ne awəy, “N\textbf{é}ge hay əwla ete.”\\
\gll tʃɪtʃɪŋgɛhɛ nɛ awij n\textbf{\'ɛ}{}-g-\'ɛ haj=uwla ɛtɛ\\
now {\oneS} said {\oneS}+{\IFV}-do-{\CL} house={\oneS}.{\POSS} also\\
\glt ‘Now I said, “I want to/am going to make a house for myself too.”’
\z
\ea\label{ex:7:56}
Cəcəngehe ne awəy, “N\textbf{e}ge hay əwla ete.”\\
\gll tʃɪtʃɪŋgɛhɛ nɛ awij n\textbf{\`ɛ}{}-g-\=ɛ haj=uwla ɛtɛ\\
now {\oneS} said {\oneS}+{\PFV}-do-{\CL} house={\oneS}.{\POSS} also\\
\glt ‘Now I said, “I made a house for myself too.”’
\z
\tabref{tab:60} (from \citealt{FriesenMamalis2008}) shows the Imperfective tonal pattern on the same four verb stems as were illustrated in \tabref{tab:59} for the Perfective.
\begin{table}
\resizebox{\textwidth}{!}{\begin{tabular}{llll}
\lsptoprule
Underlying & Underlying tone of & Phonetic tone & {Gloss}\\
verb stem & verb stem & of verb word & \\\midrule
/nz a k-aj/ & H & [n\'ə-nzák-áj] & ‘I’m finding’\\
/a-p a s/ & L, no depressor consonants & [ná-p\={a}s-áj] & ‘I’m spreading (something) out’\\
/a-d-a r-aj/ & L, with depressor consonants & [ná-dàr-\={a}j] & ‘I’m planting’\\
/ɮ w-aj/ & Toneless & [n\'ə-ɮáw-áj] & ‘I’m fearing’\\
\lspbottomrule
\end{tabular}}
\caption{Imperfective tone\label{tab:60}}
\end{table}
\tabref{tab:61} (from \citealt{FriesenMamalis2008}) summarises the Perfective and Imperfective tone patterns on stems of different structures, though the syllable pattern of the stem does not influence the tone pattern for the different aspects.
\begin{table}
\begin{tabular}{llll}
\lsptoprule
{Underlying } & {Structure of } & {Perfective } & {Imperfective }\\
{tone of verb}& {verb stem} & {(lower tone on} & {(higher tone on }\\
{stem} & & {subject prefix)} & {subject prefix)}\\\midrule
{H} & /CaC-j/ & [n\=ə-nzák-áj] & [n\'ə-nzák-áj]\\
& & ‘I found’ & ‘I am finding’\\
& /CC/ & [n\={a}-mbár] & [ná-mbár]\\
& & ‘I healed’ & ‘I am healing’\\
& & [n\={a}-ɗák] & [ná-ɗák]\\
& & ‘I blocked up’ & ‘I am blocking up’\\\midrule
{L } & /a-CaC-j/ & [n\={a}-p\={a}s-áj] & [ná-p\={a}s-áj]\\
{no depressor } & & ‘I took away’ & ‘I am taking away’\\
{consonants} & /CaC-j/ & [n\=ə-t\={a}ts-áj] & [n\'ə-t\={a}ts-áj]\\
& & ‘I closed’ & ‘I am closing’\\
& /CC/ & [n\={a}-f\={a}ɗ] & [ná-f\={a}ɗ]\\
& & ‘I put’ & ‘I am putting’\\\midrule
{L } & /a-CaC-j/ & [n\`{ə}-dàr-\={a}j] & [n\'{ə}-dàr-\={a}j]\\
{depressor } & & ‘I recoiled’ & ‘I am recoiling’\\
{consonants in } & /CCaC-j/ & [n\`{ə}-v\`{ə}nàh-\={a}j] & [n\'{ə}-v\'{ə}nàh-\={a}j]\\
{verb stem} & & ‘I vomited’ & ‘I am vomiting’\\\midrule
{Toneless} & /CaC-j/ & [n\`ə-ɮàw-\={a}j] & [n\'ə-ɮáw-\={a}j]\\
& & ‘I feared’ & ‘I am fearing’\\
& /CC/ & [nà-ndàz] & [ná-ndáz]\\
& & ‘I pierced’ & ‘I am piercing’\\
& & [nà-dàɗ] & [ná-dáɗ]\\
& & ‘I fell’ & ‘I am falling’\\
\lspbottomrule
\end{tabular}
\caption{Summary of tone patterns in selected verb forms\label{tab:61}}
\end{table}
In texts, the Imperfective is used whenever the (ongoing) normal state of affairs is being expressed, i.e., the way the world is. All the main verbs are Imperfective in (\ref{ex:7:57}--\ref{ex:7:60}). They are general statements rather than descriptions of a particular situation.
\ea\label{ex:7:57}
Slərele \textbf{áyəɗay} məze.\\
\gll ɬɪrɛlɛ \textbf{á-jəɗ{}-aj} mɪʒɛ\\
work \oldstylenums{3}\textsc{s}+{\IFV}-tire{}-{\CL} person\\
\glt ‘Work tires people out.’\\
\z
\ea\label{ex:7:58}
Fat \textbf{ánah} háy.\\
\gll fat \textbf{á-nax} haj\\
sun \oldstylenums{3}\textsc{s}+{\IFV}-ripen millet\\
\glt ‘The sun ripens the millet.’
\z
\ea\label{ex:7:59}
\textbf{Káslay} awak nə məsləye.\\
\gll \textbf{ká-ɬ{}-aj} awak nə mɪ-ɬ{}-ijɛ\\
{\twoS}+{\IFV}-slay{}-{\CL} goat with {\NOM}{}-slay-{\CL}\\
\glt ‘You slaughter goats by cutting their throat, and not by any other way.’ (lit. you slay a goat with slaying)\\
\z
\ea\label{ex:7:60}
\textbf{Kákaɗ} okfom nə məkəɗe. \textbf{Káslay} bay.\\
\gll \textbf{ká-kaɗ} ɔkʷfɔm nə mɪ-kɪɗ-ɛ \textbf{ká-ɬ{}-aj} baj\\
{\twoS}+{\IFV}-kill(club) mouse with {\NOM}{}-kill(club)-{\CL} {\twoS}+{\IFV}-slay-{\CL} {\NEG}\\
\glt ‘You kill mice by smashing their head; you don’t cut their throats.’ (lit. you kill a mouse with killing; you don’t slay it)\\
\z
The Imperfective can refer to events that take place at any time, including in the past. In a story set in the past, the idea of an ongoing event that was the context for another event is encoded using the Imperfective verb form combined with the progressive aspect construction (see \sectref{sec:8.2.1}). The Imperfective verb stems are bolded in \REF{ex:7:61} (a sentence from the introduction of a narrative not illustrated in this work).
\ea\label{ex:7:61}
Asa təmənjar zar Məloko andalay \textbf{ásəya} ele \\
\gll asa tə-mənzar zar Mʊlɔkʷɔ a-nd=alaj \textbf{á{}-s=ija} ɛlɛ \\
if \oldstylenums{3}\textsc{p}-see man Moloko \oldstylenums{3}\textsc{s}-{\PRG}=away \oldstylenums{3}\textsc{s}+{\IFV}-cut={\PLU} thing \\
\glt ‘If they found a Moloko cutting [his fields]’\\
\medskip
nə zlərgo coco fan na, \\
\gll nə ɮʊrgʷɔ tsɔtsɔ faŋ na \\
with axe \textsc{id}:cutting already {\PSP}\\
\glt ‘with his axe, \textit{tsotso}’\\
\medskip
təlala təta gam na, tarəbokoy na ala rəbok rəbok.\\
\gll tə-l=ala təta gam na ta-rʊbɔkʷ{}-ɔj na=ala {rʊbɔkʷ rʊbɔkʷ}\\
\oldstylenums{3}\textsc{p}-go=to \oldstylenums{3}\textsc{p} many {\PSP} \oldstylenums{3}\textsc{p}-hide-{\CL} \oldstylenums{3}\textsc{s}.{\DO}=to {\textsc{id}:hide}\\
\glt ‘many came stealthily upon him \textit{rəbok, rəbok}.’
\z
In narratives, the Imperfective is found in the introduction to stories to describe the way things were at the beginning of the story.\footnote{As well as the Imperfective, verb forms in the progressive aspect (\sectref{sec:8.2.1}) and existentials (which do not inflect for aspect, \sectref{sec:3.4}) are found in the setting and conclusion sections of a narrative.} For example, in the Disobedient Girl story, the main verbs in the introduction (lines 1--8) are all Imperfective. The entire story is in \sectref{sec:1.5}; the literal English translation of the introduction is given here with Imperfectives bolded.
\begin{quote}{“A story under the silo, they say, the story of the disobedient girl: \\ Long ago, to the Moloko people, God \textbf{gives} his blessing. That is, even if they had only sowed a little [millet] like this, it \textbf{lasts} them enough for the whole year. While grinding on the grinding stone, they \textbf{take} one grain of millet. So, if they \textbf{are grinding} it, the flour \textbf{multiplies}. Just one grain of millet, it \textbf{suffices} for them, and there \textbf{are leftovers}. Because, during its grinding, it \textbf{multiplies} on the grinding stone.”}\end{quote}
Imperfectives are also found in the conclusion of the narrative to recount how things turned out at the end of the story. The main verbs in the conclusion of the Disobedient Girl are also Imperfective. The literal English translation of the conclusion (lines 32--38) is given here with Imperfectives bolded (the entire story is in \sectref{sec:1.5}).
\begin{quote}{“So, ever since that time, finished! The Molokos say that God \textbf{gets} angry because of that girl, the disobedient one. Because of all that, God \textbf{takes back} his blessing from them. And now, one grain of millet, it \textbf{doesn’t multiply} anymore. Putting one grain of millet on the grinding stone, it \textbf{doesn’t multiply} anymore. You must \textbf{put on} a lot. It is like this they say, The curse belongs to that young woman who brought this suffering onto the people.”}\end{quote}
When the Imperfective co-occurs with the Perfect, the verb describes the current state or result of an event (\ref{ex:7:62}, see \sectref{sec:7.5.3}).
\ea\label{ex:7:62}
Arahə\textbf{va}.\\
\gll à-rah\textbf{=va}\\
\oldstylenums{3}\textsc{s}+{\PFV}-fill={\PRF}\\
\glt ‘It is full.’ (it had filled)\\
\z
\is{Tense, mood, and aspect!Imperfective aspect|)}
\subsection{Irrealis mood}\label{sec:7.4.3}\is{Tense, mood, and aspect!Irrealis mood|(}
\hypertarget{RefHeading1212261525720847}{}
\citet{FriesenMamalis2008} showed how mood influences the vowel features of the subject pronominal prefix. Moloko has two moods: realis and irrealis. The main formal feature of the irrealis mood is that the vowel in the subject prefix is lengthened. There are three subtypes of irrealis mood, indicated by tone along with the lengthened subject prefix.\footnote{Only two moods were distinguished in previous documents (\citealt{FriesenMamalis2008}; \citealt{Boyd2003}).} Tone on the subject prefix has three patterns, and no longer correlates with Perfective or Imperfective aspect in the irrealis mood. Rather, it correlates with the speaker’s desire and will. These three subtypes of mood are called Potential, Hortative, and Possible. Potential mood expresses an action desired by the speaker that is under his or her influence to perform. It carries a mild hortatory force for second person forms. Hortative mood expresses an action desired by the speaker to be performed by another who is somehow under his or her influence. Possible mood expresses an action that is desired by the speaker but dependent on the will of another.
The difference between the moods is illustrated in the following narrative situations. The first (\ref{ex:7:63} and \ref{ex:7:64}) illustrates a situation where someone says that he wants the chief to come to him, but he is not sure if the chief will actually come. The fact that the chief’s coming is desired by the speaker but dependent on the will of the chief is expressed by the Possible mood in \REF{ex:7:63}, with falling tone on the lengthened subject prefix (bolded). Compare with the response given in \REF{ex:7:64}, where the speaker is sure that the chief will come. The surety is expressed by the Potential mood, with high tone on the lengthened subject prefix (bolded).
\ea\label{ex:7:63}
Asaw bahay məlala azana \textbf{aá}lala ete ɗaw?\\
\gll a-s=aw bahaj mə-l=ala azana \textbf{áà-}l=ala ɛtɛ ɗaw\\
\oldstylenums{3}\textsc{s}-please={\oneS}.{\IO} chief \oldstylenums{3}\textsc{s}+{\HOR}-go=to maybe \oldstylenums{3}\textsc{s}+{\PBL}{}-go=to polite {\textsc{q}}\\
\glt ‘I would like the chief to come; maybe he will come (if he wants to).’\\
\z
\ea\label{ex:7:64}
\textbf{Áa}lala.\\
\gll \textbf{áá-}l=ala\\
\oldstylenums{3}\textsc{s}+{\POT}-go=to\\
\glt ‘He will come (I am sure).’\\
\z
Likewise, in \REF{ex:7:65}, the speaker is expressing his wish that a potential attacker will leave him and his family alone. The falling tone on the lengthened subject prefix (bolded) indicates that the speaker is not sure that the person will leave them alone, but it depends on the will of that person (Possible mood).
\ea\label{ex:7:65}
Adan bay \textbf{aá}makay loko émbəzen loko asabay.\\
\gll adaŋ baj \textbf{áà-}mak-aj lɔkʷɔ ɛ{}-mbɪʒɛŋ lɔkʷɔ asa-baj\\
perhaps {\NEG} \oldstylenums{3}\textsc{s}+{\PBL}{}-leave{}-{\CL} \oldstylenums{1}\textsc{Pin} \oldstylenums{3}\textsc{s}+{\IFV}-ruin \oldstylenums{1}\textsc{Pin} again-{\NEG}\\
\glt ‘Perhaps he will leave us alone; he will not ruin us anymore.’\\
\z
High tone on the lengthened subject prefix indicates Potential mood (an action desired by the speaker that is under his or her influence to perform, \ref{ex:7:66}--\ref{ex:7:68}). In the examples, the subject prefix is bolded.
\noindent\parbox{\textwidth}{\ea\label{ex:7:66}
Hajan \textbf{nóo}lo a kosoko ava.\\
\gll hadzaŋ \textbf{n\'ɔ\'ɔ}{}-l\'ɔ a kɔsɔkʷɔ ava\\
tomorrow {\oneS}+{\POT}-go at market in\\
\glt ‘Tomorrow I will go to the market.’\\
\z}
\ea\label{ex:7:67}
\textbf{Ó}lo.\\
\gll \textbf{áá}-l\'ɔ\\
\oldstylenums{3}\textsc{s}+{\POT}-go\\
\glt ‘He/she will hopefully go.’ (if I have a say in it)\\
\z
\ea\label{ex:7:68}
\textbf{Káa}zala təta bay.\\
\gll \textbf{káá}{}-z=ala təta baj\\
{\twoS}+{\POT}-take=to ability {\NEG}\\
\glt ‘You cannot bring it.’\\
\z
Low tone on the lengthened subject prefix indicates Hortative mood (an action desired by the speaker to be performed by another who is somehow under his or her influence, \ref{ex:7:69}--\ref{ex:7:70}).
\ea\label{ex:7:69}
\textbf{Moo}lo a kosoko ava.\\
\gll \textbf{m\`ɔ\`ɔ}{}-l\=ɔ a kɔsɔkʷɔ ava\\
\oldstylenums{3}\textsc{s}+{\HOR}-go at market in\\
\glt ‘He/she should go to the market.’\\
\z
\ea\label{ex:7:70}
\textbf{Koo}zəmom enen bay.\\
\gll \textbf{k\`ɔ\`ɔ}{}-z\=ʊm-ɔm ɛnɛŋ baj\\
{\twoP}+{\HOR}-eat-{\twoP} another {\NEG}\\
\glt ‘You (plural) should not eat anything.’\\
\z
High tone followed by low tone on the lengthened subject prefix indicates Possible mood (an action desired by the speaker but dependent on the will of another, \ref{ex:7:71}--\ref{ex:7:74}).
\ea\label{ex:7:71}
Epeley epeley ɗəw \textbf{noó}lo bay ɗaw?\\
\gll ɛpɛlɛj ɛpɛlɛj ɗuw \textbf{n\'ɔ\`ɔ}-l\=ɔ baj ɗaw\\
whenever whenever also {\oneS}+{\PBL}-go {\NEG} {\textsc{q}}\\
\glt ‘Far in the future also, might I not go perhaps?’\\
\z
\ea\label{ex:7:72}
\textbf{Aá}lo.\\
\gll \textbf{áà}-l\=ɔ\\
\oldstylenums{3}\textsc{s}+{\PBL}-go\\
\glt ‘He/she might go.’ (it is up to him whether he goes, and I don’t know what he is thinking)\\
\z
\ea\label{ex:7:73}
Adan bay ɓərav ahan \textbf{aá}ndeslen \textbf{aá}makay məɗəgele ahan.\\
\gll {adaŋ baj} ɓərav=ahaŋ \textbf{áà}-ndɛɬɛŋ \textbf{áà}-m\={a}k-aj mɪ-ɗɪgɛl-ɛ\\
perhaps heart=\oldstylenums{3}\textsc{s}.{\POSS} \oldstylenums{3}\textsc{s}+{\PBL}{}-cool \oldstylenums{3}\textsc{s}+{\PBL}{}-leave{}-{\CL} {\NOM}{}-think-{\CL}\\
\medskip
\gll =ahaŋ\\
=\oldstylenums{3}\textsc{s}.{\POSS}\\
\glt ‘Perhaps his heart will cool, and he might leave behind his anger (lit. his thinking).’\\
\z
\ea\label{ex:7:74}
\textbf{Maá}həzlok asabay bay way.\\
\gll \textbf{máà}-hʷʊɮ{}-ɔk asa-baj baj waj\\
\oldstylenums{1}\textsc{Pin}+{\PBL}{}-destroy-\oldstylenums{1}\textsc{Pin} again-{\NEG} {\NEG} who\\
\glt ‘Maybe we won’t be destroyed after all.’\footnote{Note that this ‘passive’ idea (to be destroyed) is accomplished through the flexible transitivity system in Moloko\is{Transitivity!Clauses with zero transitivity}. The verb means ‘destroy’ but with the Theme as subject of the verb, the whole clause here expresses a passive idea (\chapref{chap:9}). }\\
\z
The three irrealis moods are illustrated in \tabref{tab:62} for the high tone verb /l\textsuperscript{o}/ ‘go.’
\begin{table}
\caption{\label{tab:62} Mood for the verb /l\textsuperscript{o}/ ‘go’}
\resizebox{\textwidth}{!}{\begin{tabular}{ll}
\lsptoprule
{\twoS} form & \oldstylenums{3}\textsc{s} form\\\midrule
\multicolumn{2}{c}{Potential mood}\\\midrule\relax
[káá-l=àlà] & [áá-l=àlà] \\
{\twoS}+{\POT}-go=to & \oldstylenums{3}\textsc{s}+{\POT}-go=to \\
‘You will come.’ (I am sure you will come) & ‘He/she will come.’ (I am sure he will come)\\\midrule
\multicolumn{2}{c}{Hortative mood}\\\midrule\relax
[kàà-l=àlá] & [m\`ə-l=àlá]\\
{\twoS}+{\HOR}-go=to & \oldstylenums{3}\textsc{s}+{\HOR}-go=to \\
‘You come now!’ (I want you to come) & ‘He/she should come.’ (I want him to come)\\\midrule
\multicolumn{2}{c}{Possible mood}\\\midrule\relax
[káà-l=àlà] & [áà-l=àlà] \\
{\twoS}+{\PBL}{}-go=to & \oldstylenums{3}\textsc{s}+{\PBL}{}-go=to \\
‘I want you to come (but I am not sure if you will).’ & ‘I want him to come (but am not sure if he will).’\\
\lspbottomrule
\end{tabular}}
\end{table}
\tabref{tab:63} illustrates the low tone verb /tats/ ‘close’ in all of the realis and irrealis forms.
\begin{table}
\resizebox{\textwidth}{!}{%
\begin{tabular}{lll}
\lsptoprule
& {{\twoS} form} & {Gloss}\\\midrule
{Perfective} & [\textbf{k\`ə}-t\={a}ts-\={a}j mahaj] & ‘You closed the door.’\\
& {\twoS}+{\PFV}-close-{\CL} door \\\midrule
{Imperfective} & [\textbf{k\'ə}-t\={a}ts-\={a}j mahaj] & ‘You are closing the door.’\slash \\
& {\twoS}+{\IFV}-close-{\CL} door & ‘You are about to close the door.’\\\midrule
{Potential} & [\textbf{káá}-t\={a}ts-\={a}j mahaj] & ‘I would like you to close the door.’\slash\\
& {\twoS}+{\POT}-close-{\CL} door & ‘You should close the door.’ / \\
& & ‘You will close the door.’\\\midrule
{Hortative} & [\textbf{kàà}{}-t\={a}ts-\={a}j mahaj] & ‘I strongly suggest you close the door.’ / \\
& {\twoS}+{\HOR}-close-{\CL} door & ‘You should have already closed the door.’ \\\midrule
{Possible} & [\textbf{káà}{}-t\={a}ts-\={a}j mahaj] & ‘You might close the door.’ /\\
& {\twoS}+{\PBL}-close-{\CL} door & ‘I want you to close the door but \\
& & I don’t know if you will.’\\
\lspbottomrule
\end{tabular}}
\caption{\label{tab:63} Realis and irrealis forms of /tats/ ‘close’}
\end{table}
In first or third person, the Potential mood indicates some measure of confidence on the part of the speaker that the action will be performed, or the state achieved. First note the Imperfective in \REF{ex:7:75} (with high tone and short vowel on subject prefix) expressing an incomplete action. The Potential mood in \REF{ex:7:76} (with high tone and long vowel on subject prefix) carries the idea of surety (as does \ref{ex:7:77}).
\ea\label{ex:7:75}
\textbf{Ná}l\textbf{o} a kosoko ava.\\
\gll \textbf{ná}{}-l\textbf{\'ɔ} a kɔsɔkʷɔ ava\\
{\oneS}+{\IFV}-go at market in\\
\glt ‘I am going to the market.’
\z
\ea\label{ex:7:76}
\textbf{Náa}l\textbf{o} a kosoko ava.\\
\gll \textbf{náá}{}-l\textbf{\'ɔ} a kɔsɔkʷɔ ava\\
{\oneS}+{\POT}-go at market in\\
\glt ‘I will go to the market.’\\
\z
\ea\label{ex:7:77}
Asa hay ango andava na mɛ, \textbf{áa}rəɓay.\\
\gll asa haj=aŋgʷɔ a-ndava na mɛ \textbf{áá}{}-rəɓ-aj\\
if house={\twoS}.{\POSS} \oldstylenums{3}\textsc{s}-finish {\PSP} opinion \oldstylenums{3}\textsc{s}+{\POT}-{be beautiful}-{\CL}\\
\glt ‘When your house is finished, it will be beautiful.’\\
\z
\tabref{tab:64} shows a conjugation of the low tone verb /fat-j/ ‘descend’ in the Potential form.
\begin{table}[h]
\begin{tabular}{lll}
\lsptoprule
{Person} & {Singular} & {Plural}\\\midrule
{1} & [\textbf{náá}{}-f\={a}t-aj] & [\textbf{má}{}-f\={ɔ}t-ɔkʷ] \\
& {\oneS}+{\POT}-descend{}-{\CL} & \oldstylenums{1}\textsc{Pin}+{\POT}-descend-\oldstylenums{1}\textsc{Pin}\\
& ‘I will go down.’ & ‘We will go down.’\\
& & [\textbf{ná}{}-f\={ɔ}t-ɔm]\\
& & \oldstylenums{1}\textsc{Pin}+{\POT}-descend-\oldstylenums{1}\textsc{Pin}\\
& & ‘We (exclusive) will go down.’\\\midrule
{2} & [\textbf{káá}{}-f\={a}t-aj] & [\textbf{ká}{}-f\={ɔ}t-ɔm]\\
& {\twoS}+{\POT}-descend{}-{\CL} & {\twoP}+{\POT}-descend-{\twoP}\\
& ‘I would like you to go down & ‘You will all go down.’\\
& (you should go down).’ & \\\midrule
{3} & [\textbf{áá}{}-f\={a}t-aj] & [\textbf{táá}{}-f\={a}t-aj]\\
& \oldstylenums{3}\textsc{s}+{\POT}-descend{}-{\CL} & \oldstylenums{3}\textsc{p}+{\POT}-descend{}-{\CL}\\
& ‘He/she will go down.’ & ‘They will go down.’\\
\lspbottomrule
\end{tabular}
\caption{Potential form conjugation of /fat-j/ ‘descend’ \label{tab:64}}
\end{table}
\tabref{tab:65} shows a conjugation of the low tone verb /fat-j/ ‘descend’ in the Hortative form, in which the \oldstylenums{3}\textsc{s} subject prefix is [m\`{a}\`{a}-]. Compared with the Potential form, the Hortative form is a little stronger in terms of its hortatory force (see \sectref{sec:10.4}).
\begin{table}[h]
\fittable{
\begin{tabular}{lll}
\lsptoprule
{Person} & {Singular} & {Plural}\\\midrule
{1} & [\textbf{nàà}{}-fàt-aj] & [\textbf{mà}{}-f\`{ɔ}t-ɔkʷ]\\
& {\oneS}+{\HOR}-descend{}-{\CL} & \oldstylenums{1}\textsc{Pin}+{\HOR}-descend-\oldstylenums{1}\textsc{Pin}\\
& ‘I should go down.’ & ‘I would like us (inclusive) to go down\\
& & (we should go down).’\\
& & [\textbf{nà}{}-f\`{ɔ}t-ɔm]\\
& & \oldstylenums{1}\textsc{Pin}+{\HOR}-descend-\oldstylenums{1}\textsc{Pin}\\
& & ‘I would like us (exclusive) to go down \\
& & (we should go down).’\\\midrule
{2} & [\textbf{kàà}{}-fàt-aj] & [\textbf{kàà}{}-f\`{ɔ}t-ɔm]\\
& {\twoS}+{\HOR}-descend{}-{\CL} & {\twoP}+{\HOR}-descend-{\twoP}\\
& ‘I would like you to go down & ‘I would like you all to go down\\
& (you should go down).’ & (you should go down).’ \\\midrule
{3} & [\textbf{màà}{}-fàt-aj] & [\textbf{tàà}{}-fàt-aj]\\
& \oldstylenums{3}\textsc{s}+{\HOR}-descend{}-{\CL} & \oldstylenums{3}\textsc{p}+{\HOR}-descend{}-{\CL} \\
& ‘I would like him to go down & ‘I would like them to go down\\
& (he should go down).’ & (they should go down).’ \\
\lspbottomrule
\end{tabular}
}
\caption{Hortative form conjugation of /fat-j/ ‘descend’ \label{tab:65}}
\end{table}
\tabref{tab:66} shows the Possible form of the low tone verb /fat-j/ ‘descend.’
\begin{table}[h]
\begin{tabular}{lll}
\lsptoprule
{{Person}} & {{Singular}} & {Plural}\\\midrule
{1} & [\textbf{náà}{}-fàt-aj] & [\textbf{máà}{}-f\`{ɔ}t-ɔkʷ]\\
& {\oneS}+{\PBL}{}-descend{}-{\CL} & \oldstylenums{1}\textsc{Pin}+{\PBL}{}-descend-\oldstylenums{1}\textsc{Pin}\\
& ‘I might go down.’ & ‘We might go down.’\\
& & [\textbf{náà}{}-f\`{ɔ}t-ɔm]\\
& & \oldstylenums{1}\textsc{Pin}+{\PBL}{}-descend-\oldstylenums{1}\textsc{Pin}\\
& & ‘We (exclusive) might go down.’\\\midrule
{2} & [\textbf{káà}{}-fàt-aj] & [\textbf{káà}{}-f\`{ɔ}t-ɔm]\\
& {\twoS}+{\PBL}{}-descend{}-{\CL} & {\twoP}+{\PBL}{}-descend-{\twoP}\\
& ‘You might go down.’ & ‘You might all go down.’\\\midrule
{3} & [\textbf{áà}{}-fàt-aj] & [\textbf{táà}{}-fàt-aj]\\
& \oldstylenums{3}\textsc{s}+{\PBL}{}-descend{}-{\CL} & \oldstylenums{3}\textsc{p}+{\PBL}{}-descend{}-{\CL}\\
& ‘He/she might go down.’ & ‘They might go down.’\\
\lspbottomrule
\end{tabular}
\caption{Possible form conjugation of /fat-j/ ‘descend’ \label{tab:66}}
\end{table}
Compare the realis Imperfective \REF{ex:7:78}, Potential (\ref{ex:7:79}), and Hortative (\ref{ex:7:80}) forms of the high tone verb /\textit{z m}/ ‘eat.’ The subject prefixes are bolded.
\ea\label{ex:7:78}
\textbf{M\'ə}zəmok ɗaf.\\
\gll \textbf{m\'{ʊ}-}zʊm-ɔkʷ ɗaf\\
\oldstylenums{1}\textsc{Pin}+{\IFV}-eat-\oldstylenums{1}\textsc{Pin} {millet loaf} \\
\glt ‘We are eating millet loaves.’
\z
\ea\label{ex:7:79}
Lomala \textbf{máa}z\textbf{ə}mok ɗaf. \\
\gll l-ɔm =ala \textbf{máá-}zʊm-ɔkʷ ɗaf \\
go[{\IMP}]-{\twoP} =to \oldstylenums{1}\textsc{Pin}+{\POT}-eat-\oldstylenums{1}\textsc{Pin} {millet loaf}\\
\glt ‘Come; I want us to eat food.’ (lit. millet loaf)
\z
\ea\label{ex:7:80}
Lomala \textbf{ma}d\textbf{ə}rok meher.\\
\gll l-ɔm =ala \textbf{mà-}d\=ʊr-\'ɔkʷ mɛhɛr\\
go[{\IMP}]-{\twoP} =to \oldstylenums{1}\textsc{Pin}+{\HOR}-pray-\oldstylenums{1}\textsc{Pin} forehead\\
\glt ‘Come; I want us to pray together.’
\z
\newpage
\tabref{tab:67} (from \citealt{FriesenMamalis2008}) shows the second and third person forms of a verb from each of the tone classes (H, L, toneless) in irrealis and realis moods.
\begin{sidewaystable}
\resizebox{\textwidth}{!}{\begin{tabular}{lllllll}
\lsptoprule
\multicolumn{2}{l}{{Underlying tone }} & \multicolumn{2}{c}{{Realis }} & \multicolumn{3}{c}{{Irrealis}}\\\cmidrule(lr){3-4}\cmidrule(lr){5-7}
{of verb stem} & & {Imperfective tone} & {Perfective tone} & {Potential} & {Hortative} & {Possible}\\\midrule
{H} & {{\twoS} } & [k\'{ə}-nzák-\={a}j] & [k\`{ə}-nzák-\={a}j] & [káá-nzák-\={a}j] & [kàà-nzák-áj] & [káà-nzák-áj] \\
& {form} & ‘you find’ & ‘you found’ & ‘I would like you to find’ & ‘you should find’ & ‘you might find’\\
\hhline{~------}& {\oldstylenums{3}\textsc{s} } & [á-nzák-\={a}j] & [à{}-nzák-\={a}j] & [áá-nzák-\={a}j] & [m\`{ə}-nzák-áj] & [máà{}-nzák-áj]\\
& {form} & ‘he finds’ & ‘he found’ & ‘I would like him to find’ & ‘he should find’ & ‘he might find’\\\midrule
{L} & {{\twoS} } & [k\'{ə}-tàts-\={a}j] & [k\`{ə}-tàts-\={a}j] & [káá-tàts-\={a}j] & [kàà-tàts-\={a}j] & [káà-tàts-\={a}j]\\
& {form} & ‘you close’ & ‘you closed’ & ‘I would like you to close’ & ‘you should close’ & ‘you might close’\\
\hhline{~------}& {\oldstylenums{3}\textsc{s} } & [á-tàts-\={a}j] & [à-tàts-\={a}j] & [áá-tàts-\={a}j] & [m\`{ə}-tàts-\={a}j] & [máà-tàts-\={a}j]\\
& {form} & ‘he closes’ & ‘he closed’ & ‘I would like him to close’ & ‘he should close’ & ‘he might close’\\\midrule
{toneless} & {{\twoS} } & [k\'{ə}-ɮáw-\={a}j] & [k\`{ə}-ɮàw-\={a}j] & [káá-ɮáw-\={a}j] & [kàà-ɮàw-\={a}j] & [káà-ɮàw-\={a}j]\\
& {form} & ‘you fear’ & ‘you feared’ & ‘I would like you to fear’ & ‘you should fear’ & ‘you might fear’\\
\hhline{~------}& {\oldstylenums{3}\textsc{s} } & [á-ɮáw-\={a}j] & [à-ɮàw-\={a}j] & [áá-ɮáw-\={a}j] & [mà-ɮàw-\={a}j] & [máà-ɮàw-\={a}j] \\
& {form} & ‘he fears’ & ‘he feared’ & ‘I would like him to fear’ & ‘he should fear’ & ‘he might fear’\\
\lspbottomrule
\end{tabular}}
\caption{Tone of realis and irrealis verb forms\label{tab:67}}
\end{sidewaystable}
Verb forms in irrealis mood occur in Moloko discourse to express events that might occur. In the Cicada text, some young men go out to bring back a tree that was desired by their chief. The men try but cannot bring home the tree (which throws the cicada’s eventual success into relief). A negative modal statement relates the unsuccessful attempt by the young men (\ref{ex:7:81}, from S. 16). The lengthened subject prefix characterising irrealis mood is bolded in \REF{ex:7:81}.
\ea\label{ex:7:81}\corpussource{Cicada, S. 16}\\
Albaya ahay tolo amazala agwazla na, \textbf{taa}zala təta bay.\\
\gll albaja=ahaj tɔ-lɔ ama-z=ala agʷaɮa na \textbf{tàà-}zaɗ=ala \\
{young man}=Pl \oldstylenums{3}\textsc{p}-go {\DEP}-take=to {spp. of tree} {\PSP} \oldstylenums{3}\textsc{p}+{\HOR}-take=to \\
\medskip
\gll təta baj\\
ability {\NEG}\\
\glt ‘The young men left to bring back the tree; [but] they were not able to bring [it].’\\
\z
\newpage
Also, dependent complement clauses represent things that were still future relative to the time of particular events on the event line (see \sectref{sec:7.7}). They encode desired results that might not necessarily happen, as illustrated in the examples below.
\ea\label{ex:7:82}\corpussource{Disobedient Girl, S. 13}\\
Asa asok \textbf{aməhaya} na, kázaɗ war elé háy bəlen.\\
\gll asa à-s=ɔkʷ \textbf{amə-h}\textbf{=aja} na ká-zaɗ war ɛlɛ\\
if \oldstylenums{3}\textsc{s}+{\PFV}-please={\twoS}.{\IO} {\DEP}+{\PFV}-grind={\PLU} {\PSP} {\twoS}+{\IFV}-take child eye\\
\medskip
\gll haj bɪlɛŋ\\
millet one\\
\glt ‘If you want to grind, you take only one grain.’\\
\z
\ea\label{ex:7:83}\corpussource{Cicada, S. 7}\\
Agasaka na ka mahay ango aka \textbf{aməmbese.}\\
\gll a-gas=aka na ka mahaj=aŋgʷɔ aka \textbf{amɪ-mbɛʃ-ɛ}\\
\oldstylenums{3}\textsc{s}-catch=on {\PSP} on door={\twoS}.{\POSS} on {\DEP}-rest{}-{\CL}\\
\glt ‘It would please you to have the tree at your door, so that you could rest under it.’ \\
\z
\is{Tense, mood, and aspect!Irrealis mood|)}
\largerpage
\subsection{Habitual iterative}\label{sec:7.4.4}\is{Tense, mood, and aspect!Habitual iterative aspect|(}\is{Plurality!Verb plurals|(}
\hypertarget{RefHeading1212281525720847}{}
The habitual iterative aspect\footnote{\citet{FriesenMamalis2008} called this ‘repetitive aspect.’ Note that Moloko has two other forms that involve repetition of the same actions -- the intermittent iterative (marked by complete reduplication of the verb stem, see \sectref{sec:7.4.5}) and the pluractional (marked by a verbal extension \textit{=aya} or \textit{=əya}, see \sectref{sec:7.5.2}).} presents the actor(s) performing an action repeatedly as their usual habit. This aspect is formed by the gemination of the onset of the final syllable of the verb word.\footnote{There are no examples in the corpus with verbal extensions.} In a one-consonant root, the root consonant is doubled \REF{ex:7:84}. The verb words showing this aspect are bolded in each of the examples and the reduplicated consonant is underlined.
\ea\label{ex:7:84}
Kafta kosoko zlaba na, Məloko anga enen ahay \textbf{tó\underline{ll}o} a ləhe.\\
\gll kafta kɔsɔkʷɔ ɮaba na Mʊlɔkʷɔ aŋga ɛnɛŋ=ahaj \textbf{t\'ɔ-\underline{ll}ɔ} a \\
day market Dogba {\PSP} Moloko {\POSS} another=Pl \oldstylenums{3}\textsc{p}+{\IFV}-go+{\ITR} at \\
\medskip
\gll lɪhɛ\\
bush\\
\glt ‘Each Sunday (the market of Dogba), some Molokos go to [work] their fields.’\\
\z
In a CC root with no suffix, the first C of the stem is doubled (\ref{ex:7:85}--\ref{ex:7:86}).
\ea\label{ex:7:85}
Tətərak ango nehe na, \textbf{ká\underline{ff}əɗ} ele ango a mogom waya azaɗ merkwe bay\\
\gll tətərak=aŋgʷɔ nɛhɛ na \textbf{ká-\underline{ff}əɗ} ɛlɛ=aŋgʷɔ a mɔgʷɔm \\
shoes={\twoS}.{\POSS} here {\PSP} {\twoS}+{\IFV}-put+{\ITR} thing={\twoS}.{\POSS} at home\\
\glt ‘Your shoes there, you should put them on (habitually, repeatedly, day after day) at home,’\\
\medskip
\gll waja à-zàɗ mɛrkʷɛ baj\\
because \oldstylenums{3}\textsc{s}+{\PFV}-take travel {\NEG}\\
\glt ‘because you can’t travel with them.’ (lit. it doesn’t take travel)\\
\z
The fact that the reduplicated consonant is the onset of the final syllable of the verb word (and not a particular consonant in the verb root) is illustrated by \REF{ex:7:86} and \REF{ex:7:87}, which show the same verb \textit{/z m\textsuperscript{o}/} in the {\twoS} and {\twoP} forms. The {\twoP} form has an extra syllable in the verb word because of the {\twoP} subject pronominal suffix. In the {\twoS} form, the reduplicated consonant is \textit{z} -- the first consonant of the root. In the {\twoP} form, the reduplicated consonant is \textit{m} -- the second consonant of the root. However, in both cases the reduplicated consonant is the consonant at the onset of the final syllable in the verb word.
\ea\label{ex:7:86}
A məjəvoko ava na, \textbf{kó\underline{zz}om} ɗaf.\\
\gll a mʊdzʊvɔkʷɔ ava na \textbf{k\'ʊ-\underline{zz}ʊm} ɗaf\\
at feast in {\PSP} {\twoS}+{\IFV}-eat+{\ITR} {millet loaf}\\
\glt ‘During a feast, you eat repeatedly (many times at many people’s houses).’\\
\z
\ea\label{ex:7:87}
A məjəvoko ava na, \textbf{k\'əzə\underline{mm}om} ɗaf.\\
\gll a mʊdzʊvɔkʷɔ ava na \textbf{k\'ʊ-z\'ʊ\underline{mm}{}-ɔm} ɗaf\\
at feast in {\PSP} 2+{\IFV}{}-eat+{\ITR}{}-{\twoP} {millet loaf}\\
\glt ‘During a feast, you all eat (many times at many people’s houses).’\\
\z
\REF{ex:7:88} and \REF{ex:7:89} also show the reduplication of the onset of the final syllable of the verb word with a /-j / suffix.
\ea\label{ex:7:88}
Kosoko molom na, ndam pəra ahay \textbf{té\underline{ss}e} gəzom.\\
\gll kɔsɔkʷɔ mɔlɔm na ndam pəra=ahaj \textbf{t\'ɛ-\underline{ʃʃ}{}-\'ɛ } gʊzɔm\\
market home {\PSP} person idol=Pl \oldstylenums{3}\textsc{p}+{\IFV}-drink+{\ITR}-{\CL} beer\\
\glt ‘On market day, the traditionalists drink millet beer (many people, much beer).’\\
\z
\ea\label{ex:7:89}
\textbf{Ada\underline{rr}ay} eteme waya gəvah gam.\\
\gll \textbf{à-dà\underline{rr}{}-\={a}j} ɛtɛmɛ waja gəvax gam\\
\oldstylenums{3}\textsc{s}+{\PFV}-plant+{\ITR}-{\CL} onion because field lots\\
\glt ‘He/she planted many onions because his field was large.’\\
\z
\is{Tense, mood, and aspect!Habitual iterative aspect|)}
\subsection{Intermittent iterative}\label{sec:7.4.5}\is{Tense, mood, and aspect!Intermittent iterative|(}
\hypertarget{RefHeading1212301525720847}{}
The intermittent iterative\footnote{\citet{FriesenMamalis2008} called this aspect simply “iterative.”} expresses the idea of the intermittent repetition of the same action, possibly by the same actor, over a period of time.\footnote{Moloko has two other forms that involve repetition of the same actions -- the habitual iterative (marked by reduplication of one consonant in the stem, see \sectref{sec:7.4.4}) and the pluractional (marked by a verbal extension \textit{=aya} or \textit{=əya}, see \sectref{sec:7.5.2}).} The intermittent iterative is formed by complete reduplication of the verb. Example \REF{ex:7:90} reflects a remark made by a friend concerning a situation where one duck died, then the owner bought another, and it died, and the situation was repeated four times. In the examples, the verb complex is delimited by square brackets.
\ea\label{ex:7:90}
Andəbaba ango amət amat.\\
\gll andəbaba=aŋgʷɔ [a-mət a-mat]\\
duck={\twoS}.{\POSS} \oldstylenums{3}\textsc{s}-die \oldstylenums{3}\textsc{s}-die \\
\glt ‘Your ducks keep dying.’ (lit. your duck, it dies it dies)
\z
In the elicited example below, the situation is that a group of people has gone to the market and has bought several items from several different vendors. Note that the directional extension \textit{ala} occurs only once, following the second verb.
\ea\label{ex:7:91}
A kosoko ava na, nəskwəmom nəskwəmom ala.\\
\gll a kɔsɔkʷɔ ava na [nʊ-sʊkʷʊm-ɔm nʊ-sʊkʷʊm-ɔm =ala ]\\
at market in {\PSP} {\oneS}-buy-\oldstylenums{1}\textsc{Pex} {\oneS}-buy-\oldstylenums{1}\textsc{Pex} =to\\
\glt ‘At the market, we buy and buy.’ (lit. at the market, we buy we buy)\\
\z
\is{Plurality!Verb plurals|)}\is{Tense, mood, and aspect!Intermittent iterative|)}
\section{Verbal extensions}\label{sec:7.5}
\hypertarget{RefHeading1212321525720847}{}
\citet{FriesenMamalis2008} found that the six verbal extensions in Moloko are a class of morphemes that modify the meaning of the verb. They are clitics that attach to the right edge of the verbal complex to form a phonological word. The verb stem and the extensions may be separated syntactically by the indirect object pronominal clitics and third person {\DO} pronominals (see Sections \ref{sec:7.3.1.1} and \ref{sec:7.3.2}, respectively). The extensions trigger the loss of any prosody on the verb stem.
In Moloko there are three categories of verbal extensions. Adpositionals (\textit{=aka} ‘on’ and \textit{=ava} ‘in’)\footnote{These locational extensions are the same as the locational clitics on adpositional phrases; see \sectref{sec:5.6.2}.} modify the meaning of the verb with particular reference to the location\is{Deixis!Locational} of the action. Directionals (\textit{=ala} ‘toward,’ \textit{=alay} ‘away,’ and \textit{=aya} ‘back and forth’ or pluractional) add the idea of movement with respect to a particular point of reference. The third category is the Perfect \textit{=va}.
\subsection{Adpositionals}\label{sec:7.5.1}\is{Clitics!Adpositionals|(}\is{Adpositionals|(}
\hypertarget{RefHeading1212341525720847}{}
There are two adpositional enclitics:\footnote{\citet{FriesenMamalis2008} called these ``locationals.''} \textit{=aka} ‘on, on top of’ and \textit{=ava} ‘in.’ These extensions give the verb an added sense of the location of the action in the discourse. The extension \textit{=aka} ‘on, on top of’ \REF{ex:7:92} resembles the second element of the adposition \textit{kə…aka} ‘on.’ In like manner, \textit{=ava} ‘in’ \REF{ex:7:93} resembles the adposition \textit{a…ava} ‘in’ (see \sectref{sec:5.6.2}).\footnote{Even though the verb in this example has verbal extensions, it is not conjugated for subject since it is a climactic point in the story where nominalised forms are often found (\sectref{sec:7.6}).} The corresponding adpositional phrases often co-occur with the adpositionals. In the examples, the adpositions and adpositionals are bolded.
\ea\label{ex:7:92}
Afəɗ\textbf{aka} war elé háy na, \textbf{kə} ver \textbf{aka}.\\
\gll a-fəɗ=\textbf{aka} war ɛlɛ haj na \textbf{kə} vɛr \textbf{aka}\\
\oldstylenums{3}\textsc{s}-place=on child eye millet {\PSP} on stone on\\
\glt ‘She put the grain of millet on the grinding stone.’\\
\z
\ea\label{ex:7:93}
Məmət\textbf{ava} alay \textbf{a} ver \textbf{ava}.\\
\gll mə-mət=\textbf{ava}=alaj \textbf{a} vɛr \textbf{ava}\\
{\NOM}-die=in=away at room in\\
\glt ‘She died in the room.’\\
\z
Adpositional extensions are phonological enclitics at the right edge of the verb. \citet{FriesenMamalis2008} showed them to be phonologically bound to the verb stem because the /-j/ suffix drops off when the clitic attaches (see also \sectref{sec:6.3}). Compare \REF{ex:7:94} and \REF{ex:7:95}, which illustrate the verb /g-j\textsuperscript{e}/ ‘do’: the /-j/ suffix in the stem drops off when the extension \textit{=aka} is attached \REF{ex:7:95}. Another piece of evidence that the extension is phonologically bound to the verb stem is that the palatalisation of the verb stem is neutralised by the extension. There is no adpositional extension and the verb word is palatalised in \REF{ex:7:94}, whereas in \REF{ex:7:95} the locational extension \textit{=aka} has neutralised the prosody of the entire verb complex.
\ea\label{ex:7:94}
Tege cəɗoy.\\
\gll tɛ-g-ɛ tsʊɗɔj\\
\oldstylenums{3}\textsc{p}-do-{\CL} trick\\
\glt ‘They played a trick.' (lit. they did trick)\\
\z
\ea\label{ex:7:95}
Tag\textbf{aka} cəɗoy.\\
\gll ta-g=\textbf{aka} tsʊɗɔj\\
\oldstylenums{3}\textsc{p}-do=on trick\\
\glt ‘They played another trick.’ (lit. they did trick ‘on top’ [of when they did it before])\\
\z
Another piece of evidence that the extensions are phonologically attached to the verb stem is that the word-final allophones of /n/ and /h/, that is [ŋ] and [x], respectively, do not occur at the end of the verb stem when an extension is attached. When the extension \textit{=va} cliticises to the verb /r h/ ‘fill,’ the word-final allophone of /h/ does not occur \REF{ex:7:96}. These allophones would be expected if the verb stem and Perfect extension were separate words.
\ea\label{ex:7:96}
Arah\textbf{va} peɗeɗe.\\
\gll à-rah\textbf{=va} pɛɗɛɗɛ\\
\oldstylenums{3}\textsc{s}+{\PFV}-fill={\PRF} \textsc{id}:full\\
\glt ‘It had filled right to the rim.’\\
\z
The adpositional does not cliticise to the verb in \REF{ex:7:97} and \REF{ex:7:98} since the indirect object pronominal enclitic and the plural subject suffix both trigger a word-final boundary (see \sectref{sec:7.1}), leaving the adpositional in a separate phonological word. In the examples, the boundaries of the phonological words are indicated by square brackets.
\ea\label{ex:7:97}
Kanjaw \textbf{aka}.\\
\gll {}[ka-nz=aw] [=\textbf{aka}]\\
{\twoS}-sit={\oneS}.{\IO} \hspaceThis{[}=on\\
\glt ‘You are better than me.’ (lit. you sit on me)\\
\z
\ea\label{ex:7:98}
Nədozlom \textbf{ava} a cəveɗ ava nə məze.\\
\gll {}[n\`ə-dɔɮ-ɔm] [\textbf{=ava}] a tʃɪvɛɗ ava nə mɪʒɛ\\
{\oneS}+{\PFV}-intersect-\oldstylenums{1}\textsc{Pex} \hspaceThis{[}=in in road in with person\\
\glt ‘We met a person on the road.’\\
\z
The extension \textit{=aka} ‘on’ or ‘on top of’ also has the metaphorical meaning of ‘in addition to,’ ‘again,’ or ‘even still’ when the action of the verb occurs ‘on top of’ something that occurred previously; compare the following pair of examples, and note how the \textit{=aka} in \REF{ex:7:100} looks backward to another instance of the same action in \REF{ex:7:99}.
\ea\label{ex:7:99}
Dərala.\\
\gll dər=ala\\
move[{\twoS}.{\IMP}]=to\\
\glt ‘Come closer~(to me).’\\
\z
\ea\label{ex:7:100}
Dər\textbf{aka} ala.\\
\gll dər=\textbf{aka}=ala\\
move[{\twoS}.{\IMP}]=on=to\\
\glt ‘Come even still closer.’\\
\z
Using \textit{=aka} in a context where the addressee is eating renders the meaning ‘do you want any more ‘on top of’ what you have already eaten?’ \REF{ex:7:101}.
\ea\label{ex:7:101}
Asok \textbf{aka} ɗaw?~\\
\gll a-s=ɔkʷ =\textbf{aka} ɗaw\\
\oldstylenums{3}\textsc{s}-please={\twoS}.{\IO} =on {\textsc{q}}\\
\glt ‘Do you want any more?’ (lit. is it pleasing to you on?)\\
\z
With the verb \textit{mbaɗ} ‘change,’ \textit{=aka} gives an idiomatic meaning to mark a change of speaker; that is, he spoke ‘on top of’ what the other person had just said.
\ea\label{ex:7:102}
Ambaɗaŋ \textbf{aka.}\\
\gll a-mbaɗ=aŋ =\textbf{aka}\\
\oldstylenums{3}\textsc{s}-change=\oldstylenums{3}\textsc{s}.{\IO} =on\\
\glt ‘He/she replied.’ (lit. he changed to him on)\\
\z \is{Adpositionals|)}
\subsection{Directionals}\label{sec:7.5.2}\is{Deixis!Directionals|(}\is{Clitics!Directionals|(}\is{Directionals|(}
\hypertarget{RefHeading1212361525720847}{}
\citet{FriesenMamalis2008} found three directional extensions: \textit{=ala} ‘towards’ (\ref{ex:7:103}, \ref{ex:7:104}), \textit{=alay} ‘away from’ \REF{ex:7:105}, and \textit{=aya/=əya} ‘back and forth repeated movement’ \REF{ex:7:106}. These directionals occur after the verb word and, if present, after the adpositional extensions, as seen in \REF{ex:7:103} and \REF{ex:7:104}. The directionals precede the Perfect (see \sectref{sec:7.5.3}), as seen in \REF{ex:7:106}.
\ea\label{ex:7:103}
Kazaka \textbf{ala} hor ese.\\
\gll ka-zaɗ=aka=\textbf{ala} hʷɔr ɛʃɛ\\
{\twoS}-take=on=to woman again\\
\glt ‘You take another wife’ (on top of the one you already have).\footnote{The root-final \textit{ɗ} of the verb \textit{zaɗ} ‘take’ drops off when affixes and clitics are added (\sectref{sec:6.2}).} (lit. you take a wife on again)\\
\z
\ea\label{ex:7:104}
Təjapata aka \textbf{ala} ana Məloko enen ahay.\\
\gll tə-dzap=ata =aka=\textbf{ala} ana Mʊlɔkʷɔ ɛnɛŋ=ahaj\\
\oldstylenums{3}\textsc{p}-group=\oldstylenums{3}\textsc{s}.{\IO} =on=to {\DAT} Moloko another=Pl\\
\glt ‘They grouped together again against some of the Molokos.’ (point of reference is the Molokos)\\
\z
\ea\label{ex:7:105}
Dəraka \textbf{alay.}\\
\gll dər=aka=\textbf{alaj}\\
move[{\twoS}.{\IMP}]=on=away\\
\glt ‘Move further away~(from me).’\\
\z
\ea\label{ex:7:106}\corpussource{Race story}\footnote{\citealt{Friesen2003}.}\\
Moktonok na, abək ta \textbf{aya} va məlama ahan ahay jəyga.\\
\gll mɔkʷtɔnɔkʷ na a-bək ta=\textbf{aja}=va məlama=ahaŋ=ahaj dzijga\\
toad {\PSP} \oldstylenums{3}\textsc{s}-invite \oldstylenums{3}\textsc{p}.{\DO}={\PLU}={\PRF} brothers=\oldstylenums{3}\textsc{s}.{\POSS}=Pl all\\
\glt ‘The toad, he had already invited all of his brothers.’ (i.e., he went back and forth to all his brothers, inviting each)\\
\z
Like the adpositionals, the directionals are phonological clitics at the right edge of the verbal complex. The presence of the enclitics requires that the /-j/ suffix be dropped (the verb stem in example \REF{ex:7:104} is /dzap-j/ ‘mix’). The neutral prosody of these extensions causes the palatalisation on the verb stem to neutralise. In \REF{ex:7:107} the verb stem is /nz-j\textsuperscript{e}/ ‘go,’ with a \oldstylenums{3}\textsc{s} surface form of [ɛnʒɛ].
\ea\label{ex:7:107}
Anj\textbf{ala}.\\
\gll a-nz=\textbf{ala}\\
\oldstylenums{3}\textsc{s}-go=to\\
\glt ‘He/she is coming.’
\z
Directional extensions orient the event expressed by the verb relative to a centre of reference. In speech, that point of reference is usually the speaker, so actions are seen as going towards the speaker (=\textit{ala}), away from the speaker (=\textit{alay}), or back and forth repeatedly (=\textit{aya}). Compare the following examples of the verb /s kʷ m/ ‘buy/sell’ with a first person subject. When used with the directional \textit{=ala} ‘toward,’ the verb means ‘buy’ \REF{ex:7:108}. When it is used with the directional \textit{=alay} ‘away,’ it means ‘sell’ \REF{ex:7:109}.
\ea\label{ex:7:108}
Nəskom\textbf{ala} awak.\\
\gll n\`ə-sʊkʷɔm\textbf{=ala} awak\\
{\oneS}+{\PFV}-buy/sell=to goat\\
\glt ‘I bought a goat.’\\
\z
\ea\label{ex:7:109}
Nəskom\textbf{alay} awak.\\
\gll n\`ə-sʊkʷɔm\textbf{=alaj} awak\\
{\oneS}+{\PFV}-buy/sell=away goat\\
\glt ‘I sold my goat.’\\
\z
The directional \textit{=ala} ‘toward’ indicates an action that moves toward the centre of reference (see \ref{ex:7:110} and \ref{ex:7:112}). The directional \textit{=alay} ‘away’ indicates an action that moves away from that centre (see \ref{ex:7:111} and \ref{ex:7:113}). Compare the example pairs for /d r/ ‘move’ (\ref{ex:7:110} and \ref{ex:7:111}) and for \textit{/}z ɗ/ ‘take’ (\ref{ex:7:112} and \ref{ex:7:113}). In each example pair, the first shows an action towards the speaker and the second shows an action away from the speaker.
\ea\label{ex:7:110}
Dər\textbf{ala}. \\
\gll dər=\textbf{ala}\\
move[{\twoS}.{\IMP}]=to\\
\glt ‘Come closer (to me).’\\
\z
\ea\label{ex:7:111}
Dər\textbf{alay}.\\
\gll dər=\textbf{alaj}\\
move[{\twoS}.{\IMP}]=away\\
\glt ‘Move away (from me).’\\
\z
\ea\label{ex:7:112}
Z\textbf{ala} eteme.\\
\gll \ zaɗ=\textbf{ala} ɛtɛmɛ\\
take[{\twoS}.{\IMP}]=to onion\\
\glt ‘Bring the onion (to me).’\\
\z
\ea\label{ex:7:113}
Z\textbf{alay} eteme.\\
\gll \ zaɗ=\textbf{alaj} ɛtɛmɛ\\
take[{\twoS}.{\IMP}]=away onion\\
\glt ‘Take the onion away (from me).’\\
\z
The third directional, \textit{=aya} or \textit{=əya}, gives the idea of repetitive movement back and forth. This repetitive back and forth movement is called pluractional\is{Tense, mood, and aspect!Pluractional}\is{Plurality!Verb plurals}.\footnote{A verbal extension or affix is one way of showing pluractional actions in other Chadic languages (\citealt{Newman1990}). The other is reduplication of the verb root. Such verb root reduplication is also seen in Moloko for habitual iterative aspect (\sectref{sec:7.4.4}) and intermittent iterative aspect (\sectref{sec:7.4.5}).} A few verbs never occur without the pluractional and involve regular back and forth movements like sawing \REF{ex:7:114}, grinding \REF{ex:7:115}, or putting many (\textit{d=əya}). For other verbs, adding the directional adds a back and forth movement to the sense. Example \REF{ex:7:106} above involves the subject going from person to person to invite them to help.
\ea\label{ex:7:114}
Zar asəya memele.\\
\gll \ zar a-s=ija mɛmɛlɛ\\
man \oldstylenums{3}\textsc{s}-saw={\PLU} tree\\
\glt ‘The man saws the tree.’\\
\z
\ea\label{ex:7:115}
Aban ahaya háy.\\
\gll Abaŋ a-h=aja haj\\
Abang \oldstylenums{3}\textsc{s}-grind={\PLU} millet\\
\glt ‘Abang grinds millet.’\\
\z
Directionals are a device used in Moloko discourse to help provide cohesion.\footnote{Other discourse devices which function in cohesion include demonstratives (\sectref{sec:3.2}), the adjectiviser \textit{ga} (\sectref{sec:5.3}), the presupposition marker \textit{na} (\chapref{chap:11}), and participant tracking (\sectref{sec:7.3}).} Directionals keep the hearer oriented to the events of a story and how they relate to a particular spatial point of reference (a place or dominant character). The point of reference may remain constant throughout the whole story or it may change during the story. Selected lines from the Cicada text \REF{ex:7:116} illustrate how directionals relate main line events to the point of reference which is the chief (or perhaps the place in his compound where he makes the millet beer). The directionals are bolded in the examples. The presence of the two directionals in \REF{ex:7:119} and \REF{ex:7:120} is the only way in the story that we know that the cicada brought the tree back to the chief (until the chief thanks him in line 34).
\ea\label{ex:7:116}\corpussource{Cicada, S. 6}\\
Albaya ahay ndana kəlen təngala\textbf{ala} ma ana bahay.\\
\gll albaja=ahaj ndana kɪlɛŋ t\`ə-ŋgala\textbf{=ala} ma ana bahaj\\
youth=Pl {\DEM} then \oldstylenums{3}\textsc{p}+{\PFV}-return=to word {\DAT} chief\\
\glt ‘The above-mentioned young men then took the word (response) to the chief.’ (lit they returned the word to the chief)\\
\z
\ea\label{ex:7:117}\corpussource{Cicada, S. 12}\\
Təlo tamənjar na \textbf{ ala} mama agwazla nəndəye.\\
\gll t\`ə-lɔ tà-mənzar na\textbf{=ala} mama agʷaɮa nɪndijɛ\\
\oldstylenums{3}\textsc{p}+{\PFV}-go \oldstylenums{3}\textsc{p}+{\HOR}-see \oldstylenums{3}\textsc{s}.{\DO}=to mother {spp. of tree} {\DEM}\\
\glt ‘They went to see [for the chief] that mother-tree.'\\
\z
\ea\label{ex:7:118}\corpussource{Cicada, S. 16}\\
Kəlen albaya ahay tolo amaz\textbf{ala} agwazla na, taaz\textbf{ala} təta bay.\\
\gll kɪlɛŋ albaja=ahaj t\`ɔ-lɔ ama-z\textbf{=ala} agʷaɮa na \\
then youth=Pl \oldstylenums{3}\textsc{p}+{\PFV}-go {\DEP}-take=to {spp. of tree} {\PSP} \\
\glt ‘And then, the young men left to bring back the tree [to the chief];’\\
\medskip
\gll tàà-z\textbf{=ala} təta baj\\
\oldstylenums{3}\textsc{p}+{\HOR}-take=to ability {\NEG}\\
\glt ‘but they were not able to bring it [to him].'\\
\z
\ea\label{ex:7:119}\corpussource{Cicada, S. 30}\\
Amag\textbf{ala} ləmes.\\
\gll ama-g\textbf{=ala} lɪmɛʃ\\
{\DEP}-do=to song\\
\glt ‘He was singing towards [the chief’s house].' (lit. to do towards a song)\\
\z
\medskip
\ea\label{ex:7:120}\corpussource{Cicada, S. 31}\\
Sen \textbf{ala}\textbf{.}\\
\gll ʃɛŋ\textbf{=ala}\\
\textsc{id}:go=to\\
\glt ‘Going, [he] came [to the chief’s house].’\\
\z
Sometimes the directional \textit{=ala} ‘towards’ (see \sectref{sec:7.5.2}) can carry a Perfect kind of idea (an event being completed before a temporal reference point with ongoing effects up to that time), but with a slightly different connotation from the Perfect extension \textit{=va}. Compare \REF{ex:7:121} and \REF{ex:7:122}. Use of the directional \textit{=ala} ‘towards’ \REF{ex:7:121} with the verb /z m\textsuperscript{o}/ indicates that the person has already eaten, but at some other location, since the directional gives the idea that food has come to the speaker. Use of the Perfect itself \REF{ex:7:122} indicates that the person has finished eating (at the place where he is sitting). As such, the directional \textit{=ala} may be in the process of becoming grammaticalised for past tense or a subtype of Perfect.
\ea\label{ex:7:121}
Nəzəm\textbf{ala} toho.\\
\gll n\`ə-zəm\textbf{=ala} tɔhʷɔ\\
{\oneS}+{\PFV}-eat=to {\DEM}\\
\glt ‘I already ate over there (some other person’s house – before I arrived here).’\\
\z
\ea\label{ex:7:122}
Nəzəm\textbf{va} pew.\\
\gll n\`ə-zəm\textbf{=va} pɛw\\
{\oneS}+{\PFV}-eat={\PRF} enough\\
\glt ‘I already ate/ I have eaten enough (here in this place since I arrived here).’\\
\z
Likewise, the verb /s kʷ m/ ‘buy/sell’ is given a Perfect idea when it carries the \textit{=ala} extension \REF{ex:7:123}. As in \REF{ex:7:108}, the goat has come to the speaker. There is no Perfect extension \textit{=va}, but the idea is accomplished through the directional \textit{=ala}.
\ea\label{ex:7:123}
Nəskom na \textbf{ala awak}.\\
\gll n\`ʊ-sʊkʷʊm na\textbf{=ala} \textbf{awak}\\
{\oneS}+{\PFV}-buy/sell \oldstylenums{3}\textsc{s}.{\DO}=to goat\\
\glt ‘I bought the goat (and it is mine now).’\\
\z
\is{Clitics!Directionals|)}\is{Deixis!Directionals|)}\is{Directionals|)}\is{Clitics!Adpositionals|)}
\subsection{Perfect}\label{sec:7.5.3}\is{Tense, mood, and aspect!Perfect|(}\is{Clitics!Perfect|(}
\hypertarget{RefHeading1212381525720847}{}
The final extension is =\textit{va}, the Perfect \citep{FriesenMamalis2008}. The Perfect marks events or states as having occurred prior to a particular point of reference\is{Cohesion!Point of reference}, with ongoing effect that continues to that point of reference \citep{Comrie1976}. The Perfect extension is bolded in the examples.
\clearpage
\ea\label{ex:7:124}
Tawəy, “Ambəɗə\textbf{va} anga ləme.”\\
\gll tawij à-mbəɗ=\textbf{va} aŋga lɪmɛ\\
\oldstylenums{3}\textsc{p}+said \oldstylenums{3}\textsc{s}+{\PFV}-change={\PRF} {\POSS} \oldstylenums{1}\textsc{Pex}\\
\glt ‘They said, “It has become ours!”’ (lit. it has changed; belonging to us)\\
\z
\ea\label{ex:7:125}
Nasar həraf ɛlɛ nəngehe asabay, \\
\gll nà-sar həraf ɛlɛ nɪŋgɛhɛ asa-baj \\
{\oneS}+{\PFV}-know medicine thing {\DEM} again-{\NEG} \\
\glt ‘I didn’t know how to resolve the problem,’ \\
\medskip
waya nəl\textbf{va} afa səwpərefe.\\
\gll waja n\`ə-l=\textbf{va} afa suwpɪrɛfɛ\\
because {\oneS}+{\PFV}-go={\PRF} {at house of} {sub prefect}\\
\glt ‘because I had already been to the sub-prefect [and he didn’t help me].’\\
\z
\ea\label{ex:7:126}
Təta na, tanjakə\textbf{va} ɛlɛ məzəme.\\
\gll təta na tà-nzak=\textbf{va} ɛlɛ mɪ-ʒɪm-ɛ\\
\oldstylenums{3}\textsc{p} {\PSP} \oldstylenums{3}\textsc{p}+{\PFV}-find={\PRF} thing {\NOM}{}-eat-{\CL}\\
\glt ‘And so they had found something to eat.’\\
\z
\ea\label{ex:7:127}
Arahə\textbf{va} peɗeɗe.\\
\gll à-rah\textbf{=va} pɛɗɛɗɛ\\
\oldstylenums{3}\textsc{s}+{\PFV}-fill={\PRF} \textsc{id}:full\\
\glt ‘It had filled right to the rim.’\\
\z
\ea\label{ex:7:128}
Nəzəm\textbf{va}.\\
\gll n\`ə-zəm\textbf{=va}\\
{\oneS}+{\PFV}-eat={\PRF}\\
\glt ‘I already ate.’\\
\z
Unlike the other extensions, the Perfect enclitic has two possible positions in the verb phrase. It can either be phonologically bound to the right edge of the verbal complex (see \sectref{sec:7.1}) or to the right edge of the clause (\chapref{chap:8}) after the direct object and adpositionals. In (\ref{ex:7:124}--\ref{ex:7:127}, \ref{ex:7:131}), =\textit{va} follows the adpositional and directional extensions in the verb complex and precedes other elements in the verb phrase. In \REF{ex:7:129} and \REF{ex:7:132}, =\textit{va} occurs at the end of the clause, a rarer construction that presumably occurs to underscore the idea that the event is already finished.
\clearpage
\ea\label{ex:7:129}\corpussource{Disobedient Girl, S. 17}\\
Azləna, hor na, asərkala afa təta \textbf{va} na, \\
\gll aɮəna hʷɔr na à-sərk=ala afa təta\textbf{=va} na\\
but woman {\PSP} \oldstylenums{3}\textsc{s}+{\PFV}-{habitually}=to {at house of} \oldstylenums{3}\textsc{p}={\PRF} {\PSP} \\
\glt ‘Now, that woman, she was in the habit at their place’\\
\medskip
aməhaya háy na, gam.\\
\gll amə-h=aja haj na gam\\
{\DEP}-grind={\PLU} millet {\PSP} {a lot}\\
\glt ‘[of] grinding a lot of millet.’\\
\z
The Perfect extension has neutral prosody itself and causes the loss of palatalisation of the verb stem (compare \ref{ex:7:130}--\ref{ex:7:131}, where the stem is /s-j\textsuperscript{e}/). Also, verb stems drop their /-j/ suffix when this extension is present. These features all confirm that \textit{=va} is an enclitic. In \REF{ex:7:130}, without the Perfect, the verb stem is palatalised. When the verb carries the Perfect extension \REF{ex:7:131}, the stem loses its palatalisation.
\ea\label{ex:7:130}
Nese gəzom.\\
\gll n\`ɛ-ʃ{}-ɛ gʊzɔm\\
{\oneS}+{\PFV}-drink-{\CL} {millet beer}\\
\glt ‘I drank millet beer.’\\
\z
\ea\label{ex:7:131}
Nasa\textbf{va} gəzom.\\
\gll n\`a{}-sa=\textbf{va} gʊzɔm\\
{\oneS}+{\PFV}-drink={\PRF} {millet beer}\\
\glt ‘I drank millet beer already.’\\
\z
Notably, palatalisation is lost even when there are intervening words \REF{ex:7:132}, even though the prosody of these words is unaffected.
\ea\label{ex:7:132}
Nasa gəzom \textbf{va}.\\
\gll nà{}-sa gʊzɔm=\textbf{va}\\
{\oneS}+{\PFV}-drink {millet beer}={\PRF}\\
\glt ‘I drank millet beer already.’\\
\z
Likewise, \REF{ex:7:133} illustrates the loss of palatalisation from the root /g-j\textsuperscript{e}/ ‘do’ when the Perfect is added.
\clearpage
\ea\label{ex:7:133}
Ləho aga\textbf{va}.\\
\gll lʊhʷɔ à-ga\textbf{=va}\\
{late afternoon} \oldstylenums{3}\textsc{s}+{\PFV}-do={\PRF}\\
\glt ‘It is the cool of the day (after three o’clock).’ (lit. late afternoon has done)\\
\z
\citet{Bow1997c} established that the Perfect extension\footnote{\citet{Bow1997c} called it an aspect or tense marker.} carries a floating tone. Its underlying tone is HL. She demonstrated the floating tone using two verbs with different tone melodies: the high tone verb /bal-j/ ‘wash’ (\ref{ex:7:134}--\ref{ex:7:135}) and the low tone verb /a-dar-j/ ‘plant’ (\ref{ex:7:136}--\ref{ex:7:137}), both with the object noun [háj] ‘millet.’ \REF{ex:7:134} and \REF{ex:7:136} show the two clauses without the Perfect for comparison. Comparing \REF{ex:7:135} with \REF{ex:7:137} demonstrates that the floating low tone on the Perfect has lowered the tone of ‘millet’ from high to mid, since there is no other low tone apparent that could be responsible for the lowering.
\ea\label{ex:7:134}
N\'əbalay háy.\\
\gll {}[n\'ə-bál-áj háj]\\
{\oneS}+{\IFV}-wash-{\CL} millet\\
\glt ‘I wash the millet.’\\
\z
\ea\label{ex:7:135}
Nəbalva háy.\\
\gll {}[n\=ə-bál=vá h\={a}j]\\
{\oneS}+{\PFV}-wash={\PRF} millet\\
\glt ‘I washed the millet already.’\\
\z
\ea\label{ex:7:136}
N\'ədaray háy.\\
\gll {}[n\'ə-dàr-\={a}j háj]\\
{\oneS}+{\IFV}-plant-{\CL} millet\\
\glt ‘I plant the millet.’\\
\z
\ea\label{ex:7:137}
Nədarva háy.\\
\gll {}[n\`ə-dàr=v\={a} h\={a}j]\\
{\oneS}+{\PFV}-plant={\PRF} millet\\
\glt ‘I planted the millet already.’\\
\z
The Perfect extension can mark the event expressed in a relative clause (\sectref{sec:5.4.3}) as having been accomplished before the event in the main clause, with relevance to the point of reference in the main clause \REF{ex:7:138}.
\ea\label{ex:7:138}
War elé háy ngəndəye nok ameze na \textbf{va}, bəlen ngəndəye na,\\
\gll war ɛlɛ haj ŋgɪndijɛ [nɔkʷ amɛ-ʒɛɗ{}-ɛ na\textbf{=va}] bɪlɛŋ ŋgɪndijɛ na\\
child eye millet {\DEM} {\twoS} {\DEP}-take-{\CL} \oldstylenums{3}\textsc{s}.{\DO}={\PRF} one {\DEM} {\PSP}\\
\glt ‘That grain that you have taken, that one [grain],’
\medskip
káahaya kə ver aka.\\
\gll káá-h=aja kə vɛr aka\\
{\twoS}+{\POT}-grind={\PLU} on {grinding stone} on\\
\glt ‘grind it on the grinding stone.’\\
\z
When the Perfect co-occurs with Perfective aspect\is{Tense, mood, and aspect!Perfective aspect}\is{Tense, mood, and aspect!Imperfective aspect} (\ref{ex:7:124}--\ref{ex:7:129}, \ref{ex:7:135}, \ref{ex:7:137}), it indicates that the event expressed by the verb took place before the point of reference established in the discourse. When the Perfect co-occurs with Imperfective aspect (\ref{ex:7:139}--\ref{ex:7:142}), the verb is resultative, referring to an ongoing state that is the result of a previous completed event (filling, becoming tired, ripening, or becoming angry).
\ea\label{ex:7:139}
Árahə\textbf{va}.\\
\gll á-ráh=\textbf{va}\\
\oldstylenums{3}\textsc{s}+{\IFV}-fill={\PRF}\\
\glt ‘It is full.’\\
\z
\ea\label{ex:7:140}
Mana áyəɗə\textbf{va.}\\
\gll Mana á-jəɗ=\textbf{va}\\
Mana \oldstylenums{3}\textsc{s}+{\IFV}-tire={\PRF}\\
\glt ‘Mana is tired.’\\
\z
\ea\label{ex:7:141}
Háy ánahə\textbf{va}.\\
\gll haj á-nah=\textbf{va}\\
millet \oldstylenums{3}\textsc{s}+{\IFV}-ripen={\PRF}\\
\glt ‘The millet is ripe.’\\
\z
\ea\label{ex:7:142}\corpussource{Disobedient Girl, S. 33}\\
Məloko ahay tawəy, “Hərmbəlom ága ɓərav \textbf{va}\\
\gll mʊlɔkʷɔ=ahaj tawij Hʊrmbʊlɔm á-ga ɓərav\textbf{=va} \\
Moloko=Pl \oldstylenums{3}\textsc{p}+said God \oldstylenums{3}\textsc{s}+{\IFV}-do heart={\PRF}\\
\glt ‘The Molokos say, “God got angry’\\
\medskip
\clearpage
kəwaya war dalay na, amecen sləmay bay ngəndəye.”\\
\gll kuwaja war dalaj na amɛ-tʃɛŋ ɬəmaj baj ŋgɪndijɛ\\
because child girl {\PSP} {\DEP}-hear ear {\NEG} {\DEM}\\
\glt ‘because of that girl, that one that was disobedient.”’\\
\z
In narrative discourse, the Perfect\is{Tense, mood, and aspect!Perfect|)} verbal extension \textit{=va} marks events that occur prior to the events on the main story line, and which supply flashback information to the story. For example, in the setting of the Disobedient Girl story (S. 2), the Perfect marks God giving his blessing to the people. This blessing preceded the events of the story \REF{ex:7:143} and had an ongoing effect at the time of the story.
\ea\label{ex:7:143}\corpussource{Disobedient Girl, S. 3}\\
Zlezle na, Məloko ahay na, Hərmbəlom ávəlata barka \textbf{va}.\\
\gll ɮlɛɮɛ na Mʊlɔkʷɔ=ahaj na Hʊrmbʊlɔm á-vəl=ata \\
{long ago} {\PSP} Moloko=Pl {\PSP} God \oldstylenums{3}\textsc{s}+{\IFV}-send=\oldstylenums{3}\textsc{p}.{\IO}\\
\medskip
\gll barka=\textbf{va}\\
blessing={\PRF}\\
\glt ‘Long ago, to the Moloko people, God had given his blessing.’\\
\z
In the body of the Disobedient Girl story (\ref{ex:7:129} above), the story flashes back to the woman’s prior situation, using the Perfect, in order to prepare the reader/hearer for what will happen next in the story. In the body of another fable (the race between the giraffe and the toad, \citealt{Friesen2003}), the Perfect marks a flashback to a prior action of the toad.
\ea\label{ex:7:144}
Macəkəmbay moktonok na, abək ta aya \textbf{va}\\
\gll matsəkəmbaj mɔkʷtɔnɔkʷ na a-bək ta=aja=\textbf{va}\\
meantime toad {\PSP} \oldstylenums{3}\textsc{s}-invite \oldstylenums{3}\textsc{p}.{\DO}={\PLU}={\PRF}\\
\glt ‘In the meantime the toad, he had already invited’\\
\medskip
məlama ahan ahay jəyga.\\
\gll məlama=ahaŋ=ahaj dzijga\\
brother=\oldstylenums{3}\textsc{s}.{\POSS}=Pl all\\
\glt ‘all of his brothers.’\\
\z
\is{Clitics!Perfect|)}
\section{Nominalised verb form}\label{sec:7.6}\is{Derivational processes!Verb to noun|(}
\hypertarget{RefHeading1212401525720847}{}
The nominalised verb form\footnote{\citet{FriesenMamalis2008} called this form the ‘infinitive.’} is derived from a verb\is{Derivational processes!Verb to noun} stem by the addition of the prefix /\textit{m-}/ plus a palatalised suffix [{}-ɛ].\footnote{There is also an irregular nominalisation process that has already been discussed (\sectref{sec:4.2}).} Syntactically, the nominalised form can pattern as a noun (see \sectref{sec:7.6.1}), and in certain cases it can pattern as a verb, taking some inflectional components such as object suffixes and extensions (see \sectref{sec:7.6.2}). In the examples below, both underlying and nominalised forms are given. The nominalised form indicates an event (race, \ref{ex:7:145}; betrayal, \ref{ex:7:146}) or state (beauty, \ref{ex:7:147}; coldness, \ref{ex:7:148}).
\ea\label{ex:7:145}
/h-m-j/ \hspace{40pt} [mɪ-hɪm-ɛ]\\
\glt ‘run' \hspace{50pt} ‘race’
\z
\ea\label{ex:7:146}
/tʃaf\textsuperscript{e}/ \hspace{45pt} [mɪ- tʃɛf-ɛ]\\
\glt ‘betray’ \hspace{40pt} ‘betrayal’
\z
\ea\label{ex:7:147}
/r ɓ-j/ \hspace{45pt} [mɪ-rɪɓ-ɛ]\\
\glt ‘be beautiful’ \hspace{15pt} ‘beauty’
\z
\ea\label{ex:7:148}
/ndaɬ-j\textsuperscript{e}/ \hspace{34pt} [mɪ-ndɛɬ-ɛ]\\
\glt ‘make cold’ \hspace{25pt} ‘coldness’
\z
In the case where a verb stem consists of one single consonant, the nominalised form receives an additional syllable [{}-ijɛ].
\ea\label{ex:7:149}
/dz-j/ \hspace{55pt} [mɪ-dʒ-ijɛ]\\
\glt ‘say’ \hspace{60pt} ‘saying’
\z
\ea\label{ex:7:150}
/s-j\textsuperscript{e}/ \hspace{55pt} [mɪ-ʃ{}-ijɛ]\\
\glt ‘drink’ \hspace{53pt} ‘drinking’
\z
\ea\label{ex:7:151}
\textup{/l\textsuperscript{o}/ \hspace{65pt} [mɪ-l-ijɛ}]\\
\glt ‘go’ \hspace{65pt} ‘going’
\z
If present, the underlying \textit{a-} prefix in a verb stem shows up in the prefix vowel of the nominalised form. The prefix vowel in an \textit{a-} prefix verb is full; in \REF{ex:7:152} and \REF{ex:7:153}, this full vowel is realised as [ɛ] due to the palatalisation prosody which is part of the nominalising morphology. Compare with (\ref{ex:7:145}--\ref{ex:7:148}) where [mɪ-] is the prefix for verb stems with no \textit{a-} prefix.
\ea\label{ex:7:152}
\textup{/a-d a r–aj/ \hspace{27pt} [mɛ-dɛr-ɛ}]\\
\glt ‘plant’ \hspace{55pt} ‘planting’
\z
\ea\label{ex:7:153}
\textup{/a-d l/ \hspace{52pt} [mɛ-dɪl-ɛ}]\\
\glt ‘overtake’ \hspace{40pt} ‘overtaking’
\z
The tone pattern of the nominalised form reflects the underlying tone of the verb stem. \tabref{tab:68} (from \citealt{FriesenMamalis2008}) illustrates a few nominalised forms that suggest this pattern.
\begin{table}
\resizebox{\textwidth}{!}{\begin{tabular}{lllll}
\lsptoprule
{Tone class} & {Underlying form} & {Nominalised form} & {Imperative} & {Gloss}\\\midrule
{High tone verb stems} & /nz a k-j / & [m\'ɪ{ }-nʒ\`ɛk-\`ɛ] & [nzák-áj] & ‘find’\\
& / z m\textsuperscript{ o}/ & [m\'ɪ-ʒùm-\`ɛ] & [z\'ɔm] & ‘eat’\\\midrule
{Low tone verb stems } & /f ɗ/ & [m\=ɪ-f\=ɪɗ-\=ɛ] & [f\={a}ɗ] & ‘put’\\
{without depressor consonants} & /tats-j / & [m\=ɪ-t\={e}tʃ-\=ɛ] & [t\={a}ts-áj] & ‘close’\\\midrule
{Low tone verb stems } & /v h n-j / & [m\=ɪ-v\'ɪh\=ɪn-\=ɛ] & [v\`əh\`ən-\={a}j] & ‘vomit’\\
{with depressor consonants} & /a-dar-j / & [m\=ɛ{}-d\=ɛr-\=ɛ] & [dàr-\={a}j] & ‘plant’\\\midrule
{Toneless verb stems} & /d ɗ/ & [m\`ɪ-d\'ɪɗ-\=ɛ] & [dàɗ] & ‘fall’\\
& /nd z/ & [m\`ɪ-nd\'ɛʒ-\=ɛ] & [ndàz] & ‘pierce’\\
\lspbottomrule
\end{tabular}}
\caption{Nominalised form tone patterns\label{tab:68}}
\end{table}
\subsection{Nominalised form as noun}\label{sec:7.6.1}
\hypertarget{RefHeading1212421525720847}{}
As a noun, the nominalised form takes modifiers the same as any abstract noun, i.e., quantifier \REF{ex:7:155}, numeral \REF{ex:7:156}, possessive pronoun \REF{ex:7:154}, demonstrative \REF{ex:7:157}, adjectiviser (\ref{ex:7:158}--\ref{ex:7:160}) but not plural (see \sectref{sec:4.2.5}). Any argument of the clause can be realised with a nominalisation. The noun phrase is marked off by square brackets and the nominalised form is bolded in the examples.
\ea\label{ex:7:154}
{}[\textbf{Məhəme} aloko na], epeley?~\\
\gll {}[\textbf{mɪ-hɪm-ɛ}=alɔkʷɔ na] ɛpɛlɛj~\\
{\NOM}{}-run-{\CL}=\oldstylenums{1}\textsc{Pin}.{\POSS} {\PSP} when\\
\glt ‘When is our race?’ (lit. our running [is] when)\\
\z
\clearpage
\ea\label{ex:7:155}\corpussource{Disobedient Girl, S. 4}\\
Ávata [\textbf{məvəye} haɗa].\\
\gll á-v=ata [\textbf{mɪ-v-ijɛ} haɗa]\\
\oldstylenums{3}\textsc{s}+{\IFV}-{spend time}=\oldstylenums{3}\textsc{p}.{\IO} {\NOM}{}-{spend time}-{\CL} many\\
\glt ‘It would last them the whole year.’ (lit. it will {spend time} for them many time-spending\footnote{The nominalised form of the verb ‘{spend time}’ has been lexicalized as ‘year.’})\\
\z
\ea\label{ex:7:156}
Ege [\textbf{məvəye} məko] ehe, nawas háy əwla.\\
\gll ɛ{}-g-ɛ [\textbf{mɪ-v-ijɛ} mʊkʷɔ] ɛhɛ na-was haj=uwla\\
\oldstylenums{3}\textsc{s}-do-{\CL} {\NOM}{}-{spend time}-{\CL} six here {\oneS}-cultivate millet={\oneS}.{\POSS}\\
\glt ‘Six years ago (lit. it did six years), I cultivated my millet.’
\z
\ea\label{ex:7:157}
{}[\textbf{Medəre} nehe na], səlom ga.\\
\gll {}[\textbf{mɛ-dɪr-ɛ} nɛhɛ na] sʊlɔm ga\\
{\NOM}{}-plant-{\CL} {\DEM} {\PSP} goodness {\ADJ}\\
\glt ‘This planting is good.’\\
\z
Adjectives can be further derived from a nominalised verb form by adding \textit{ga}, as is true of any noun (\sectref{sec:4.3}). Adjectives that are derived from nominalised verbs express resultant states. For example, the peanuts in \REF{ex:7:158} are already ground, the woman in \REF{ex:7:159} is already beautiful, the man is already seated in \REF{ex:7:160}. The nominalised forms are bolded in the examples.
\ea\label{ex:7:158}
Nadok [andəra \textbf{məngəlɗe ga}].\\
\gll na-d=ɔkʷ [andəra \textbf{mɪ-ŋgɪlɗ-ɛ} \textbf{ga}]\\
{\oneS}-prepare={\twoS}.{\IO} peanut {\NOM}{}-grind-{\CL} {\ADJ}\\
\glt ‘I made peanut butter (lit. ground peanuts) for you.’\\
\z
\ea\label{ex:7:159}
Avəlaw [war dalay \textbf{mərəɓe} ga].\\
\gll a-vəl=aw [war dalaj \textbf{mɪ-rɪɓ-ɛ} ga]\\
\oldstylenums{3}\textsc{s}-give={\oneS}.{\IO} child female {\NOM}{}-{be beautiful}-{\CL} {\ADJ}\\
\glt ‘He/she gave me a beautiful girl.’ \\
\z
\ea\label{ex:7:160}
Ndahan [\textbf{mənjəye} ga].\\
\gll ndahaŋ [\textbf{mɪ-nʒ-ijɛ} ga]\\
\oldstylenums{3}\textsc{s} {\NOM}{}-sit-{\CL} {\ADJ}\\
\glt ‘He/she [is] seated.’\\
\z
It is interesting that noun phrases where the head noun is a nominalised verb behave like a clause when there is a noun modifier. The nominalised verb can be the head of a genitive construction (see \sectref{sec:5.4.1}), a permanent attribution construction (see \sectref{sec:5.4.2}), or an argument in another clause (see \sectref{sec:12.1.1}). In the genitive construction (\ref{ex:7:154} and \ref{ex:7:164}), the second noun represents the subject of the verb stem. In the other constructions (\ref{ex:7:161}--\ref{ex:7:162}), the second noun represents the direct object of the nominalised verb.
\ea\label{ex:7:161}
məbeze háy\\
\gll mɪ-bɛʒ-ɛ haj\\
{\NOM}{}-harvest-{\CL} millet\\
\glt ‘the millet harvest’\\
\z
\ea\label{ex:7:162}
andəra məngəlɗe ga\\
\gll andəra mɪ-ŋgɪlɗ-ɛ ga\\
peanut {\NOM}{}-grind-{\CL} {\ADJ}\\
\glt ‘ground peanuts’\\
\z
\ea\label{ex:7:163}
mənjəye a Mana\\
\gll mɪ-nʒ-ijɛ a Mana\\
{\NOM}{}-sit-{\CL} {\GEN} Mana\\
\glt ‘Mana’s behaviour’ (lit. the sitting of Mana)\\
\z
\ea\label{ex:7:164}
\textbf{məhəme} aloko\\
\gll \textbf{mɪ-hɪm-ɛ}=alɔkʷɔ\\
{\NOM}{}-run-{\CL}=\oldstylenums{1}\textsc{Pin}.{\POSS}\\
\glt ‘our race’ (lit. the running of us)\\
\z
\subsection{Nominalised form as verb}\label{sec:7.6.2}
\hypertarget{RefHeading1212441525720847}{}
The nominalised form can fill the verb slot in a clause (discussed further in \sectref{sec:8.2.3} and \sectref{sec:9.4}). Examples \REF{ex:7:165} and \REF{ex:7:166} are full (complete) clauses on the main event line where the verb is in nominalised form. Such clauses are found at the inciting moment and peak of a narrative.\is{Focus and prominence!Discourse peak} The nominalised form is not conjugated for subject or direct object, but the clause may have a subject (the \oldstylenums{3}\textsc{s} pronoun \textit{ndahan} in \ref{ex:7:165}) or direct object (\textit{yam} ‘water’ in \ref{ex:7:165}) and other clausal elements. The nominalised form can take verbal extensions (\oldstylenums{3}\textsc{p} indirect object \textit{=ata}, adpositional \textit{=aka} and Perfect \textit{=va} in \ref{ex:7:165}; the adpositional \textit{=ava} and the directional \textit{=alay} in \ref{ex:7:166}).
\ea\label{ex:7:165}
Ndahan ngah \textbf{mangəhata aka va} yam a ver ahan ava.\\
\gll ndahaŋ ŋgah \textbf{ma-ŋgəh=ata=aka=va} jam a vɛr=ahaŋ ava\\
\oldstylenums{3}\textsc{s} hide {\NOM}{}-hide=\oldstylenums{3}\textsc{p}.{\IO}=on={\PRF} water in room=\oldstylenums{3}\textsc{s}.{\POSS} in\\
\glt ‘He had hidden the water in his room’ (lit. he hide-hiding water in his room)\\
\z
\ea\label{ex:7:166}
Məmətava alay a ver ava.\\
\gll mə-mət=ava=alaj a vɛr ava\\
{\NOM}{}-die=in=away in room in\\
\glt ‘[She] died in the room.’ \\
\z
\subsection{Verb focus construction}\label{sec:7.6.3}\is{Focus and prominence!Verb focus construction}
\hypertarget{RefHeading1212461525720847}{}
The nominalised form of a verb is used in an idiomatic construction that functions to bring focus on the verb. The verb focus construction is composed of an inflected verb followed by an adpositional phrase (see \sectref{sec:5.6.1}) containing the same verb in nominalised form. \REF{ex:7:167} shows the construction \textit{nəskom nə məskwəme} ‘I really did buy it’ (lit. I bought [it] with buying). This construction specifies that the action is done ‘by means of’ or ‘by actually’ doing something (to the exclusion of all other possibilities). It is used by the speaker to contest a real or implied challenge of the validity of what has been said. In \REF{ex:7:167}, the speaker is saying that he actually bought a particular item, i.e. he didn’t steal it and nobody gave it to him. Likewise, (\ref{ex:7:168}--\ref{ex:7:170}) illustrate other verbs in this construction.
\ea \label{ex:7:167}
Awəy, “Nəskom \textbf{nə məskwəme.”}\\
\gll awij nʊ-sʊkʷɔm \textbf{nə} \textbf{mɪ-søkʷøm-ɛ}\\
said {\oneS}-buy with {\NOM}{}-buy-{\CL}\\
\glt ‘He said, “I actually bought it.”’ (lit. I bought it with buying)
\z
\ea \label{ex:7:168}
Káslay awak \textbf{nə} \textbf{məsləye}.\\
\gll ká-ɬ{}-aj awak \textbf{nə} \textbf{mɪ-ɬ{}-ijɛ}\\
{\twoS}+{\IFV}-slay{}-{\CL} goat with {\NOM}{}-slay-{\CL}\\
\glt ‘You kill goats by cutting their throat and not by any other way’ (lit. you slay a goat with slaying)
\z
\ea \label{ex:7:169}
Kákaɗ okfom \textbf{nə məkəɗe}. Káslay bay.\\
\gll ká-kaɗ ɔkʷfɔm \textbf{nə} \textbf{mɪ-kɪɗ-ɛ} ká-ɬ{}-aj baj\\
{\twoS}+{\IFV}-kill(club) mouse with {\NOM}{}-kill(club)-{\CL} {\twoS}+{\IFV}-slay-{\CL} {\NEG}\\
\glt ‘You kill mice by smashing their head; you don’t cut their throats.’
\z
\ea \label{ex:7:170}
Kándaz \textbf{nə məndəze} awak anga pəra.\\
\gll ká-ndaz \textbf{nə} \textbf{mɪ-ndɪʒ-ɛ} awak aŋga pəra\\
{\twoS}+{\IFV}-kill(pierce) with {\NOM}{}-kill(pierce)-{\CL} goat {\POSS} idol\\
\glt ‘You kill a goat for the idols by piercing it (you don’t cut its throat).’ (lit. you kill with killing a goat that belongs to an idol)
\z
\is{Derivational processes!Verb to noun|)}
\section{Dependent verb forms}\label{sec:7.7}
\hypertarget{RefHeading1212481525720847}{}
A dependent verb form is formed by prefixing \textit{am-} to the verb stem, palatalisation, and the suffix \textit{{}-e} (or \textit{{}-əye} for verb roots of one syllable). Historically, this construction may involve the nominalised form (see \sectref{sec:7.6}) preceded by the preposition \textit{a} ‘to.’\footnote{Crosslinguistic studies reveal that locatives can give rise to Imperfectives (\citealt[103]{Comrie1976}; \citealt[142]{BybeeEtAl1994}; \citealt[99]{HeineKuteva2002}).} In any case it acts as a single unit now. \tabref{tab:69} shows examples of the dependent verb form for stems of each underlying prosody. The table gives the underlying form, the third person singular form, the nominalised form, and the dependent form.
\begin{table}
\resizebox{\textwidth}{!}{%
\begin{tabular}{lllll}
\lsptoprule
{Underlying form} & {Gloss} & {\oldstylenums{3}\textsc{s} form} & {Nominalised form} & {Dependent form}\\\midrule
/h m-j/ & ‘run’ & \textit{a-həm-ay} & \textit{mə-həm-e} & \textit{amə-həm-e}\\
/ d-j\textsuperscript{e}/ & ‘prepare’ & \textit{e{}-d-e} & \textit{mə-d-əye} & \textit{amə-d-əye}\\
/s kʷ m/ & ‘buy’/‘sell’ & \textit{a-səkom} & \textit{mə-skwəm-e} & \textit{amə-skwəm-e}\footnote{Note that the labialised consonant /kʷ/ keeps its labialisation even when the word is palatalised (\sectref{sec:2.2.2}).}\\
\lspbottomrule
\end{tabular}}
\caption{Dependent verb forms\label{tab:69}}
\end{table}
There are no subject inflections on the dependent verb form; the subject is determined either by the subject of the matrix clause (a gap for subject is marked as Ø in \ref{ex:7:171}, \ref{ex:7:173}, and \ref{ex:7:174}) or a pronoun within the dependent clause indicating subject (\ref{ex:7:172}--\ref{ex:7:176}). The dependent form of the verb may receive object suffixes and extensions. The dependent verb form is used when clauses that carry an imperfective or unfinished idea are embedded in other constructions. The clause structure is illustrated in \figref{fig:14}.
\clearpage
\begin{figure}
\resizebox{\textwidth}{!}{\frame{%
\begin{tabular}{l>{\bfseries}llll}
(subject & {Dependent verb plus } & (direct object & (oblique adposi- & (adverb)\\
pronoun) & extensions expressing event & noun phrase) & tional phrase) & \\
\end{tabular}}}
\caption{Constituent order in dependent clauses\label{fig:14}}
\end{figure}
The types of clauses that employ dependent verb forms are:
\begin{itemize}
\item {Relative clauses} (\sectref{sec:5.4.3})
\item {Adverbial clauses} (\sectref{sec:12.2})
\item {Complement clauses} (\sectref{sec:12.1})
\end{itemize}
The relative clause is a noun phrase modifier (\ref{ex:7:171}--\ref{ex:7:176}). In the examples in this section, the dependent verb is bolded and the dependent clause is marked with square brackets.
\ea\label{ex:7:171}\corpussource{Disobedient Girl, S. 38}\\
War dalay ga ngendəye \\
\gll war dalaj ga ŋgɛndijɛ \\
child girl {\ADJ} {\DEM} {} \\
\glt ‘that young woman ’\\
\medskip
[\textbf{amazata aka ala} avəya nengehe ana məze ahay na.]\\
\gll [Ø \textbf{ama-z=ata}\textbf{=aka}\textbf{=ala} avija nɛŋgɛhɛ ana mɪʒɛ=ahaj na]\\
{} {\DEP}-carry=\oldstylenums{3}\textsc{p}.{\IO}=on=to suffering {\DEM} {\DAT} person=Pl {\PSP}\\
\glt ‘that had brought this suffering to the people.’\\
\z
\ea\label{ex:7:172}
Tasan oko ana hay [ata \textbf{aməgəye} \textbf{na va}].\\
\gll ta-s-aŋ ɔkʷɔ ana haj[=atəta \textbf{amɪ-g-ijɛ} \textbf{na}\textbf{=va}]\\
\oldstylenums{3}\textsc{p}-cut=\oldstylenums{3}\textsc{s}.{\DO} fire {\DAT} house\hspaceThis{[}=\oldstylenums{3}\textsc{p}.{\POSS} {\DEP}-do-{\CL} \oldstylenums{3}\textsc{s}.{\DO}={\PRF}\\
\glt ‘They (the attackers) set fire to the house that the others had built (lit. their house to prepare).’\\
\z
Adverbial clauses in Moloko are subordinate temporal clauses that are embedded in the main clause as the first \REF{ex:7:173} or last \REF{ex:7:174} element.
\clearpage
\ea\label{ex:7:173}
{}[A\textbf{məhaya} həmbo na], anday asakala wəsekeke.\\
\gll {}[Ø \textbf{amə-h}\textbf{=aja} hʊmbɔ na] a-ndaj a-sak =ala wuʃɛkɛkɛ\\
{} {\DEP}-grind={\PLU} flour {\PSP} \oldstylenums{3}\textsc{s}-{\PRG} \oldstylenums{3}\textsc{s}-multiply =to \textsc{id}:multiply\\
\glt ‘While [she] was grinding the flour, [the millet] was multiplying \textit{wəshekeke}.’\\
\z
\ea\label{ex:7:174} \corpussource{Cicada, S. 16}\\
Kəlen albaya ahay tolo [\textbf{amazala} agwazla na].\\
\gll kɪlɛŋ albaja=ahaj tɔ-lɔ [Ø \textbf{ama-z=ala} agʷaɮa na]\\
then {young men}=Pl \oldstylenums{3}\textsc{p}-go {} {\DEP}-take=to {spp. of tree} {\PSP}\\
\glt ‘Then the young men went to try to bring back the tree [to the chief].’\\
\z
The complement clause can function as the subject \REF{ex:7:175} or the direct object \REF{ex:7:176} of the matrix verb.
\ea\label{ex:7:175}
Asaŋ [\textbf{amadata aka va} azan].\\
\gll a-s=aŋ [Ø \textbf{ama-d=ata=aka=va } azaŋ]\\
\oldstylenums{3}\textsc{s}-please=\oldstylenums{3}\textsc{s}.{\IO} {} {\DEP}-prepare=\oldstylenums{3}\textsc{p}.{\IO}=on={\PRF} temptation\\
\glt ‘He wanted to tempt them.’ (lit. to prepare a temptation for them [is] pleasing to him)\\
\z
\ea\label{ex:7:176}
Məkəɗ va azla tazlan [aləme \textbf{aməzləge} va].\\
\gll mə-kəɗ va aɮa ta-ɮ=aŋ [alɪmɛ \textbf{amɪ-ɮɪg-ɛ} va]\\
{\NOM}-kill body now \oldstylenums{3}\textsc{p}-begin=\oldstylenums{3}\textsc{s}.{\IO} \oldstylenums{1}\textsc{Pex}.{\POSS} {\DEP}-plant-{\CL} body\\
\glt ‘Combat now, they began to fight with us.’ (lit. killing body now, they started it, our planting bodies)\\
\z
| {
"alphanum_fraction": 0.6855772007,
"avg_line_length": 65.4345930233,
"ext": "tex",
"hexsha": "2aefe9d238b4320409d75b97ca8c901727915d57",
"lang": "TeX",
"max_forks_count": null,
"max_forks_repo_forks_event_max_datetime": null,
"max_forks_repo_forks_event_min_datetime": null,
"max_forks_repo_head_hexsha": "8c84b650c980d24d0811b9ea6e5106ebf1e67b0f",
"max_forks_repo_licenses": [
"CC-BY-4.0"
],
"max_forks_repo_name": "langsci/118",
"max_forks_repo_path": "chapters/7.tex",
"max_issues_count": null,
"max_issues_repo_head_hexsha": "8c84b650c980d24d0811b9ea6e5106ebf1e67b0f",
"max_issues_repo_issues_event_max_datetime": null,
"max_issues_repo_issues_event_min_datetime": null,
"max_issues_repo_licenses": [
"CC-BY-4.0"
],
"max_issues_repo_name": "langsci/118",
"max_issues_repo_path": "chapters/7.tex",
"max_line_length": 1949,
"max_stars_count": null,
"max_stars_repo_head_hexsha": "8c84b650c980d24d0811b9ea6e5106ebf1e67b0f",
"max_stars_repo_licenses": [
"CC-BY-4.0"
],
"max_stars_repo_name": "langsci/118",
"max_stars_repo_path": "chapters/7.tex",
"max_stars_repo_stars_event_max_datetime": null,
"max_stars_repo_stars_event_min_datetime": null,
"num_tokens": 50936,
"size": 135057
} |
%%% Attachments to the master thesis, if any. Each attachment must be
%%% referred to at least once from the text of the thesis. Attachments
%%% are numbered.
%%%
%%% The printed version should preferably contain attachments, which can be
%%% read (additional tables and charts, supplementary text, examples of
%%% program output, etc.). The electronic version is more suited for attachments
%%% which will likely be used in an electronic form rather than read (program
%%% source code, data files, interactive charts, etc.). Electronic attachments
%%% should be uploaded to SIS and optionally also included in the thesis on a~CD/DVD.
%%% Allowed file formats are specified in provision of the rector no. 72/2017.
\appendix
\chapter{Attachments}
\section{Digital attachments}
\subsection{msc-neuro}\label{at:msc-neuro}
Repository containing all tools introduced in \refsection{ch:3.3} as well as the implementation of experiments from \refsection{ch:5}. For description, refer to \texttt{./README.md}. The repository is also available online at: \href{https://github.com/petrroll/msc-neuro}{https://github.com/petrroll/msc-neuro}.
\subsection{NDN3}\label{at:ndn3}
Fork of the NDN3 library containing all extensions described in \refsection{ch:3.2}, including the additions not (yet) merged upstream\footnote{\href{https://github.com/NeuroTheoryUMD/NDN3}{https://github.com/NeuroTheoryUMD/NDN3}}. Also available online: \href{https://github.com/petrroll/NDN3/tree/messyDevelop}{https://github.com/petrroll/NDN3/tree/messy\-Develop}. | {
"alphanum_fraction": 0.7737940026,
"avg_line_length": 73.0476190476,
"ext": "tex",
"hexsha": "dc056e62967f60c94ab52231e6bfb952494c2e5e",
"lang": "TeX",
"max_forks_count": 1,
"max_forks_repo_forks_event_max_datetime": "2020-11-25T21:44:31.000Z",
"max_forks_repo_forks_event_min_datetime": "2020-11-25T21:44:31.000Z",
"max_forks_repo_head_hexsha": "65219d1819f7d93f154bd2dc1484727a52a00229",
"max_forks_repo_licenses": [
"CC-BY-4.0"
],
"max_forks_repo_name": "petrroll/msc-thesis",
"max_forks_repo_path": "text/chapters/attachments.tex",
"max_issues_count": 1,
"max_issues_repo_head_hexsha": "65219d1819f7d93f154bd2dc1484727a52a00229",
"max_issues_repo_issues_event_max_datetime": "2020-11-26T21:02:36.000Z",
"max_issues_repo_issues_event_min_datetime": "2020-11-26T12:37:50.000Z",
"max_issues_repo_licenses": [
"CC-BY-4.0"
],
"max_issues_repo_name": "petrroll/msc-thesis",
"max_issues_repo_path": "text/chapters/attachments.tex",
"max_line_length": 367,
"max_stars_count": null,
"max_stars_repo_head_hexsha": "65219d1819f7d93f154bd2dc1484727a52a00229",
"max_stars_repo_licenses": [
"CC-BY-4.0"
],
"max_stars_repo_name": "petrroll/msc-thesis",
"max_stars_repo_path": "text/chapters/attachments.tex",
"max_stars_repo_stars_event_max_datetime": null,
"max_stars_repo_stars_event_min_datetime": null,
"num_tokens": 391,
"size": 1534
} |
\hypertarget{sparse}{%
\section{Sparse}\label{sparse}}
The Sparse class provides support for sparse matrices. An empty sparse
matrix can be initialized with a given size,
\begin{lstlisting}
var a = Sparse(nrows,ncols)
\end{lstlisting}
Alternatively, a matrix can be created from an array of triplets,
\begin{lstlisting}
var a = Sparse([[row, col, value] ...])
\end{lstlisting}
For example,
\begin{lstlisting}
var a = Sparse([[0,0,2], [1,1,-2]])
\end{lstlisting}
creates the matrix
\begin{lstlisting}
[ 2 0 ]
[ 0 -2 ]
\end{lstlisting}
Once a sparse matrix is created, you can use all the regular arithmetic
operators with matrix operands, e.g.
\begin{lstlisting}
a+b
a*b
\end{lstlisting}
| {
"alphanum_fraction": 0.7274031564,
"avg_line_length": 18.8378378378,
"ext": "tex",
"hexsha": "b19b719d27c22960a49144051066fb86e9a2478e",
"lang": "TeX",
"max_forks_count": 2,
"max_forks_repo_forks_event_max_datetime": "2021-10-31T19:55:27.000Z",
"max_forks_repo_forks_event_min_datetime": "2021-10-05T16:56:16.000Z",
"max_forks_repo_head_hexsha": "50bb935653c0675b81e9f2d78573cf117971a147",
"max_forks_repo_licenses": [
"MIT"
],
"max_forks_repo_name": "mattsep/morpho",
"max_forks_repo_path": "manual/src/Reference/sparse.tex",
"max_issues_count": 79,
"max_issues_repo_head_hexsha": "50bb935653c0675b81e9f2d78573cf117971a147",
"max_issues_repo_issues_event_max_datetime": "2022-03-31T16:06:10.000Z",
"max_issues_repo_issues_event_min_datetime": "2021-10-05T17:33:19.000Z",
"max_issues_repo_licenses": [
"MIT"
],
"max_issues_repo_name": "mattsep/morpho",
"max_issues_repo_path": "manual/src/Reference/sparse.tex",
"max_line_length": 71,
"max_stars_count": 10,
"max_stars_repo_head_hexsha": "50bb935653c0675b81e9f2d78573cf117971a147",
"max_stars_repo_licenses": [
"MIT"
],
"max_stars_repo_name": "mattsep/morpho",
"max_stars_repo_path": "manual/src/Reference/sparse.tex",
"max_stars_repo_stars_event_max_datetime": "2022-03-26T11:41:50.000Z",
"max_stars_repo_stars_event_min_datetime": "2021-09-18T14:44:14.000Z",
"num_tokens": 202,
"size": 697
} |
\documentclass[12pt]{article}
\usepackage{fullpage}
\usepackage{amsthm}
\usepackage{amsfonts,amsmath, amssymb,latexsym,mathrsfs}
\usepackage[margin=1.15in]{geometry}
\usepackage{enumitem}
\setlength{\parindent}{0pt}
\usepackage{tikz-cd}
\usepackage{fancyhdr}
\usepackage{multicol}
\theoremstyle{definition}
\newtheorem{thm}{Theorem}[section]
\newtheorem{prop}{Proposition}[section]
\theoremstyle{definition}
\newtheorem{definition}{Definition}[section]
\theoremstyle{remark}
\newtheorem*{remark}{Remark}
\theoremstyle{definition}
\newtheorem{example}{Example}[section]
\theoremstyle{definition}
\newtheorem{lem}{Lemma}[section]
\theoremstyle{definition}
\newtheorem{cor}{Corollary}[section]
\date{}
\title{Final Exam Summary (Everything, formula)}
\begin{document}
\maketitle
\section{Integral}
\section{Riemann Sums}
\begin{enumerate}
\item$\int_a^bf(x)dx= \lim_{n\rightarrow \infty} \sum_{i=1}^n f(x_i)\Delta x\text{\bf (Limit of Right-hand sum RIGHT(n))}$
\item$\int_a^bf(x)dx= \lim_{n\rightarrow \infty} \sum_{i=0}^{n-1} f(x_i)\Delta x\text{\bf (Limit of Left-hand sum LEFT(n))}$
\item$\int_a^bf(x)dx\approx \sum_{i=0}^{n-1} f(\frac{x_i+x_{i+1}}{2})\Delta x\text{\bf (Limit of Mid sum MID(n))}$
\item$\int_a^bf(x)dx\approx \sum_{i=0}^{n-1} \frac{f(x_i)+f(x_{i+1})}{2}\Delta x\text{\bf (Limit of Trapezoid sum TRAP(n))}$
\item$\Delta x=\dfrac{b-a}{n}$
\item$\frac{LEFT(n)+RIGHT(n)}{2}=TRAP(n)$
\item$MID(n)\neq TRAP(n)$
\item Error estimation (for monotone $f$): $\left|LEFT(n) - \int_a^b f(x)\,dx\right|<|LEFT(n)-RIGHT(n)|=|f(b)-f(a)|\,\Delta x$. This usually gives a bound for $n$ (see the worked example after this list).
\end{enumerate}
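As a quick worked check of these sums (an illustrative example): take $f(x)=x^2$ on $[0,1]$ with $n=2$, so $\Delta x = 0.5$ and the points are $0, 0.5, 1$. Then
\[LEFT(2)=(0+0.25)(0.5)=0.125,\qquad RIGHT(2)=(0.25+1)(0.5)=0.625,\]
\[TRAP(2)=\frac{0.125+0.625}{2}=0.375,\qquad MID(2)=\big(f(0.25)+f(0.75)\big)(0.5)=0.3125,\]
while the exact value is $\int_0^1 x^2\,dx = \tfrac{1}{3}\approx 0.333$; here $|LEFT(2)-RIGHT(2)|=(f(1)-f(0))\Delta x = 0.5$ bounds the error of either one-sided sum.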
\subsection{Properties of Riemann sums:}
\begin{enumerate}
\item If the graph of $f$ is increasing on $[a,b]$, then $LEFT(n)\leq \int^b_a f(x) dx \leq RIGHT(n)$
\item If the graph of $f$ is decreasing on $[a,b]$, then $RIGHT(n)\leq \int^b_a f(x) dx \leq LEFT(n)$
\item If the graph of $f$ is concave up on $[a,b]$, then $MID(n)\leq \int^b_a f(x) dx \leq TRAP(n)$
\item If the graph of $f$ is concave down on $[a,b]$, then $TRAP(n)\leq \int^b_a f(x) dx \leq MID(n)$
\end{enumerate}
\subsection{Properties of Definite Integrals}
\begin{enumerate}
\item $\int^a_b f(x) dx = -\int^b_a f(x) dx$
\item $\int^a_b f(x) dx+\int^b_c f(x) dx=\int^a_c f(x) dx$
\item $\int^a_b (f(x)\pm g(x)) dx=\int^a_b f(x) dx \pm \int^a_b g(x) dx$
\item $\int^a_b cf(x) dx = c \int^a_b f(x) dx$
\item Symmetry: if $f$ is odd, $\int_{-a}^{a} f(x)dx=0$; if $f$ is even, $\int_{-a}^{a} f(x)dx=2\int_{0}^{a}f(x)dx$.
\item Average value of function $f(x)$ in $[a,b]$ is $\frac{1}{b-a} \int_{a}^{b}f(x)dx$.
\end{enumerate}
\begin{thm}
\textbf{The Fundamental Theorem of Calculus}:
If $f$ is continuous on interval $[a,b]$ and $f(t)=F'(t)$, then $\int^b_a f(t) dt = F(b)-F(a).$
\textbf{Second FTC (Construction theorem for Antiderivatives)}
If $f$ is a continuous function on an interval, and if $a$ is any number in that interval then the function $F$ defined on the interval as follows is an antiderivative of $f$:
\[F(x)=\int^x_a f(t) dt\]
\end{thm}
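For example (a quick check of the first statement, with my own choice of function): taking $f(t)=2t$ and $F(t)=t^2$ gives $\int_0^1 2t\,dt = F(1)-F(0)=1$.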
\begin{enumerate}
\item $\int 0\, dx = C$
\item $\int kdx=kx+C$
\item $\int x^ndx=\frac{x^{n+1}}{n+1}+C, (n \neq -1)$
\item $\int \frac{1}{x}dx = \ln|x|+C$
\item $\int e^xdx=e^x+C $
\item $\int \cos xdx=\sin x + C $
\item $\int \sin xdx=-\cos x + C $
\end{enumerate}
Properties of antiderivatives:
\begin{enumerate}
\item $\int (f(x) \pm g(x))dx=\int f(x) dx \pm \int g(x) dx$
\item $\int cf(x) dx = c \int f(x) dx$
\end{enumerate}
\subsection{Integration Techniques}
\begin{enumerate}
\item Guess and Check
\item Substitution $du=f(x)'dx$ if $u=f(x)$
\item By parts $\int u\, dv=uv-\int v\, du$ (see the worked example after this list)
\item Partial fractions $\frac{p(x)}{(x+c_1)^2(x+c_2)(x^2+c_3)}=\frac{A}{x+c_1}+\frac{B}{(x+c_1)^2}+\frac{C}{x+c_2}+ \frac{Dx+E}{x^2+c_3}$
\end{enumerate}
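For instance, a small worked case of the by-parts formula above: with $u=x$ and $dv=e^x\,dx$, so that $du=dx$ and $v=e^x$,
\[\int x e^x\,dx = xe^x - \int e^x\,dx = xe^x - e^x + C.\]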
\section{Find Area/Volumes by slicing}
\begin{enumerate}
\item Compute the area: Think about slicing the area into parallel line segments.
\item Disk Method (see the worked example after this list):\\
Horizontal axis of revolution ($x$-axis): $V = \int_a^b \pi(f(x)^2 - g(x)^2)dx$\\
Vertical axis of revolution ($y$-axis): $V = \int_a^b \pi(f(y)^2 - g(y)^2)dy$
\item Shell Method:\\
Horizontal axis of revolution ($x$-axis): $V = \int_a^b 2\pi y(f(y) - g(y))dy$\\
Vertical axis of revolution ($y$-axis): $V = \int_a^b 2\pi x(f(x) - g(x))dx$
\end{enumerate}
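As a worked instance of the disk method (illustrative only): revolving $f(x)=\sqrt{r^2-x^2}$, with $g(x)=0$, about the $x$-axis gives a sphere of radius $r$, with volume
\[V=\int_{-r}^{r}\pi\left(r^2-x^2\right)dx=\pi\left[r^2x-\frac{x^3}{3}\right]_{-r}^{r}=\frac{4}{3}\pi r^3.\]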
\subsection{Mass}
The basic formula we are doing is:\begin{enumerate}
\item One dimensional: $M=\delta l$ where $M$ is the total mass, $\delta$ is the density, and $l$ is the length (see the sketch after this list).
\item
Two dimensional: $M=\delta A$ where $M$ is the total mass, $\delta$ is the density, $A$ is the area.
\item Three dimensional (real world): $M=\delta V$ where $M$ is the total mass, $\delta$ is the density, $V$ is the volume.
\end{enumerate}
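When the density varies, the product becomes an integral. As a sketch (the notation $\delta(x)$ for the density along a rod from $x=a$ to $x=b$ is my own here): $M=\int_a^b \delta(x)\,dx$; for instance, $\delta(x)=x$ on $[0,2]$ gives $M=\int_0^2 x\,dx = 2$.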
\subsection{Work}
Key formula we are using:\\
$\text{Work done = Force} \cdot \text{Distance}$ or
$W=F\cdot s$
Integration version:
$W = \int^b_a F(x) dx$
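For example (an illustrative case): if the force grows linearly with displacement, $F(x)=kx$, then the work done moving from $0$ to $a$ is
\[W=\int_0^a kx\,dx = \frac{ka^2}{2}.\]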
\subsection{L'Hopital's rule}
L’Hopital’s rule: If $f$ and $g$ are differentiable and (below $a$ can be $\pm \infty$)\\
i)$f(a) = g(a) = 0$ for finite $a$, \\
Or ii)$\lim_{x\to a} f(x)=\lim_{x\to a} g(x)= \pm \infty$,\\
Or iii)$\lim_{x\to \infty} f(x)= \lim_{x\to \infty} g(x) = 0$
then
\[\lim_{x\to a}\frac{f(x)}{g(x)} = \lim_{x\to a} \frac{f'(x)}{g'(x)} \]
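For example, $\lim_{x\to 0}\frac{e^x-1}{x}$ has the form $\frac{0}{0}$, so by L'Hopital's rule it equals $\lim_{x\to 0}\frac{e^x}{1}=1$.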
\subsection{Dominance}
We say that $g$ dominates $f$ as $x \to \infty$ if $\lim_{x\to \infty}f(x)/g(x) = 0$.
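For instance, $e^x$ dominates $x$ as $x\to\infty$, since $\lim_{x\to\infty} x/e^x = \lim_{x\to\infty} 1/e^x = 0$ by L'Hopital's rule.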
\section{Improper integral}
There are two types of improper integral.
\begin{itemize}
\item The first case is where a limit of integration goes to infinity, i.e.\ we consider $\lim_{b \to \infty} \int^b_a f(x) dx$.
\item The integrand goes to infinity as $x \to a$.
\end{itemize}
\subsection{Converges or diverges?}
\begin{enumerate}
\item Check by definition, this means check the limit directly.
\item $p$-test.\\
\includegraphics*[width=0.9\textwidth]{1.png}
\item Exponential decay test.
\[\int^\infty_0 e^{-ax} dx\] converges for $a>0$.
\item Comparison test.\\
If $f(x)\geq g(x) \geq 0$ on the interval $[a,\infty)$, then\begin{itemize}
\item If $\int^\infty_a f(x) dx$ converges then so does $\int^\infty_a g(x) dx$.
\item If $\int^\infty_a g(x) dx$ diverges then so does $\int^\infty_a f(x) dx$.
\end{itemize}
\item Limit Comparison theorem.\\
If $f(x)$ and $g(x)$ are both positive on the interval $[a,b)$, where $b$ could be a real number or infinity,
and
\[\lim_{x\to b}\frac{f(x)}{g(x)} = C\] such that $0 < C < \infty$
then the improper integrals $\int^b_a f(x) dx$ and $\int^b_a g(x) dx$ are either both convergent or both divergent.
\end{enumerate}
\section{Probability}
\subsection{PDF and CDF}
\begin{definition}
A function $p(x)$ is a \textbf{probability density function} or PDF if it satisfies the following conditions
\begin{itemize}
\item $p(x) \geq 0$ for all $x$.
\item $\int_{-\infty}^\infty p(x)\, dx = 1.$
\end{itemize}
\end{definition}
\begin{definition}
A function $P(t)$ is a \textbf{Cumulative Distribution Function}, or cdf, of a density function $p(t)$ if it is defined by
$P(t) =\int_{-\infty}^t p(x) dx $,
which means that $P(t)$ is the antiderivative of $p(t)$ with the following properties:
\begin{itemize}
\item $P(t)$ is increasing and $0\leq P(t)\leq 1$ for all $t$.
\item $\lim_{t \to \infty}P(t)=1.$
\item $\lim_{t \to -\infty}P(t)=0.$
\end{itemize}
\end{definition}
Moreover, we have $\int_a^b p(x)dx=P(b)-P(a)$.
\subsection{Probability, mean and median}
\subsubsection*{Probability}
Let us denote by $X$ the quantity of the outcome that we care about ($X$ is, in fact, called the random variable).
$\mathbb{P}\{a\leq X\leq b\}=\int_a^b p(x)dx=P(b)-P(a)$\\
$\mathbb{P}\{X\leq t\}=\int_{-\infty}^t p(x)dx=P(t)$\\
$\mathbb{P}\{X\geq s\}=\int_{s}^\infty p(x)dx=1-P(s)$
\subsubsection*{The mean and median}
\begin{definition}
A \textbf{median} of a quantity $X$ is a value $T$ such that the probability of $X\leq T$ is $1/2$. Thus we have $T$ is defined such that
$\int_{-\infty}^T p(x) dx=1/2$ or $P(T)=1/2$.
\end{definition}
\begin{definition} A \textbf{mean} of a quantity $X$ is the value given by
\[ Mean= \frac{\text{probability-weighted sum of all possible values}}{\text{total probability}}= \frac{\int_{-\infty}^{\infty}xp(x)dx}{\int_{-\infty}^{\infty}p(x)dx}=\frac{\int_{-\infty}^{\infty}xp(x)dx}{1}=\int_{-\infty}^{\infty}xp(x)dx. \]
\end{definition}
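As a quick illustration (my own example): for the uniform density $p(x)=1$ on $[0,1]$ (and $0$ elsewhere), the mean is $\int_0^1 x\,dx = \tfrac{1}{2}$, and the median $T$ satisfies $\int_0^T 1\,dx = T = \tfrac{1}{2}$, so both equal $\tfrac{1}{2}$.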
\subsubsection*{Normal Distribution}
\begin{definition}
A normal distribution has a density function of the form
\[p(x)=\frac{1}{\sigma \sqrt{2 \pi}}e^{-\frac{(x-\mu)^2}{2\sigma^2}}\] where $\mu$ is the mean of the distribution and $\sigma$ is the standard deviation, with $\sigma > 0$.
The case $\mu = 0$, $\sigma = 1$ is called the standard normal distribution.
\end{definition}
\section{Sequences and Series}
\subsection{Sequence}
If a sequence $s_n$ is bounded and monotone, it converges.
\subsection{Series}
Convergence Properties of Series:
\begin{enumerate}
\item If $\sum_{n=1}^{\infty} a_n$ and $\sum_{n=1}^{\infty} b_n$ converge and if $k$ is a constant, then
$\sum_{n=1}^{\infty} (a_n+b_n)$ converges to$\sum_{n=1}^{\infty} a_n + \sum_{n=1}^{\infty} b_n$.\\
$\sum_{n=1}^{\infty} ka_n$ converges to $k\sum_{n=1}^{\infty} a_n$
\item Changing a finite number of terms in a series does not change convergence.
\item If $\lim_{n \to \infty}a_n\neq 0$ or $\lim_{n \to \infty}a_n$ does not exist, then
$\sum_{n=1}^{\infty} a_n$ diverges. (\textbf{!})
\item If $\sum_{n=1}^{\infty} a_n$ diverges, then $\sum_{n=1}^{\infty} ka_n$ diverges if $k\neq 0$.
\end{enumerate}
Moreover, there are several test to determine if a series is convergent.
\begin{enumerate}
\item \textbf{The Integral Test}\\
Suppose $a_n = f(n)$, where $f(x)$ is decreasing and positive.
\\a. If $\int_1^\infty f(x) dx$ converges, then $\sum_{n=1}^{\infty} a_n$ converges.
\\b. If $\int_1^\infty f(x) dx$ diverges, then $\sum_{n=1}^{\infty} a_n$ diverges.
\item \textbf{p-test}\\
The $p$-series $\sum_{n=1}^{\infty} 1/n^p$ converges if $p > 1$ and diverges if $p \leq 1$.
\item \textbf{Comparison Test}\\
Suppose $0 \leq a_n \leq b_n$ for all $n$ beyond a certain value.
\\ a. If $\sum_{n=1}^{\infty} b_n$ converges, then $\sum_{n=1}^{\infty} a_n$ converges.
\\ b. If $\sum_{n=1}^{\infty} a_n$ diverges, then $\sum_{n=1}^{\infty} b_n$ diverges.
\item \textbf{Limit Comparison Test}\\
Suppose $a_n > 0$ and $b_n > 0$ for all $n$. If
$\lim_{n\to \infty}a_n/b_n= c$ where $c > 0$,
then the two series $\sum_{n=1}^{\infty} a_n$ and $\sum_{n=1}^{\infty} b_n$ either both converge or both diverge.
\item \textbf{Convergence of Absolute Values Implies Convergence}\\
If $\sum_{n=1}^{\infty}|a_n|$ converges, then so does $\sum_{n=1}^{\infty} a_n$.
\item \textbf{The Ratio Test}
For a series $\sum_{n=1}^{\infty} a_n$, suppose the sequence of ratios $|a_{n+1}|/|a_n|$ has a limit:
$\lim_{n\to \infty}|a_{n+1}|/|a_n| = L$, then
\begin{itemize}
\item If $L < 1$, then $\sum_{n=1}^{\infty} a_n$ converges.
\item If $L > 1$, or if $L$ is infinite, then $\sum_{n=1}^{\infty} a_n$ diverges.
\item If $L = 1$, the test does not tell anything about convergence of $\sum_{n=1}^{\infty} a_n$ (!).
\end{itemize}
\item \textbf{Alternating Series Test}
A series of the form $\sum_{n=1}^{\infty} (-1)^{n-1}a_n = a_1 - a_2 + a_3 - a_4 + \ldots + (-1)^{n-1}a_n + \ldots$
converges if
$0 < a_{n+1} < a_n$ for all $n$ and $\lim_{n \to \infty}a_n = 0$.
Error of the alternating series test: let $S = \lim_{n\to \infty}S_n$; then we have $|S - S_n| < a_{n+1}$.
\end{enumerate}
Notably, we say that the series $\sum_{n=1}^{\infty} a_n$ is
\begin{itemize}
\item absolutely convergent if $\sum_{n=1}^{\infty} a_n$ and $\sum_{n=1}^{\infty}|a_n|$ both converge.
\item conditionally convergent if $\sum_{n=1}^{\infty} a_n$ converges but $\sum_{n=1}^{\infty}|a_n|$ diverges (see the example after this list).
\end{itemize}
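For example (illustrative): $\sum_{n=1}^{\infty} \frac{(-1)^{n-1}}{n^2}$ is absolutely convergent, since $\sum_{n=1}^{\infty} \frac{1}{n^2}$ converges by the $p$-test ($p=2>1$); by contrast, $\sum_{n=1}^{\infty} \frac{(-1)^{n-1}}{n}$ is conditionally convergent: it converges by the alternating series test ($a_n=1/n$ decreases to $0$), but $\sum_{n=1}^{\infty} \frac{1}{n}$ diverges ($p=1$).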
\includegraphics[width=1\textwidth]{program2.png}
\begin{multicols}{2}
Test we consider for proving convergence:
\begin{enumerate}
\item The integral test
\item p-test
\item Comparison test
\item Limit comparison test
\item Check the absolute convergence of the series
\item Ratio Test
\item Alternating Series Test
\end{enumerate}
\columnbreak
Test we consider for proving divergence:
\begin{enumerate}
\item The integral test
\item p-test
\item Comparison test
\item Limit comparison test
\item Ratio Test
\item Check whether $\lim_{n \to \infty} a_n \neq 0$ or $\lim_{n \to \infty} a_n$ does not exist.
\end{enumerate}
\end{multicols}
\includegraphics[width=1\textwidth]{program.png}
\subsection{Geometric Series}
There is a special series that we learn about, which is the Geometric Series; notice that the formula on the right-hand side is what we call the closed form.
A finite geometric series has the form
\[a + ax + ax^2 + \cdots + ax^{n-2} + ax^{n-1}=\frac{a(1-x^n)}{1-x}\text{ for } x \neq 1\]
An infinite geometric series has the form
\[a + ax + ax^2 + \cdots + ax^{n-2} + ax^{n-1}+ax^n +\cdots=\frac{a}{1-x}\text{ for } |x| < 1\]
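For example, with $a=1$ and $x=\tfrac{1}{2}$: the finite sum $1+\tfrac12+\tfrac14 = \frac{1-(1/2)^3}{1-1/2}=\frac{7}{4}$, while the infinite sum is $\frac{1}{1-1/2}=2$.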
\newpage
\subsection{Power Series}
\begin{definition}
A power series about $x = a$ is a sum of constants times powers of $(x - a)$: \\
$C_0 + C_1(x - a) + C_2(x - a)^2 + \ldots + C_n(x - a)^n + \ldots = \sum_{n=0}^{\infty} C_n(x - a)^n$.
\end{definition}
Moreover, each power series falls into one of the three following cases, characterized by its radius of convergence, $R$.
\begin{itemize}
\item The series converges only for $x = a$; the radius of convergence is defined to be $R = 0$.
\item The series converges for all values of $x$; the radius of convergence is defined to be
$R = \infty$.
\item There is a positive number $R$, called the radius of convergence, such that the series
converges for $|x - a| < R$ and diverges for $|x - a| > R$.
\end{itemize}
How to find the radius of convergence: apply the ratio test to the terms of the series and determine for which $x$ the limiting ratio is less than $1$ (see the example below).
The interval of convergence is the interval between $a - R$ and $a + R$, including any
endpoint where the series converges.
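For example, for $\sum_{n=0}^{\infty}\frac{x^n}{n!}$ the ratio test gives $\lim_{n\to\infty}\frac{|x|^{n+1}/(n+1)!}{|x|^n/n!}=\lim_{n\to\infty}\frac{|x|}{n+1}=0<1$ for every $x$, so $R=\infty$; for $\sum_{n=1}^{\infty} n x^n$ the ratio tends to $|x|$, so the series converges for $|x|<1$ and $R=1$.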
\subsection{Taylor Polynomial}
\textcolor{red}{Taylor Polynomial of Degree $n$ Approximating $f(x)$ for $x$ near $a$} is \[f(x) \approx P_n(x)
= f(a) + f'(a)(x-a) + \frac{f''(a)}{2!}(x-a)^2 + \frac{f'''(a)}{3!}(x-a)^3 + \ldots + \frac{f^{(n)}(a)}{n!} (x-a)^n\]
We call $P_n(x)$ the Taylor polynomial of degree $n$ centered at $x = a$, or the Taylor polynomial about $x = a$.
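For instance (a quick worked case): for $f(x)=\cos x$ about $a=0$ we have $f(0)=1$, $f'(0)=0$, $f''(0)=-1$, so $P_2(x)=1-\frac{x^2}{2}$.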
\subsection{Taylor Series}
\textcolor{red}{Taylor Series for $f(x)$ about $x=a$} is \[f(x) = f(a) + f'(a)(x-a) + \frac{f''(a)}{2!}(x-a)^2 + \frac{f'''(a)}{3!}(x-a)^3 + \ldots + \frac{f^{(n)}(a)}{n!} (x-a)^n+ \ldots \]
Truncating this series after the degree-$n$ term gives the Taylor polynomial $P_n(x)$ about $x = a$.
$f^{(n)}(a)= (\text{coefficient of }(x-a)^n)\cdot n!$.
Moreover, there are \textcolor{red}{several important cases} that we consider, each of them is an Taylor expansion of a function about $x=0$:
\begin{itemize}
\item $e^{x}= 1 + x + \frac{x^2}{2!} + \frac{x^3}{3!} + \frac{x^4}{4!} + \frac{x^5}{5!} + \frac{x^6}{6!} + \frac{x^7}{7!} + \frac{x^8}{8!} + \cdots\text{ converges for all } x$
\item $\sin(x)=\sum\limits_{n=0}^\infty \dfrac{x^{2n+1}}{(2n+1)!}\cdot(-1)^n = x-\dfrac{x^3}{3!}+\dfrac{x^5}{5!}-\dfrac{x^7}{7!}+\dots\text{ converges for all } x$
\item $\cos(x)=\sum\limits_{n=0}^\infty \dfrac{x^{2n}}{(2n)!}\cdot(-1)^n = 1-\frac{x^2}{2!}+\frac{x^4}{4!}-\frac{x^6}{6!}+\dots \text{ converges for all } x$
\item $(1 + x)^p = \sum_{k=0}^{\infty} \binom{p}{k} x^k= \sum_{k=0}^{\infty} \frac{p!}{k!(p-k)!} x^k=1 + px + \frac{p(p - 1)}{2!}x^2 + \frac{p(p - 1)(p - 2)}{3!}x^3 + \cdots \text{ converges for } -1 < x < 1$.
\item $\ln(1+x) =\sum_{n = 0}^{\infty}\frac{(-1)^nx^{n+1}}{n+1}= x-\frac{x^2}{2}+\frac{x^3}{3}-\frac{x^4}{4}+\cdots,$
\end{itemize}
Moreover, we can find new Taylor series from existing ones using \textcolor{red}{four methods}:\\
Substitute / Differentiate / Integrate / Multiply (see the example below)
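For example (substitution): replacing $x$ by $-x^2$ in the series for $e^x$ gives
\[e^{-x^2}=1-x^2+\frac{x^4}{2!}-\frac{x^6}{3!}+\cdots,\]
which also converges for all $x$.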
\section{Parametric Equations and Polar Coordinate}
\subsection{Parametric Equations}
To summarize, we have the \textcolor{red}{slope} $\frac{dy}{dx}=\frac{dy/dt}{dx/dt}$
and the \textcolor{red}{concavity} of the parametrized curve
$\frac{d^2y}{dx^2}=\frac{\frac{d}{dt}\left(\frac{dy}{dx}\right)}{dx/dt}$
The quantity $v_x = dx/dt$ is the instantaneous velocity in the $x$-direction; $v_y = dy/dt$ is the
instantaneous velocity in the $y$-direction.
We call $(v_x,v_y)$ the velocity vector.
The \textcolor{red}{instantaneous speed} :$v = \sqrt{(dx/dt)^2 + (dy/dt)^2} =\sqrt{(v_x)^2 + (v_y)^2}$.\\
Moreover, the \textcolor{red}{distance} traveled from time $a$ to $b$ is $\int^b_a v(t) dt = \int_a^b \sqrt{(dx/dt)^2 + (dy/dt)^2} dt$
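As a quick check (an illustrative example): for $x=\cos t$, $y=\sin t$ we get $v_x=-\sin t$ and $v_y=\cos t$, so the speed is $\sqrt{\sin^2 t+\cos^2 t}=1$, the distance traveled for $0\leq t\leq 2\pi$ is $\int_0^{2\pi}1\,dt=2\pi$ (the circumference of the unit circle), and the slope is $\frac{dy}{dx}=\frac{\cos t}{-\sin t}$.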
\subsection{Polar Coordinate}
\subsubsection{Relation between Cartesian and Polar}
\textcolor{red}{Cartesian to Polar}: $(x,y) \to (r= \sqrt{x^2 + y^2}, \theta) \text{ (Here we have that } \tan \theta = \frac{y}{x} \text{)}$
$\theta$ does not have to be $\arctan(\frac{y}{x})$!
Polar to Cartesian: $(r,\theta) \to (x=r \cos \theta, y=r \sin \theta)$
\subsubsection{Slope, Arc length and Area in Polar Coordinates}
\textcolor{red}{slope} of to be
$\frac{dy}{dx}=\frac{dy/d\theta}{dx/d\theta}$
The \textcolor{red}{arc length} from angle $a$ to $b$ is $\int_a^b \sqrt{(dx/d\theta)^2 + (dy/d\theta)^2} d\theta=\int_a^b \sqrt{r^2 + (dr/d\theta)^2} d\theta$
Fact: the \textcolor{red}{area of a sector} is $\frac{1}{2} r^2 \theta$. Hence, for a curve $r = f(\theta)$, with \textcolor{red}{$f(\theta)$ continuous and of the same sign} on $[a,b]$, the area of the region enclosed is $\frac{1}{2}\int^{b}_{a}f(\theta)^2\, d\theta$
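For example, for the circle $r=a$ (constant), the enclosed area is $\frac{1}{2}\int_0^{2\pi}a^2\,d\theta=\pi a^2$ and the arc length is $\int_0^{2\pi}\sqrt{a^2+0}\,d\theta = 2\pi a$.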
\end{document} | {
"alphanum_fraction": 0.6572269004,
"avg_line_length": 40.2273781903,
"ext": "tex",
"hexsha": "01bc307426fa1e0b46e8e94be7525f00ae1cb212",
"lang": "TeX",
"max_forks_count": null,
"max_forks_repo_forks_event_max_datetime": null,
"max_forks_repo_forks_event_min_datetime": null,
"max_forks_repo_head_hexsha": "80ba7f9b24fa6a666d2d7f0f4b4c41a9aa1822c8",
"max_forks_repo_licenses": [
"CC-BY-3.0"
],
"max_forks_repo_name": "yiwchen/yiwchen.github.io",
"max_forks_repo_path": "m116fall20/Note/FinalReview/justformula.tex",
"max_issues_count": null,
"max_issues_repo_head_hexsha": "80ba7f9b24fa6a666d2d7f0f4b4c41a9aa1822c8",
"max_issues_repo_issues_event_max_datetime": null,
"max_issues_repo_issues_event_min_datetime": null,
"max_issues_repo_licenses": [
"CC-BY-3.0"
],
"max_issues_repo_name": "yiwchen/yiwchen.github.io",
"max_issues_repo_path": "m116fall20/Note/FinalReview/justformula.tex",
"max_line_length": 249,
"max_stars_count": null,
"max_stars_repo_head_hexsha": "80ba7f9b24fa6a666d2d7f0f4b4c41a9aa1822c8",
"max_stars_repo_licenses": [
"CC-BY-3.0"
],
"max_stars_repo_name": "yiwchen/yiwchen.github.io",
"max_stars_repo_path": "m116fall20/Note/FinalReview/justformula.tex",
"max_stars_repo_stars_event_max_datetime": null,
"max_stars_repo_stars_event_min_datetime": null,
"num_tokens": 6561,
"size": 17338
} |
\input{mmd-article-header}
\def\mytitle{MultiMarkdown Fenced Code Blocks Test}
\def\latexmode{memoir}
\input{mmd-article-begin-doc}
\chapter{Fenced Code Blocks}
\label{fencedcodeblocks}
Plain text.
\begin{adjustwidth}{2.5em}{2.5em}
\begin{verbatim}
This is a code block
\end{verbatim}
\end{adjustwidth}
\begin{adjustwidth}{2.5em}{2.5em}
\begin{lstlisting}[language=perl]
This is a code block using perl.
\end{lstlisting}
\end{adjustwidth}
\input{mmd-memoir-footer}
\end{document}
| {
"alphanum_fraction": 0.7566462168,
"avg_line_length": 16.8620689655,
"ext": "tex",
"hexsha": "95e91d8a37091b9c52897b57ce940e6f2f940037",
"lang": "TeX",
"max_forks_count": null,
"max_forks_repo_forks_event_max_datetime": null,
"max_forks_repo_forks_event_min_datetime": null,
"max_forks_repo_head_hexsha": "98abc383e38bbd310f61d322ca31e675cacac4fe",
"max_forks_repo_licenses": [
"MIT"
],
"max_forks_repo_name": "dtjm/go-multimarkdown",
"max_forks_repo_path": "deps/mmd4/MarkdownTest/MultiMarkdownTests/Fencing.tex",
"max_issues_count": null,
"max_issues_repo_head_hexsha": "98abc383e38bbd310f61d322ca31e675cacac4fe",
"max_issues_repo_issues_event_max_datetime": null,
"max_issues_repo_issues_event_min_datetime": null,
"max_issues_repo_licenses": [
"MIT"
],
"max_issues_repo_name": "dtjm/go-multimarkdown",
"max_issues_repo_path": "deps/mmd4/MarkdownTest/MultiMarkdownTests/Fencing.tex",
"max_line_length": 51,
"max_stars_count": 1,
"max_stars_repo_head_hexsha": "98abc383e38bbd310f61d322ca31e675cacac4fe",
"max_stars_repo_licenses": [
"MIT"
],
"max_stars_repo_name": "dtjm/go-multimarkdown",
"max_stars_repo_path": "deps/mmd4/MarkdownTest/MultiMarkdownTests/Fencing.tex",
"max_stars_repo_stars_event_max_datetime": "2015-02-05T07:18:40.000Z",
"max_stars_repo_stars_event_min_datetime": "2015-02-05T07:18:40.000Z",
"num_tokens": 158,
"size": 489
} |
\theoremstyle{plain}% default
\newtcbtheorem[number within=section]{definicion}%
{\textsc{Definici\'on}}{theorem style=plain, sharp
corners,enhanced,colframe=blue!50!black,colback=yellow!20!white,coltitle=red!50!black,fonttitle=\upshape\bfseries\large,fontupper=\itshape,
drop lifted shadow=blue!70!black!50!white,boxrule=0.4pt}{definicion}
%
%
%fontupper=\itshape\large
%
\newtcbtheorem[use counter from=definicion]{teorema}%
{\textsc{Teorema}}{theorem style=plain,sharp
corners, enhanced,colframe=blue!50!black,colback=yellow!20!white,
coltitle=red!50!black,fonttitle=\upshape\bfseries\large,fontupper=\itshape,
drop lifted shadow=blue!70!black!50!white,boxrule=0.4pt}{teorema}
%
%
%
%
\newtcbtheorem[use counter from=definicion]{lema}%
{\textsc{Lema}}{theorem style=plain, sharp
corners,enhanced,colframe=blue!50!black,colback=yellow!20!white,
coltitle=red!50!black,fonttitle=\upshape\bfseries\large,fontupper=\itshape,drop lifted shadow=blue!70!black!50!white,boxrule=0.4pt}{lema}
%
%
%
%
\newtcbtheorem[use counter from=definicion]{corolario}%
{\textsc{Corolario}}{theorem style=plain,sharp
corners, enhanced,colframe=blue!50!black,colback=yellow!20!white,
coltitle=red!50!black,fonttitle=\upshape\bfseries\large,fontupper=\itshape,
drop lifted shadow=blue!70!black!50!white,boxrule=0.4pt}{colorario}
%
%
%
%
%
\newtcbtheorem[use counter from=definicion]{proposicion}%
{\textsc{Proposici\'on}}{theorem style=plain,sharp
corners, enhanced,colframe=blue!50!black,colback=yellow!20!white,
coltitle=red!50!black,fonttitle=\upshape\bfseries\large,fontupper=\itshape,
drop lifted shadow=blue!70!black!50!white,boxrule=0.4pt}{proposicion}
%
%
%
\newtcbtheorem[number within=chapter]
{observacion}%
{\textsc{Observaci\'on}}{theorem style=plain,sharp
corners, enhanced,colframe=blue!50!black,colback=blue!5!white,
coltitle=red!50!black,fonttitle=\upshape\bfseries\normalsize,fontupper=\itshape,
drop lifted shadow=blue!70!black!50!white,boxrule=0.6pt}{observacion}
%
%
%
\newtcbtheorem[auto counter,number within=section]{notacion}%
{\textsc{Notaci\'on}}{fonttitle=\bfseries\upshape\large, fontupper=\slshape,
arc=0mm, colback=blue!0!white,colframe=blue!0!white}{notacion}
%
%
%
\theoremstyle{definition}
\newtheorem{Ejemplo}{Ejemplo}[section]
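%------------------------------------------------------------------
% Usage sketch (illustrative comment only; the environment names and the
% two mandatory arguments {title}{label} follow the \newtcbtheorem
% definitions above, and the label is presumably referenced as
% \ref{definicion:<label>}):
%
% \begin{definicion}{Continuidad}{cont}
% Una funci\'on $f$ es continua en $a$ si ...
% \end{definicion}
%
% \begin{teorema}{Teorema fundamental}{tf}
% ...
% \end{teorema}
%------------------------------------------------------------------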
%---------------------------------------------------------------
%{\begin{pspicture}(0.62,0)(2.6,0.4)
%\psline[linecolor=red!50!black, linewidth=1pt](0.6,-0.1)(3.8,-0.1)
%\psline[linecolor=red!50!black, linewidth=1pt](0.6,-0.1)(0.6,0.4)
%\rput(1.7,0.1){{\large\textsc{Ejemplo}}}
%\end{pspicture}}
%
%------------------------------------------------------------------------------
%%%%%%%%%%%%%%%%%% new style teorem %%%%%%%%%%%%%%%%%%%%%%%
\newtheoremstyle{ejer} % name of the style to be used
{10mm} % measure of space to leave above the theorem. E.g.: 3pt
{10mm} % measure of space to leave below the theorem. E.g.: 3pt
{\slshape} % name of font to use in the body of the theorem
{6pt} % measure of space to indent
{\bfseries} % name of head font
{\newline} % punctuation between head and body
{10pt} % space after theorem head
{\fcolorbox{red!50!black}{white}{Ejercicio.\thmnumber{ #2}}} % Manually specify head
\theoremstyle{ejer}
\newtheorem{ejer}{Ejercicio:}
%%%%%%%%%%%%%%%%%%%%%%%%%%%% margin change %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
\newenvironment{cmargen}[1]{\begin{minipage}[c]{#1\linewidth}}{\end{minipage}}
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%\theoremstyle{plain}
%\newtheorem{ejer}{Ejercicios
%\begin{pspicture}(0,0)(0,0)
%%\psgrid
%\psline[linecolor=red!50!black, linewidth=1pt](-2.1,-0.2)(1.5,-0.2)
%\psline[linecolor=red!50!black, linewidth=1pt](-2.1,-0.2)(-2.1,0.5)
%\psline[linecolor=red!50!black, linewidth=1pt](1.5,-0.2)(1.5,0.5)
%\psline[linecolor=red!50!black, linewidth=1pt](-2.1,0.5)(1.5,0.5)
%\end{pspicture}
%}[section]
%\hspace{-0.67cm}
%
\newcommand{\solucion}{ \textcolor{red!50!black}{ \textsc{\bf Soluci\'on: }}}
%
%\newcommand{\acc}[1]{
%{\begin{pspicture}(0,0)(2.5,0.5)
%%\psgrid
%%\psline[linecolor=Turquoise, linewidth=2pt](0,0)(2.5,0)
%%\psline[linecolor=Turquoise, linewidth=2pt](0,0)(0,0.5)
%\rput(1.5,0.2){\Large \textcolor{MidnightBlue}{Actividad #1}}
%\end{pspicture}}}
%
\newcommand{\acc}[1]{\fcolorbox{MidnightBlue}{white}{\bfseries{Actividad}:\ #1}}
%
\def\QEDmark{\ensuremath{\square}}
%
\def\proof{\paragraph{\textcolor{red!50!black}{ \textsc{\bf Soluci\'on.}}}}
\def\endproof{\hfill\color{red!50!black}$\blacksquare$}
%
%%%%%%%%%%%%%%%%%%%%%%%%%%%%% for proofs %%%%%%%%%%%%%%%%%%%%%
\newcommand{\demostracion}{ \textcolor{red!50!black}{\hspace{-0.67cm}\textsc{ \bf Demostraci\'{o}n.}\,}}
%
%
%%%%%%%%%%%%%% GRID %%%%%%%%
\newpsobject{malla}{psgrid}{subgriddiv=1,griddots=10,gridlabels=6pt}
\allowdisplaybreaks
\setlength{\parindent}{0pt}
%
%
%
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
\newcommand{\Co}{\mathbb C}
\newcommand{\F}{\mathbb F}
\newcommand{\J}{\mathbb J}
\newcommand{\K}{\mathbb K}
\newcommand{\N}{\mathbb N}
\newcommand{\Po}{\mathbb P}
\newcommand{\Q}{\mathbb Q}
\def\R{I\!\! R}
\newcommand{\Z}{\mathbb Z}
\renewcommand{\K}{\mathbb K}
\newcommand{\Rd}{\ensuremath{\R^2}}
\newcommand{\Rt}{\ensuremath{\R^3}}
\newcommand{\Ro}{\ensuremath{\R_0}}
\newcommand{\No}{\ensuremath{\N_0}}
%
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%ARROWS
\newcommand{\infinitot}{t\to\infty} % t tends to infinity
\newcommand{\infiniton}{n\to\infty} % n tends to infinity
\newcommand{\hacia}{\longrightarrow}
\newcommand{\ssi}{\longleftrightarrow}
\newcommand{\Ssi}{\Longleftrightarrow}
\newcommand{\implica}{\Longrightarrow}
\newcommand{\reciproca}{\longleftarrow}
\newcommand{\xtoa}[2]{#1\to #2}
%%%%%%%%%%%%%%%%%%%%%%%%%%%% limits %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
\newcommand{\funcreal}[2]{\ensuremath{\,#1\!:#2\rightarrow \mathbb R\,}}
\newcommand{\cfunc}[2]{\ensuremath{\,#1:#2\rightarrow \mathbb C\,}}
\newcommand{\circulo}{\marginpar{\vspace{0.3cm}\hspace{-16.7cm}$\circledS$}{}}%LIMITES
\newcommand{\nlim}{\lim\limits_{n\to\infty}}
\newcommand{\tlim}{\lim\limits_{t\to\infty}}
\newcommand{\jlim}{\lim\limits_{j\to\infty}}
\newcommand{\klim}{\lim\limits_{k\to\infty}}
\newcommand{\mlim}{\lim\limits_{m\to\infty}}
\newcommand{\rlim}{\lim\limits_{r\to\infty}}
\newcommand{\limin}[1]{\lim\limits_{#1\to +\infty}}
\newcommand{\limmin}[1]{\lim\limits_{#1\to - \infty}}
\newcommand{\Limd}[3]{\mbox{$\displaystyle{\lim_{#2\to #3^+}#1}$}}
\newcommand{\Limi}[3]{\mbox{$\displaystyle{\lim_{#2\to #3^-}#1}$}}
\newcommand{\Lim}[3]{\mbox{$\displaystyle{\lim_{#2\to #3}#1}$}}
\newcommand{\limlft}[3]{\mbox{$\dis{\lim_{\substack{#2\to #3\\ #2\,<\,#3}}#1}$}}
\newcommand{\limrgt}[3]{\mbox{$\dis{\lim_{\substack{#2\to #3\\ #2\,>\,#3}}#1}$}}
\newcommand{\Btag}{\tag*{$\Box$}}
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%SUMS
\newcommand{\Sumai}{\sum\limits_{i=1}^n} %Sum from i=1 to n (with \limits)
\newcommand{\sumai}{\sum_{i=1}^n} %Sum from i=1 to n (without \limits)
\newcommand{\Sumaj}{\sum\limits_{j=0}^{n-1}} %Sum from j=0 to n-1
\newcommand{\Suman}{\sum\limits_{n=1}^\infty} %Sum from n=1 to infinity
\newcommand{\suman}{\sum\limits_{n=0}^\infty} %Sum from n=0 to infinity
\newcommand{\jSuma}{\sum\limits_{j=1}^\infty} %Sum from j=1 to infinity
\newcommand{\jsuma}{\sum_{j=1}^\infty} %Sum from j=1 to infinity
\newcommand{\Sumak}{\sum\limits_{k=0}^\infty} %Sum from k=0 to infinity (with \limits)
\newcommand{\sumak}{\sum\limits_{k=1}^\infty} %Sum from k=1 to infinity (with \limits)
\newcommand{\serie}[1]{\sum\limits_{#1}^\infty}
\newcommand{\suma}[2]{\sum\limits_{#1}^#2}
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
\newcommand{\dis}{\displaystyle}
%\newcommand{\Int}{\displaystyle\int}
%\newcommand{\rig}{\rightarrow}
%\newcommand{\lef}{\leftarrow}
%\newcommand{\Rig}{\Rightarrow}
%
%%%%%% Trigonometric and hyperbolic functions %%%%%%%%%
\newcommand{\sen}{\operatorname{sen}}
%\newcommand{\cos}{\operatorname{\cos}}
\newcommand{\arcsec}{\mathop{\rm arcsec}\nolimits}
\newcommand{\arcsen}{\mathop{\rm arcsen}\nolimits}
\newcommand{\arccot}{\mathop{\rm arccot}\nolimits}
\newcommand{\arccsc}{\mathop{\rm arccsc}\nolimits}
\newcommand{\senh}{\mathop{\rm senh}\nolimits}
\newcommand{\secanteh}{\mathop{\rm sech}\nolimits}
\newcommand{\cosecanteh}{\mathop{\rm csch}\nolimits}
%%%%%%%%%%%%%%%%%%%%% derivatives %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%%%%%%%%%%%%%%%%%%%%%%%%%%%%% DIFFERENTIATION %%%%%%%%%%%%%%%%%%%%%
%\providecommand{\derive}[2]{\frac{d }{ d #2}\left[#1\right]}
%
%
\providecommand{\deriven}[2]{\dfrac{d^n#1 }{ d #2^n}}
\providecommand{\derive}[2]{\dfrac{d #1 }{ d #2}}
\providecommand{\derivee}[2]{\dfrac{d^2#1 }{ d #2}}
\newcommand{\Dfa}{\mbox{$f^{\,\prime}(a)$}}
\newcommand{\Dfka}{\mbox{$f^{\,(k)}(a)$}}
\newcommand{\Dfna}{\mbox{$f^{\,(n)}(a)$}}
\newcommand{\derivada}[2]{\ensuremath{\dfrac{\mathrm{d} #1}{\mathrm{d}#2}}} % Derivative of #1 with respect to #2
\newcommand{\derivadados}[2]{\ensuremath{\dfrac{\mathrm{d}^2 #1}{\mathrm{d}#2^2}}}
\newcommand{\derivadatres}[2]{\ensuremath{\dfrac{\mathrm{d}^3 #1}{\mathrm{d}#2^3}}}
\newcommand{\derivadan}[2]{\ensuremath{\dfrac{\mathrm{d}^n #1}{\mathrm{d}#2^n}}}
\newcommand{\fder}[1]{\mbox{$#1^{\,\prime}$}}
\newcommand{\Derdos}[2]{\mbox{$#1^{\,\prime\prime}(#2)$}}
\newcommand{\derpar}[2]{\mbox{$\dfrac{\partial #1}{\partial #2}$}}
\newcommand{\derpardos}[2]{\mbox{$\dfrac{\partial^{2}\! #1}{\partial #2^{2}}$}}
\newcommand{\scd}{\mbox{$^{\,\prime\prime}$}}
\newcommand{\Der}[2]{\mbox{$#1^{\,\prime}(#2)$}}
\newcommand{\Derk}[2]{\mbox{$#1^{\,(k)}(#2)$}}
\newcommand{\Dern}[2]{\mbox{$#1^{\,(n)}(#2)$}}
\newcommand{\partx}[1]{\ensuremath{\dfrac{\partial #1}{\partial x}}}
\newcommand{\party}[1]{\ensuremath{\dfrac{\partial #1}{\partial y}}}
\newcommand{\partz}[1]{\ensuremath{\dfrac{\partial #1}{\partial z}}}
\newcommand{\partxx}[1]{\ensuremath{\dfrac{\partial^2 #1}{\partial x^{2}}}}
\newcommand{\partyy}[1]{\ensuremath{\dfrac{\partial^2 #1}{\partial y^2}}}
\newcommand{\partxy}[1]{\ensuremath{\dfrac{\partial^2 #1}{\partial x
\partial y}}}
\newcommand{\partyx}[1]{\ensuremath{\frac{\partial^2 #1}{\partial y
\partial x}}}
\newcommand{\partone}[2]{\ensuremath{\dfrac{\partial #1}{\partial #2}}}
\newcommand{\partwo}[2]{\ensuremath{\dfrac{\partial^2 #1}{\partial #2^2}}}
\newcommand{\Dpd}[2]{\ensuremath{D_{#2}#1}}
\newcommand{\Dpxy}[3]{\ensuremath{D_{#2 #3}#1}}
\newcommand{\Dp}[3]{\ensuremath{\partial_{#1}#2(#3)}}
\newcommand{\partonetwo}[3]{\ensuremath{\dfrac{\partial^2 #1}{\partial #2\partial #3}}}
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
\newcommand{\derparcial}[2]{\ensuremath{\dfrac{\partial #1}{\partial
#2}}}
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
\renewcommand{\partname}{Semana}
%\newtheorem{proof}{Remark}
%\renewcommand*{\proofname}{Solution}
%
\def\@sqrt[#1]{\root #1\of}
%
%\renewcommand{\rmdefault}{phv}
%\renewcommand{\sfdefault}{phv}
\normalfont
%%%%%%%%%%%%%%%% special boxes %%%%%%%%%%%%%%%%%%
%
%%%%%%%%%%%%%%%%%%%% Other commands %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%%%%%%%%%%%%%%% ------ colors ---------------%%%%%%%%%%%%%%%%%%%%%
\definecolor{problemblue}{RGB}{100,134,158}
\definecolor{titlebgdark}{RGB}{10,76,115}
\definecolor{ocre}{RGB}{10,76,115}
\definecolor{ptctitle}{RGB}{10,76,115}
\definecolor{ptcbackground}{RGB}{212,237,252}
\definecolor{titlebglight}{RGB}{191,233,251}
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
\newcommand\peque{\@setfontsize\peque{8}{9}}
\newcommand{\remark}{\colorbox{titlebgdark}{\color{white}{\bfseries Nota:}} }
\newcommand{\prop}{\colorbox{titlebgdark}{\color{white}{\bfseries Proposici\'on:}} }
\newcommand{\dem}{\colorbox{titlebgdark}{\color{white}{\bfseries Demostraci\'on:}} }
\newcommand{\notacionn}{\colorbox{titlebgdark}{\color{white}{\bfseries Notaci\'on:}} }
\newcommand{\sol}{\colorbox{titlebgdark}{\color{white}{\bfseries{Soluci\'on}:}}\ }
\newcommand{\resp}{\colorbox{titlebgdark}{\color{white}{\bfseries{Respuesta}:}}\ }
\newcommand{\general}{\colorbox{titlebgdark}{\color{white}{\bfseries{En general}:}} }
\newcommand{\obs}{\colorbox{titlebgdark}{\color{white}{\bfseries{Observaci\'on}:}} }
\newcommand{\intro}{\colorbox{titlebgdark}{\color{white}{\large\bfseries{Introducci\'on}:}} }
\newcommand{\conclu}{\colorbox{titlebgdark}{\color{white}{\bfseries{Conclusi\'on}:}} }
\newcommand{\resu}{\colorbox{titlebgdark}{\color{white}{\bfseries{Resumen}:}} }
\newcommand{\expli}{\colorbox{titlebgdark}{\color{white}{\bfseries{Explicaci\'on}:}}\ }
\newcommand{\ej}{\colorbox{titlebgdark}{\color{white}{\bfseries{Ejemplos}:}} }
\newcommand{\cuadro}[2]{\colorbox{#1}{\color{white}{\bfseries{#2}:}} }
\newcommand{\tcuenta}{\colorbox{titlebgdark}{\color{white}{\bfseries{Ten en cuenta}:}} }
\newcommand{\fin}{\hfill\boxempty}
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%%%%%%%%%%%%%%%%%%%%%%%%%%% boxes for remarks %%%%%%%%%%%%%%%%%%%%%%%%%%%
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
\definecolor{boxbg}{RGB}{179,222,255}
\tcbset{
common/.style={
before=\vskip2\baselineskip\noindent,
after=\vskip2\baselineskip,
enhanced,
colback=boxbg,
frame code={},
fontupper=\normalsize,
}
}
%
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
\newtcolorbox{ideabox}{
common,
interior code={
\filldraw[ultra thick,densely dashed,fill=boxbg,draw=black,rounded corners=10pt] (interior.north west) rectangle (interior.south east);
\node at ([xshift=-20pt,yshift=8pt]interior.north east) {\includegraphics[width=1.5cm,angle=-30]{lightbulb}};
}
\vspace*{40pt}}
%
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
\newtcolorbox{questionbox}{
common,
interior code={
\filldraw[ultra thick,densely dashed,fill=boxbg,draw=black,rounded corners=10pt] (interior.north west) rectangle (interior.south east);
\node at ([xshift=-20pt,yshift=8pt]interior.north east) {\includegraphics[width=1.5cm,angle=-30]{questionmark}};
}
\vspace*{40pt}}
%
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
\newtcolorbox{apunte}{
common,
interior code={
\filldraw[ultra thick,densely dashed,fill=boxbg,draw=black,rounded corners=10pt] (interior.north west) rectangle (interior.south east);
\node at ([xshift=-30pt,yshift=8pt]interior.north east) {\includegraphics[width=1.5cm]{apuntes}};
}
\vspace*{20pt}}
%
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
\newtcolorbox{analiza}{
common,
interior code={
\filldraw[ultra thick,densely dashed,fill=boxbg,draw=black,rounded corners=10pt] (interior.north west) rectangle (interior.south east);
\node at ([xshift=-30pt,yshift=10pt]interior.north east) {\includegraphics[width=1.5cm]{image/warning}};
}
\vspace*{20pt}}
%
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
\newtcolorbox{piensa}{
common,
interior code={
\filldraw[ultra thick,densely dashed,fill=boxbg,draw=black,rounded corners=10pt] (interior.north west) rectangle (interior.south east);
\node at ([xshift=-30pt,yshift=10pt]interior.north east) {\includegraphics[width=1.5cm]{image/tipp}};
}
\vspace*{20pt}}
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
\newtcolorbox{resumen}{
common,
interior code={
\filldraw[ultra thick,densely dashed,fill=boxbg,draw=black,rounded corners=10pt] (interior.north west) rectangle (interior.south east);
\node at ([xshift=-30pt,yshift=8pt]interior.north east) {\includegraphics[angle=-30,width=1.5cm]{image/resumen}};
}
\vspace*{20pt}}
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% notes for remembering things %%%%%%%%%%%%%%
\definecolor{paper}{RGB}{239,227,157}
\usetikzlibrary{decorations.pathmorphing}
\newenvironment{notax}[1]{
\begin{tikzpicture}[pencildraw/.style={ %
decorate,
decoration={random steps,segment length=2pt,amplitude=1pt}
} %
]
\node[
preaction={fill=black,opacity=.5,transform canvas={xshift=1mm,yshift=-1mm}},
pencildraw,draw,fill=paper,text width=.8\textwidth,inner sep=5mm]
{#1};
\end{tikzpicture}
}{\vskip 20pt}
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
\usetikzlibrary{arrows,shadows}
\newtcolorbox{nota}{%
enhanced jigsaw, breakable, % allow page breaks
frame hidden, % hide the default frame
overlay={%
\draw [
fill=ptcbackground, % fill paper
draw=yellow!20!white, % boundary colour
decorate, % decoration
decoration={random steps,segment length=2pt,amplitude=1pt},
drop shadow, % shadow
]
% top line
(frame.north west)--(frame.north east)--
% right line
(frame.north east)--(frame.south east)--
% bottom line
(frame.south east)--(frame.south west)--
% left line
(frame.south west)--(frame.north west);
},
% paragraph skips obeyed within tcolorbox
parbox=false,
}
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
\newcounter{praproblem}[chapter]
\renewcommand\thepraproblem{\thesection.\arabic{praproblem}}
\newtcolorbox{praproblem}{
before=\bigskip\centering,
after=\bigskip,
breakable,
enhanced,
colback=white,
boxrule=0pt,
arc=0pt,
outer arc=0pt,
fontupper=\small,
title=Problemas para practicar~\thepraproblem,
fonttitle=\bfseries\sffamily\large\strut,
coltitle=problemblue,
colbacktitle=problemblue,
title style={
left color=orange!60,
right color=white,
middle color=white
},
overlay={
\draw[line width=1.5pt,problemblue] (title.north west) -- (title.north east);
\draw[line width=1.5pt,problemblue] (frame.south west) -- (frame.south east);
}
}
\BeforeBeginEnvironment{praproblem}{\refstepcounter{praproblem}}
%
%%%%%%%%%%5
%
\newenvironment{tproblem}[1]{
\begin{praproblem}
\begin{multicols}{2}
#1
\end{multicols}
}{\end{praproblem}}
%
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
\newtcolorbox[auto counter,number within=section]{desafio}{
breakable,
enhanced,
colback=white,
boxrule=0pt,
arc=0pt,
outer arc=0pt,
  title=\textcolor{white}{Problemas desafiantes:~\thetcbcounter},
fonttitle=\bfseries\sffamily\large\strut,
coltitle=problemblue,
colbacktitle=problemblue,
% title style={
% %exercisebgblue
% interior style={fill=idiomsgreen}
% },
overlay={
\draw[line width=1.5pt,problemblue] (frame.south west) -- (frame.south east);
}
}
%
%%%%%
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%%%%%%%%%%%%%%%%%%%%%%%%%%%% problems %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
\newcommand{\inline}{\refstepcounter{equation}~~\mbox{\color{blue!50!black}(\theequation)}}% numbers an inline equation
%%%%%%%%%%%%%%%%%%%%%%% adjust margins %%%%%%%%%%%%%%%%%%%%%%%%
\newcommand{\problemas}[1]
{
\section{Ejercicios propuestos}
\small
\begin{adjustwidth}{-1cm}{-4cm}
%\begin{multicols}{2}
\noindent #1
%\end{multicols}
{\setlength{\parindent}{0mm}\color{blue!50!black}\rule{\linewidth}{1mm}}
\end{adjustwidth}
}
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%%%%%%%%%%%%%%%%%%%%%%%% solution %%%%%%%%%%%%%%%%%%
\newcounter{solucion}[chapter]
%\renewcommand\theexample{\thesection.\arabic{example}}
\tcbset{solucionbox/.style={%
title={Soluci\'on},
breakable,
leftrule=0pt,
arc=0pt,
colback=white,
colframe=titlebgdark,
enhanced,
colbacktitle=white,
coltitle=blue!50!black,
titlerule=0pt,
enlarge left by=-4mm,
width=\linewidth+4mm,
enlarge top by=2pt,
overlay unbroken={%
\draw[titlebgdark,line width=2pt] (frame.north west)++(0,0.25mm) --++(4cm,0pt) ;
\draw[white,line width=10mm] (frame.south west) --++(0cm,0pt) node (P) {};
\fill[titlebgdark] (P) rectangle ++(6pt,6pt) ;
},%
%%%%%%%%%%%%%%%
overlay first={
\draw[titlebgdark,line width=2pt] (frame.north west)++(0,1pt) --++(4cm,0pt);
},%
%%%%%%%%%%%%%%%%%%%%%%%5
overlay last={
\draw[white,line width=10mm] (frame.south west) --++(8cm,0pt) node (P) {};
\fill[titlebgdark] (P) rectangle ++(6pt,6pt) ;},%
}%
}
\newenvironment{solucionn}{%
\tcolorbox[solucionbox]}%
{\endtcolorbox}
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%%%%%%%%%%%%%%%%%%%%%%%% proof %%%%%%%%%%%%%%%%%%
\newcounter{prueba}[chapter]
%\renewcommand\theexample{\thesection.\arabic{example}}
\tcbset{pruebabox/.style={%
title={Prueba},
breakable,
leftrule=0pt,
arc=0pt,
colback=white,
colframe=cyan,
enhanced,
colbacktitle=white,
coltitle=cyan,
titlerule=0pt,
enlarge left by=-4mm,
width=\linewidth+4mm,
enlarge top by=2pt,
overlay unbroken={\draw[cyan,line width=2pt] (frame.north west)++(0,0.25mm) --++(4cm,0pt);
\draw[white,line width=10mm] (frame.south west) --++(8cm,0pt) node (P) {};
\fill[cyan] (P) rectangle ++(6pt,6pt) ;},%
overlay first={\draw[cyan,line width=2pt] (frame.north west)++(0,1pt) --++(4cm,0pt);},%
overlay last={\draw[white,line width=10mm] (frame.south west) --++(8cm,0pt) node (P) {};
\fill[cyan] (P) rectangle ++(6pt,6pt) ;},%
}%
}
\newenvironment{prueba}{%
\tcolorbox[pruebabox]}%
{\hfill \textcolor{ptctitle}{$\blacksquare$}(q.e.d)\endtcolorbox}
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
\tcbset{demsbox/.style={%
title={\bf Demostraci\'on:},
breakable,
leftrule=0pt,
arc=0pt,
colback=white,
colframe=red!50!black,
enhanced,
colbacktitle=white,
coltitle=red!50!black,
titlerule=0pt,
enlarge left by=-4mm,
width=\linewidth+4mm,
enlarge top by=2pt,
overlay unbroken={\draw[red!50!black,line width=2pt] (frame.north west)++(0,0.25mm) --++(4cm,0pt);
\draw[white,line width=10mm] (frame.south west) --++(8cm,0pt) node (P) {};
\fill[red!50!black] (P) rectangle ++(6pt,6pt) ;},%
overlay first={\draw[red!50!black,line width=2pt] (frame.north west)++(0,1pt) --++(4cm,0pt);},%
overlay last={\draw[white,line width=10mm] (frame.south west) --++(8cm,0pt) node (P) {};
\fill[red!50!black] (P) rectangle ++(6pt,6pt) ;},%
}%
}
\newenvironment{dems}{%
\tcolorbox[demsbox]}%
{\hfill \textcolor{ptctitle}{$\blacksquare$}(q.e.d)\endtcolorbox}
%
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%\\
\newenvironment{solver}{\hspace*{-30pt}
\colorbox{blue!50!black}{%
\parbox[c][16pt][c]{50pt}{%
\centering\textcolor{white}{\SectionFont\rmfamily Soluci\'on}}}
\color{blue!50!black}{\rule{\dimexpr\textwidth-5pt-2\fboxsep\relax}{2pt}}\\[10pt]
\color{black}
}{\\[10pt] \color{blue!50!black}{\rule{\dimexpr \textwidth+30pt\relax}{2pt}}\\[20pt]}
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% normal box
\newcommand{\sqboxs}{1.2ex}% the square size
\newcommand{\sqboxf}{0.6pt}% the border in \sqboxEmpty
\newcommand{\sqbox}[1]{\textcolor{#1}{\rule{\sqboxs}{\sqboxs}}}
% empty box
\newcommand{\sqboxEmpty}[1]{%
\begingroup
\setlength{\fboxrule}{\sqboxf}%
\setlength{\fboxsep}{-\fboxrule}%
\textcolor{#1}{\fbox{\rule{0pt}{\sqboxs}\rule{\sqboxs}{0pt}}}%
\endgroup
}
\newenvironment{probar}{
\hspace*{-30pt}\colorbox{blue!50!black}{%
\parbox[c][16pt][c]{80pt}{%
\centering\textcolor{white}{\SectionFont\rmfamily Demostraci\'on}}}
\vspace{-1.2\baselineskip}
\color{blue!50!black}{\rule{\dimexpr\textwidth-5pt-6\fboxsep\relax}{2pt}}
\\[10pt]
\noindent \color{black}\\[10pt]
}{ \hfill \sqboxEmpty{red!50!black}
\color{blue!50!black}{\rule{\dimexpr \textwidth+30pt\relax}{2pt}}\\[20pt]}
\newcommand{\linea}{{\setlength{\parindent}{0mm}\color{blue!50!black}\rule{\linewidth}{1mm}}}
| {
"alphanum_fraction": 0.602617366,
"avg_line_length": 39.8509933775,
"ext": "tex",
"hexsha": "f31cccd304b4967a1c9c0d2c36f489deed83211d",
"lang": "TeX",
"max_forks_count": null,
"max_forks_repo_forks_event_max_datetime": null,
"max_forks_repo_forks_event_min_datetime": null,
"max_forks_repo_head_hexsha": "caffa81c23e0755b1f4da82e6484772d147c129f",
"max_forks_repo_licenses": [
"MIT"
],
"max_forks_repo_name": "antalcides/plantilla-calculo",
"max_forks_repo_path": "styles/newcomman.tex",
"max_issues_count": null,
"max_issues_repo_head_hexsha": "caffa81c23e0755b1f4da82e6484772d147c129f",
"max_issues_repo_issues_event_max_datetime": null,
"max_issues_repo_issues_event_min_datetime": null,
"max_issues_repo_licenses": [
"MIT"
],
"max_issues_repo_name": "antalcides/plantilla-calculo",
"max_issues_repo_path": "styles/newcomman.tex",
"max_line_length": 139,
"max_stars_count": null,
"max_stars_repo_head_hexsha": "caffa81c23e0755b1f4da82e6484772d147c129f",
"max_stars_repo_licenses": [
"MIT"
],
"max_stars_repo_name": "antalcides/plantilla-calculo",
"max_stars_repo_path": "styles/newcomman.tex",
"max_stars_repo_stars_event_max_datetime": null,
"max_stars_repo_stars_event_min_datetime": null,
"num_tokens": 8032,
"size": 24070
} |
\documentclass[a4paper,titlepage,12pt]{article}
\usepackage[utf8]{inputenc}
\usepackage[T1]{fontenc}
\usepackage[margin=1in]{geometry}
\usepackage{parskip}
\usepackage{hyperref}
\usepackage{titlesec}
\hypersetup{
colorlinks,
pdfauthor=Delan Azabani,
pdftitle=Software Engineering 200: Mars rover assignment
}
\title{Software Engineering 200:\\Mars rover assignment}
\date{October 21, 2014}
\author{Delan Azabani}
\pagenumbering{gobble}
\thispdfpagelabel0
\titlespacing*\section{0pt}{0.5em}{0.5em}
\begin{document}
\maketitle
\pagenumbering{arabic}
To compile and run this assignment submission, use the following
commands:
\texttt{\% ant \&\& java -jar dist/rover.jar}
\section{Part (c): system walkthrough}
Let's say that mission control sent the following message to our rover
on Mars:
\texttt{analyse;translate:-100;photograph;rotate:-180}
Each message represents a task list, where the tasks are separated by
semicolons. Each task has a name, such as one shown above or
\texttt{call}, which executes a previous task list. Parameters for a
task such as angles, distances and task list numbers are written after
their task name, separated by a colon.
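
To make the format concrete, the following is a minimal, standalone Java
sketch that splits such a message into task names and parameters. It only
illustrates the grammar described above; the rover's real parsing lives in
\texttt{RoverCommandCodec} and is discussed below.
\begin{verbatim}
// Standalone illustration of the message grammar only; the real
// parsing is done elsewhere in the system.
public class MessageGrammarDemo {
    public static void main(String[] args) {
        String message = "analyse;translate:-100;photograph;rotate:-180";
        for (String task : message.split(";")) {
            String[] parts = task.split(":", 2);  // name or name:parameter
            String name = parts[0];
            String parameter = parts.length > 1 ? parts[1] : "(none)";
            System.out.println(name + " -> " + parameter);
        }
    }
}
\end{verbatim}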
From the perspective of our rover's system, this message starts at
\texttt{Comm.receive(String)}, which is implemented by
\texttt{ConcreteComm.receive(String)}. Because \texttt{ConcreteComm}
is a subject in the Observer pattern, all \texttt{receive()} needs to
do is call the subject's method named \texttt{notifyObservers()}, which
required by the \texttt{ObservableDevice} interface.
\texttt{notifyObservers()} then iterates through each of the
\texttt{DeviceObserver} objects who are listening, calling their
\texttt{update()} methods with the message string from
\texttt{receive()}. In practice, the only observer of
\texttt{ConcreteComm} is one \texttt{CommObserver} object.
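
In outline, that wiring looks roughly like the sketch below. Only the type
names come from the system; the method signatures and the observer list are
simplifying assumptions.
\begin{verbatim}
// Hedged sketch of the observer wiring; method names other than
// receive(), update() and notifyObservers() are assumptions.
import java.util.ArrayList;
import java.util.List;

interface DeviceObserver {
    void update(Object data);
}

interface ObservableDevice {
    void addObserver(DeviceObserver observer);
    void notifyObservers(Object data);
}

class ConcreteComm implements ObservableDevice {
    private final List<DeviceObserver> observers = new ArrayList<>();

    public void addObserver(DeviceObserver observer) {
        observers.add(observer);
    }

    public void notifyObservers(Object data) {
        for (DeviceObserver observer : observers) {
            observer.update(data);   // CommObserver downcasts to String
        }
    }

    public void receive(String message) {
        notifyObservers(message);    // all receive() needs to do
    }
}
\end{verbatim}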
\texttt{CommObserver.update()} takes the message and simply downcasts
it to a \texttt{String}. Then it finishes by passing it to
\texttt{controller.messageReceived()}, where \texttt{controller} is the
\texttt{RoverController} that owns the \texttt{CommObserver}.
\texttt{RoverController.messageReceived()} instantiates a
\texttt{RoverCommandCodec} to parse the message and turn it into a task
list. There's an unfortunate level of coupling here because the task
objects that its \texttt{decode()} method creates need to have
references to various devices (a \texttt{Camera}, a \texttt{Driver} and
a \texttt{SoilAnalyser}). Worse still, a \texttt{RecursionCommand}
needs to be able to read existing task lists and modify the execution
stack. As a result, the \texttt{RoverController} needs to pass many
parameters to \texttt{decode()}.
In return, we now have a \texttt{RoverTaskList} containing a
\texttt{RoverCommand} for each task in the incoming message, and we
append this task list to \texttt{program}, a \texttt{RoverListList}
which represents a list of task lists. What a mouthful.
In another thread, \texttt{RoverController.start()} has been polling
an execution stack called \texttt{stack} jadedly, because thus far it
has remained empty. The stack simply contains \texttt{Iterator} objects
over the active task lists. As there's now a task list in
\texttt{program}, we take an iterator for it and push it onto the
stack. Whenever \texttt{busy} is \texttt{false} (iff no tasks are
running) and there's a task list on the stack, we grab the next task
with \texttt{Iterator.next()} and call its \texttt{execute()} method.
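
A simplified version of that loop is sketched below, with \texttt{Runnable}
standing in for \texttt{RoverCommand}; the field names and the exact
handling of \texttt{busy} are assumptions.
\begin{verbatim}
// Rough sketch of the polling loop; Runnable stands in for
// RoverCommand, and busy is normally cleared by an observer when
// the device reports back.
import java.util.ArrayDeque;
import java.util.Deque;
import java.util.Iterator;

class ControllerLoopSketch {
    private final Deque<Iterator<Runnable>> stack = new ArrayDeque<>();
    private volatile boolean busy = false;

    void step() {
        if (!busy && !stack.isEmpty()) {
            Iterator<Runnable> tasks = stack.peek();
            if (tasks.hasNext()) {
                busy = true;         // reset once the device reports back
                tasks.next().run();  // stands in for execute()
            } else {
                stack.pop();         // this task list is exhausted
            }
        }
    }
}
\end{verbatim}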
\texttt{AnalysisCommand.execute()} calls
\texttt{soilAnalyser.analyse()} using its private reference field, and
\texttt{ConcreteSoilAnalyser} sends the results of the analysis back to
the controller using the same Observer workflow discussed previously.
Finally, \texttt{RoverController.analysisReceived()} calls
\texttt{Comm.send()} and Earth is pleased with the rover. Rinse and
repeat with the rest of the tasks and task lists.
\section{Part (d): design patterns}
The Observer pattern is essential and works well with the asynchronous
nature of this system. It's significantly cleaner than setting and
polling for changes in global state.
\texttt{Concrete\{Comm,Driver,SoilAnalyser,Camera\}} are the subjects,
each implementing the \texttt{ObservableDevice} interface, while
\texttt{\{Comm,Driver,SoilAnalyser,Camera\}Observer} are the observers,
implementing \texttt{DeviceObserver}. \texttt{RoverController} can even
be seen as an overarching observer of the other observers.
Dependency injection is used to decouple the \texttt{RoverController}
from any particular set of concrete device classes that it uses.
\texttt{RoverProgram} is the entry point of execution, and it is
responsible for injecting instances of
\texttt{Concrete\{Comm,Driver,SoilAnalyser,Camera\}} or any subclasses
of these into a new controller object.
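
In essence the injection looks like the sketch below; only \texttt{Comm} and
\texttt{Driver} are shown, their methods are assumed, and the lambdas merely
stand in for whatever concrete devices the entry point chooses.
\begin{verbatim}
// Minimal sketch of constructor injection; the device methods shown
// here are assumptions, and the real controller also receives a
// SoilAnalyser and a Camera.
interface Comm { void send(String message); }
interface Driver { void translate(int distance); }

class ControllerSketch {
    private final Comm comm;
    private final Driver driver;

    ControllerSketch(Comm comm, Driver driver) {
        this.comm = comm;       // any Comm implementation will do
        this.driver = driver;   // likewise for Driver
    }
}

class EntryPointSketch {
    public static void main(String[] args) {
        // Only the entry point names the concrete (or fake) devices.
        ControllerSketch controller = new ControllerSketch(
            message -> System.out.println("send: " + message),
            distance -> System.out.println("drive: " + distance));
    }
}
\end{verbatim}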
The Factory pattern is realised through \texttt{RoverCommandCodec}. By
refactoring task list message parsing out of \texttt{RoverController},
the code which instantiates various subclasses of \texttt{RoverCommand}
is isolated in its own class without any other functionality.
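
The idea, reduced to a toy example with hypothetical class names, is that the
mapping from task name to command object is confined to a single method:
\begin{verbatim}
// Toy factory; the class names here are hypothetical stand-ins for
// the RoverCommand subclasses created by the real codec.
interface Task { void execute(); }

class TranslateTask implements Task {
    private final int distance;
    TranslateTask(int distance) { this.distance = distance; }
    public void execute() { System.out.println("translate " + distance); }
}

class PhotographTask implements Task {
    public void execute() { System.out.println("photograph"); }
}

class TaskFactory {
    static Task create(String name, String parameter) {
        switch (name) {
            case "translate":
                return new TranslateTask(Integer.parseInt(parameter));
            case "photograph":
                return new PhotographTask();
            default:
                throw new IllegalArgumentException("unknown task: " + name);
        }
    }
}
\end{verbatim}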
\texttt{RoverCommand} is an interface that demonstrates the Command
pattern in combination with
\texttt{\{Rotation,Translation,Analysis,Photography,Recursion\}Command},
the classes that implement it. The key benefit of using this pattern
here is that it allows the code in \texttt{RoverController} to deal
purely with coordinating the execution stack, while treating tasks as
opaque entities. All the controller does is choose the time of
execution.
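
The controller's side of the pattern is correspondingly small, something like
the following sketch, where only the interface name \texttt{RoverCommand} is
taken from the system:
\begin{verbatim}
// The scheduler decides when to call execute(), never how a task
// does its work; SchedulerSketch is an assumed helper, not real code.
interface RoverCommand { void execute(); }

class SchedulerSketch {
    void runAll(Iterable<RoverCommand> tasks) {
        for (RoverCommand task : tasks) {
            task.execute();   // each task is opaque to the scheduler
        }
    }
}
\end{verbatim}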
The container classes \texttt{RoverListList} and \texttt{RoverTaskList}
are the subjects of the Iterator pattern, which allows the
\texttt{RoverController} to keep track of a `cursor' in each task list
without the need for clumsy integral indices. In practice I ended up being
unable to take advantage of the pattern with \texttt{RoverListList},
because its iterator is invalidated whenever the list of task lists is
modified, which occurs whenever a new task list arrives.
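
Assuming the list of task lists wraps an \texttt{ArrayList}, this is the
standard fail-fast iterator behaviour, which the following standalone snippet
reproduces:
\begin{verbatim}
// Fail-fast iterator demonstration, assuming ArrayList-backed storage:
// modifying the list after obtaining an iterator invalidates it.
import java.util.ArrayList;
import java.util.Iterator;
import java.util.List;

public class IteratorInvalidationDemo {
    public static void main(String[] args) {
        List<String> taskLists = new ArrayList<>();
        taskLists.add("first task list");
        Iterator<String> it = taskLists.iterator();
        taskLists.add("second task list");  // a new task list arrives
        it.next();  // throws ConcurrentModificationException
    }
}
\end{verbatim}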
\section{Part (e): alternative design choices}
With my final design, I couldn't take advantage of Java's native
implementation of the Observer pattern, because \texttt{Observable}
is a class, not an interface, and I was already using my implementation
inheritance relationship with \texttt{Comm}, \texttt{Driver} and the
like. I could have instead decorated \texttt{Concrete*} with four
classes that extended \texttt{Observable}. While this would obviate the
need for boring Observer code, it would introduce delegate methods
instead. I chose the route with fewer classes overall.
\texttt{RecursionCommand} is unlike the other task classes, because it
doesn't really \textit{do} anything other than push an old task list
reference onto the execution stack. There's no device associated with
it, and thus no observer to tell the controller to reset \texttt{busy}.
I ended up working around this by `tagging' the other task classes with
the empty interface \texttt{AsynchronousCommand}, and checking for this
`tag' with \texttt{instanceof} after firing off \texttt{execute()}. One
could argue that this may be an anti-pattern, as it's a bit of a
semantic abuse of the interface language feature. After finishing the
assignment, I discovered that this technique was common before Java
introduced annotations; had I known about annotations earlier, I would
have certainly preferred to use them instead.
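
Reduced to a sketch with elided bodies and a simplified controller fragment,
the workaround looks like this:
\begin{verbatim}
// Marker-interface workaround; only the type names follow the report,
// the surrounding logic is simplified.
interface RoverCommand { void execute(); }
interface AsynchronousCommand { }  // empty "tag" interface

class TranslationCommand implements RoverCommand, AsynchronousCommand {
    public void execute() { /* ask the Driver to move */ }
}

class RecursionCommand implements RoverCommand {
    public void execute() { /* push an old task list onto the stack */ }
}

class ControllerFragment {
    private boolean busy;

    void run(RoverCommand command) {
        command.execute();
        // Only device-backed commands finish later via an observer;
        // anything else is complete as soon as execute() returns.
        busy = command instanceof AsynchronousCommand;
    }
}
\end{verbatim}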
\section{Part (f): separation of concerns}
At the risk of rehashing my answer to part (d), virtually every design
pattern I used was chosen at least partially due to its capacity to
improve the separation of concerns within classes inside the Mars rover
system.
The Observer pattern separated what was to happen after a device had
new information, from each device's specific events and their
implementations.
The Factory pattern separated the choice of concrete task classes from
\texttt{RoverController}, while dependency injection separated the
choice of device classes from its constructor.
The Command pattern separates the `when' from the `how' of executing
each task.
The Iterator pattern separates the act of walking through tasks from
the underlying data structure used, because only the former is relevant
to the controller class.
\section{Part (g): avoiding unnecessary duplication}
The design used in this assignment does not use any delegate methods or
decoration classes outside of \texttt{RoverListList} and
\texttt{RoverTaskList}, which are basically encapsulating
\textit{semantic} sugar around what are essentially \texttt{ArrayList}
objects.
I've taken advantage of the existing relevant exception class
\texttt{IllegalArgumentException} rather than creating my own subclass
just for this system. For readers of the source code, this provides the
slight additional benefit of familiarity.
The use of tedious \texttt{switch}-like constructs on external input
has been restricted to one such usage inside
\texttt{RoverCommandCodec.decode()} only.
The frequently used \texttt{Thread.sleep} wrapped in a
\texttt{try}-\texttt{catch} for \texttt{InterruptedException} has
been refactored into a static method in \texttt{RoverUtils} for
clarity of reading.
\section{Part (h): testability}
As the name suggests, the package \texttt{com.azabani.java.rover.fake}
contains fake versions of the classes \texttt{Comm}, \texttt{Driver},
\texttt{SoilAnalyser} and \texttt{Camera}. While these already
existed hypothetically, and are external to the system, they were
important for four reasons:
\begin{itemize}
\item They allowed me to build the system during development
and check the syntax;
\item The compiler would also check data types, a weak form of
semantic verification;
\item I could craft a limited set of test data to use as an
occasional sanity check; and
\item Being able to run parts of the system while writing it
allowed me to better visualise where I was going with the
system, because I felt less `blind'.
\end{itemize}
Given more time, I would have developed unit test harnesses using JUnit
and Mockito to test the system in a much more rigorous manner,
employing the techniques covered in \textit{Software Engineering 110}.
The use of the Factory pattern and dependency injection would certainly
help with unit testing, because of the way they effectively decouple
classes.
\end{document}
| {
"alphanum_fraction": 0.7963766021,
"avg_line_length": 44.5364806867,
"ext": "tex",
"hexsha": "3c04ad403146eb3069f246402cfa4b936b3ecfe5",
"lang": "TeX",
"max_forks_count": null,
"max_forks_repo_forks_event_max_datetime": null,
"max_forks_repo_forks_event_min_datetime": null,
"max_forks_repo_head_hexsha": "f9499d44094e99fb6b980fc0c7ea57f535a9ca27",
"max_forks_repo_licenses": [
"0BSD"
],
"max_forks_repo_name": "delan/rover",
"max_forks_repo_path": "report/source.tex",
"max_issues_count": null,
"max_issues_repo_head_hexsha": "f9499d44094e99fb6b980fc0c7ea57f535a9ca27",
"max_issues_repo_issues_event_max_datetime": null,
"max_issues_repo_issues_event_min_datetime": null,
"max_issues_repo_licenses": [
"0BSD"
],
"max_issues_repo_name": "delan/rover",
"max_issues_repo_path": "report/source.tex",
"max_line_length": 72,
"max_stars_count": null,
"max_stars_repo_head_hexsha": "f9499d44094e99fb6b980fc0c7ea57f535a9ca27",
"max_stars_repo_licenses": [
"0BSD"
],
"max_stars_repo_name": "delan/rover",
"max_stars_repo_path": "report/source.tex",
"max_stars_repo_stars_event_max_datetime": null,
"max_stars_repo_stars_event_min_datetime": null,
"num_tokens": 2515,
"size": 10377
} |
\documentclass[11pt, a4paper, final]{article}
\usepackage[pdftex]{graphicx}
\usepackage{a4wide}
\title{Text analysis}
\author{Me}
\begin{document}
\maketitle
\section{Longest words}
Figure \ref{fig:longest_words} shows the lengths of the longest words in various texts.
\begin{figure}[htb]
\centering
\includegraphics[width=\textwidth]{longest_words}
\caption{Longest words.}
\label{fig:longest_words}
\end{figure}
\end{document} | {
"alphanum_fraction": 0.7744874715,
"avg_line_length": 19.9545454545,
"ext": "tex",
"hexsha": "7cb24ac66f5826bc760f97c0cd991cc4f9ee65e8",
"lang": "TeX",
"max_forks_count": null,
"max_forks_repo_forks_event_max_datetime": null,
"max_forks_repo_forks_event_min_datetime": null,
"max_forks_repo_head_hexsha": "134f409a6cb4efe7c64c563f64d2c8ed1311407f",
"max_forks_repo_licenses": [
"MIT"
],
"max_forks_repo_name": "dw-ngcm/scientific_programming",
"max_forks_repo_path": "exercises/solutions/text_analysis.tex",
"max_issues_count": null,
"max_issues_repo_head_hexsha": "134f409a6cb4efe7c64c563f64d2c8ed1311407f",
"max_issues_repo_issues_event_max_datetime": null,
"max_issues_repo_issues_event_min_datetime": null,
"max_issues_repo_licenses": [
"MIT"
],
"max_issues_repo_name": "dw-ngcm/scientific_programming",
"max_issues_repo_path": "exercises/solutions/text_analysis.tex",
"max_line_length": 90,
"max_stars_count": null,
"max_stars_repo_head_hexsha": "134f409a6cb4efe7c64c563f64d2c8ed1311407f",
"max_stars_repo_licenses": [
"MIT"
],
"max_stars_repo_name": "dw-ngcm/scientific_programming",
"max_stars_repo_path": "exercises/solutions/text_analysis.tex",
"max_stars_repo_stars_event_max_datetime": null,
"max_stars_repo_stars_event_min_datetime": null,
"num_tokens": 137,
"size": 439
} |
\chapter{Instrumental Variables}
| {
"alphanum_fraction": 0.8484848485,
"avg_line_length": 16.5,
"ext": "tex",
"hexsha": "cb7b07c8ae83ca28a72d7b29b0d972237feae14e",
"lang": "TeX",
"max_forks_count": null,
"max_forks_repo_forks_event_max_datetime": null,
"max_forks_repo_forks_event_min_datetime": null,
"max_forks_repo_head_hexsha": "5577f1f25c7805ad21639a1cb30d7538f3df7c4f",
"max_forks_repo_licenses": [
"MIT"
],
"max_forks_repo_name": "saiteki-kai/appunti-magistrale",
"max_forks_repo_path": "05 - Causal Networks/chapters/09_instrumental_variables.tex",
"max_issues_count": null,
"max_issues_repo_head_hexsha": "5577f1f25c7805ad21639a1cb30d7538f3df7c4f",
"max_issues_repo_issues_event_max_datetime": null,
"max_issues_repo_issues_event_min_datetime": null,
"max_issues_repo_licenses": [
"MIT"
],
"max_issues_repo_name": "saiteki-kai/appunti-magistrale",
"max_issues_repo_path": "05 - Causal Networks/chapters/09_instrumental_variables.tex",
"max_line_length": 32,
"max_stars_count": null,
"max_stars_repo_head_hexsha": "5577f1f25c7805ad21639a1cb30d7538f3df7c4f",
"max_stars_repo_licenses": [
"MIT"
],
"max_stars_repo_name": "saiteki-kai/appunti-magistrale",
"max_stars_repo_path": "05 - Causal Networks/chapters/09_instrumental_variables.tex",
"max_stars_repo_stars_event_max_datetime": null,
"max_stars_repo_stars_event_min_datetime": null,
"num_tokens": 7,
"size": 33
} |
\subsubsection{Acknowledgement.}
This work is supported by {\color{red}Grant of Existential Crisis}.
The author would like to thank the author.
| {
"alphanum_fraction": 0.7986111111,
"avg_line_length": 36,
"ext": "tex",
"hexsha": "2e4ba3855b4f50dc204255d054e4aee2b546385e",
"lang": "TeX",
"max_forks_count": null,
"max_forks_repo_forks_event_max_datetime": null,
"max_forks_repo_forks_event_min_datetime": null,
"max_forks_repo_head_hexsha": "c127388a2c66f086c86e60a8459451ecf6885982",
"max_forks_repo_licenses": [
"MIT"
],
"max_forks_repo_name": "GeeLaw/laomian",
"max_forks_repo_path": "crypto-paper/paper/acknowledgement.tex",
"max_issues_count": null,
"max_issues_repo_head_hexsha": "c127388a2c66f086c86e60a8459451ecf6885982",
"max_issues_repo_issues_event_max_datetime": null,
"max_issues_repo_issues_event_min_datetime": null,
"max_issues_repo_licenses": [
"MIT"
],
"max_issues_repo_name": "GeeLaw/laomian",
"max_issues_repo_path": "crypto-paper/paper/acknowledgement.tex",
"max_line_length": 67,
"max_stars_count": 8,
"max_stars_repo_head_hexsha": "c127388a2c66f086c86e60a8459451ecf6885982",
"max_stars_repo_licenses": [
"MIT"
],
"max_stars_repo_name": "GeeLaw/laomian",
"max_stars_repo_path": "crypto-paper/paper/acknowledgement.tex",
"max_stars_repo_stars_event_max_datetime": "2022-03-23T07:59:49.000Z",
"max_stars_repo_stars_event_min_datetime": "2021-01-12T17:21:20.000Z",
"num_tokens": 33,
"size": 144
} |
%-------------------------
% Resume in Latex
% Author : Adam Du
% License : MIT
%------------------------
\documentclass[a4paper,11pt]{article}
\usepackage{latexsym}
\usepackage[empty]{fullpage}
\usepackage{titlesec}
\usepackage{marvosym}
\usepackage[usenames,dvipsnames]{color}
\usepackage{verbatim}
\usepackage{enumitem}
\usepackage[hidelinks]{hyperref}
\usepackage{fancyhdr}
\usepackage[english]{babel}
\pagestyle{fancy}
\fancyhf{} % clear all header and footer fields
\fancyfoot{}
\renewcommand{\headrulewidth}{0pt}
\renewcommand{\footrulewidth}{0pt}
% Adjust margins
\addtolength{\oddsidemargin}{-0.5in}
\addtolength{\evensidemargin}{-0.5in}
\addtolength{\textwidth}{1in}
\addtolength{\topmargin}{-.5in}
\addtolength{\textheight}{1.0in}
\urlstyle{same}
\raggedbottom
\raggedright
\setlength{\tabcolsep}{0in}
% Sections formatting
\titleformat{\section}{
\vspace{-4pt}\scshape\raggedright\large
}{}{0em}{}[\color{black}\titlerule \vspace{-5pt}]
%-------------------------
% Custom commands
\newcommand{\resumeItem}[2]{
\item\small{
\textbf{#1}{: #2 \vspace{-2pt}}
}
}
\newcommand{\resumeItemNoDots}[2]{
\item\small{
\textbf{#1}{#2 \vspace{-2pt}}
}
}
\newcommand{\resumeItemNoDotsSpace}[2]{
\item\small{
\textbf{#1}{#2 \vspace{-2pt}}
}
}
\newcommand{\resumeSubheadingEducation}[4]{
\vspace{-1pt}\item
\begin{tabular*}{0.97\textwidth}[t]{l@{\extracolsep{\fill}}r}
\textbf{#1} & #2 \\
\textit{\small#3} & \textit{\small #4} \\
\end{tabular*}\vspace{-5pt}
}
\newcommand{\resumeSubheading}[4]{
\vspace{-1pt}\item
\begin{tabular*}{0.97\textwidth}[t]{l@{\extracolsep{\fill}}r}
\textbf{#1} & #2 \\
\textit{\small#3} & \textit{\small #4} \\
\end{tabular*}\vspace{-5pt}
}
\newcommand{\resumeSubItem}[2]{\resumeItem{#1}{#2}\vspace{-4pt}}
\renewcommand{\labelitemii}{$\circ$}
\newcommand{\resumeSubHeadingListStart}{\begin{itemize}[leftmargin=*]}
\newcommand{\resumeSubHeadingListEnd}{\end{itemize}}
\newcommand{\resumeItemListStart}{\begin{itemize}}
\newcommand{\resumeItemListEnd}{\end{itemize}\vspace{-5pt}}
%-------------------------------------------
%%%%%% CV STARTS HERE %%%%%%%%%%%%%%%%%%%%%%%%%%%%
\begin{document}
%----------HEADING-----------------
\begin{tabular*}{\textwidth}{l@{\extracolsep{\fill}}r}
\textbf{\Large Adam Du} & Email : \href{mailto:[email protected]}{[email protected]}\\
\href{https://www.openstreetmap.org/node/5421567814}{78 Disraeli St, Epsom, Auckland} & Mobile : 022 198 0578 \\
  & \href{https://github.com/RobertJGabriel}{GitHub} - \href{https://www.linkedin.com/in/robertjamesgabriel/}{LinkedIn} - \href{https://www.robertgabriel.ninja/}{Website} -
\href{https://www.robertgabriel.ninja/awards}{Awards}
\\
\end{tabular*}
%-----------EXPERIENCE-----------------
\section{\href{https://github.com/RobertJGabriel}{Experience}}
\resumeSubHeadingListStart
\resumeSubheading
{Carrick Just Capital Markets Limited}{Auckland, New Zealand}
{Software and Support Developer}{June 2019 - Present}
\resumeItemListStart
\resumeItemNoDots{}
    {Led the conversion and re-architecture of the Teamwork.com website and platform to a Jamstack (Knockout) PWA design, reducing average page load times from 5 seconds to under 2 seconds. Built as a PWA, with API endpoints written in Golang and Coldfusion.}
\resumeItemNoDots{}
    {Designed and implemented a modern continuous deployment system, including staging/production builds, unit tests, Docker, Kubernetes and Travis.}
\resumeItemNoDots{}
    {Managed a team of four engineers and one designer, including handling time-off requests, interviewing potential interns, reviewing code and ensuring the team delivered tasks on time.}
\resumeItemNoDots{}
    {Frequently used various services, including AWS, S3, Travis, Kubernetes, Lambda, Jest, Lighthouse and Docker.}
\resumeItemNoDots{}
    {Reduced the time from development to release of the application from 20 minutes to under 10 minutes by improving the build portion of our continuous integration using Docker, Kubernetes, unit tests and Travis.}
\resumeItemNoDots{}
    {Built a Node module that automated the translation of web page text from English to several other languages, using the i18n specification.}
\resumeItemListEnd
{Software Engineer}
\resumeItemListStart
    \resumeItemNoDots{}{Implemented marketing tools and features within the products, website and blog.}
\resumeItemNoDots{}{Developed a Golang based API documentation generator.}
\resumeItemNoDots{}{Developed improvements to the Teamwork Projects importer that allows users to import their data from competitors directly.}
\resumeItemNoDots{}{Responsibilities included creating PHP, Coldfusion and Golang based server-side solutions, implemented a signup page used by millions, developed front-end solutions, CSS, Javascript and SEO improvements.}
\resumeItemListEnd
\resumeSubheading
{robertgabriel.ninja}{Remote}
{Founder / Open source Engineer}{June 2018 - Present}
\resumeItemListStart
\resumeItemNoDots{}
    {Founded two SaaS companies with over 50,000 daily users, one of which was acquired. Maintained open-source Golang and Node modules and created apps with over 100,000 users using AWS, Node, Golang, Vue, React and Webpack.}
\resumeItemListEnd
\resumeSubHeadingListEnd
%-----------EDUCATION-----------------
\section{\href{https://robertgabriel.ninja/education}{Education}}
\resumeSubHeadingListStart
\resumeSubheadingEducation
{University of Canterbury}{Christchurch, New Zealand}
{Bachelor of Science in Computer Science}{Jan 2015 - Apr 2018}
\resumeSubHeadingListEnd
%-----------PROJECTS-----------------
\section{\href{https://robertgabriel.ninja/projects}{Projects}}
\resumeSubHeadingListStart
\resumeSubItem{Projects}
{\href{https://github.com/OpenDyslexic/}{OpenDyslexic for Chrome},
\href{https://github.com/helperbird/}{Helperbird},
\href{https://github.com/RobertJGabriel/netflix-hidden-menu}{Netflix Hidden Categories},
\href{https://github.com/yvasiyarov/swagger}{Swagger for Golang}, \href{https://github.com/robertjgabriel/xbox-dvr}{Xbox One DVR}, \href{https://github.com/robertjgabriel/Google-profanity-words}{Google profanity}.
\href{https://www.robertgabriel/apps/}{More on robertgabriel/apps}.
}
\resumeSubHeadingListEnd
%--------SKILLS------------
\section{\href{https://robertgabriel.ninja/\#skills}{Skills}}
\resumeSubHeadingListStart
\resumeSubItem{Languages}{HTML, JavaScript (ES6, React, Vue.js, Redux), CSS, .NET Core, ASP.NET, PWA, service workers, NPM, Webpack, SQL, Coldfusion, PHP and Node.js.}
\resumeSubItem{Technologies}{Firebase, NoSQL, Serverless, Express.js, MongoDB, AWS, Git, Babel, Jest, Puppeteer, Kubernetes, Chrome, Netlify, Linux, Webpack, Selenium, methodologies (Agile,
Scrum, Kanban) and Docker}
\resumeSubHeadingListEnd
%-----------AWARDS-----------------
\section{\href{https://robertgabriel.ninja/awards}{Awards \& Honours}}
\resumeSubHeadingListStart
\resumeSubItem{Awards \& Honours}{12 Engineering awards, 8 workplace awards, 5 Scholar awards, 5 talks given and 3 grants.}
\resumeSubHeadingListEnd
%-------------------------------------------
\end{document}
| {
"alphanum_fraction": 0.7204647256,
"avg_line_length": 36.824742268,
"ext": "tex",
"hexsha": "83d5a3b4a7ca153b8c7049486424ba29404ce65a",
"lang": "TeX",
"max_forks_count": null,
"max_forks_repo_forks_event_max_datetime": null,
"max_forks_repo_forks_event_min_datetime": null,
"max_forks_repo_head_hexsha": "ad1c25ecb98e28e2ccf218d2e5acfde8c9fe03ca",
"max_forks_repo_licenses": [
"MIT"
],
"max_forks_repo_name": "dulinnan/resume",
"max_forks_repo_path": "resume.tex",
"max_issues_count": null,
"max_issues_repo_head_hexsha": "ad1c25ecb98e28e2ccf218d2e5acfde8c9fe03ca",
"max_issues_repo_issues_event_max_datetime": null,
"max_issues_repo_issues_event_min_datetime": null,
"max_issues_repo_licenses": [
"MIT"
],
"max_issues_repo_name": "dulinnan/resume",
"max_issues_repo_path": "resume.tex",
"max_line_length": 278,
"max_stars_count": null,
"max_stars_repo_head_hexsha": "ad1c25ecb98e28e2ccf218d2e5acfde8c9fe03ca",
"max_stars_repo_licenses": [
"MIT"
],
"max_stars_repo_name": "dulinnan/resume",
"max_stars_repo_path": "resume.tex",
"max_stars_repo_stars_event_max_datetime": null,
"max_stars_repo_stars_event_min_datetime": null,
"num_tokens": 2059,
"size": 7144
} |
% Default to the notebook output style
% Inherit from the specified cell style.
\documentclass{article}
\usepackage{graphicx} % Used to insert images
\usepackage{adjustbox} % Used to constrain images to a maximum size
\usepackage{color} % Allow colors to be defined
\usepackage{enumerate} % Needed for markdown enumerations to work
\usepackage{geometry} % Used to adjust the document margins
\usepackage{amsmath} % Equations
\usepackage{amssymb} % Equations
\usepackage{eurosym} % defines \euro
\usepackage[mathletters]{ucs} % Extended unicode (utf-8) support
\usepackage[utf8x]{inputenc} % Allow utf-8 characters in the tex document
\usepackage{fancyvrb} % verbatim replacement that allows latex
\usepackage{grffile} % extends the file name processing of package graphics
% to support a larger range
% The hyperref package gives us a pdf with properly built
% internal navigation ('pdf bookmarks' for the table of contents,
% internal cross-reference links, web links for URLs, etc.)
\usepackage{hyperref}
\usepackage{longtable} % longtable support required by pandoc >1.10
\usepackage{booktabs} % table support for pandoc > 1.12.2
\definecolor{orange}{cmyk}{0,0.4,0.8,0.2}
\definecolor{darkorange}{rgb}{.71,0.21,0.01}
\definecolor{darkgreen}{rgb}{.12,.54,.11}
\definecolor{myteal}{rgb}{.26, .44, .56}
\definecolor{gray}{gray}{0.45}
\definecolor{lightgray}{gray}{.95}
\definecolor{mediumgray}{gray}{.8}
\definecolor{inputbackground}{rgb}{.95, .95, .85}
\definecolor{outputbackground}{rgb}{.95, .95, .95}
\definecolor{traceback}{rgb}{1, .95, .95}
% ansi colors
\definecolor{red}{rgb}{.6,0,0}
\definecolor{green}{rgb}{0,.65,0}
\definecolor{brown}{rgb}{0.6,0.6,0}
\definecolor{blue}{rgb}{0,.145,.698}
\definecolor{purple}{rgb}{.698,.145,.698}
\definecolor{cyan}{rgb}{0,.698,.698}
\definecolor{lightgray}{gray}{0.5}
% bright ansi colors
\definecolor{darkgray}{gray}{0.25}
\definecolor{lightred}{rgb}{1.0,0.39,0.28}
\definecolor{lightgreen}{rgb}{0.48,0.99,0.0}
\definecolor{lightblue}{rgb}{0.53,0.81,0.92}
\definecolor{lightpurple}{rgb}{0.87,0.63,0.87}
\definecolor{lightcyan}{rgb}{0.5,1.0,0.83}
% commands and environments needed by pandoc snippets
% extracted from the output of `pandoc -s`
\providecommand{\tightlist}{%
\setlength{\itemsep}{0pt}\setlength{\parskip}{0pt}}
\DefineVerbatimEnvironment{Highlighting}{Verbatim}{commandchars=\\\{\}}
% Add ',fontsize=\small' for more characters per line
\newenvironment{Shaded}{}{}
\newcommand{\KeywordTok}[1]{\textcolor[rgb]{0.00,0.44,0.13}{\textbf{{#1}}}}
\newcommand{\DataTypeTok}[1]{\textcolor[rgb]{0.56,0.13,0.00}{{#1}}}
\newcommand{\DecValTok}[1]{\textcolor[rgb]{0.25,0.63,0.44}{{#1}}}
\newcommand{\BaseNTok}[1]{\textcolor[rgb]{0.25,0.63,0.44}{{#1}}}
\newcommand{\FloatTok}[1]{\textcolor[rgb]{0.25,0.63,0.44}{{#1}}}
\newcommand{\CharTok}[1]{\textcolor[rgb]{0.25,0.44,0.63}{{#1}}}
\newcommand{\StringTok}[1]{\textcolor[rgb]{0.25,0.44,0.63}{{#1}}}
\newcommand{\CommentTok}[1]{\textcolor[rgb]{0.38,0.63,0.69}{\textit{{#1}}}}
\newcommand{\OtherTok}[1]{\textcolor[rgb]{0.00,0.44,0.13}{{#1}}}
\newcommand{\AlertTok}[1]{\textcolor[rgb]{1.00,0.00,0.00}{\textbf{{#1}}}}
\newcommand{\FunctionTok}[1]{\textcolor[rgb]{0.02,0.16,0.49}{{#1}}}
\newcommand{\RegionMarkerTok}[1]{{#1}}
\newcommand{\ErrorTok}[1]{\textcolor[rgb]{1.00,0.00,0.00}{\textbf{{#1}}}}
\newcommand{\NormalTok}[1]{{#1}}
% Define a nice break command that doesn't care if a line doesn't already
% exist.
\def\br{\hspace*{\fill} \\* }
% Math Jax compatability definitions
\def\gt{>}
\def\lt{<}
% Document parameters
\title{Curve fitting}
% Pygments definitions
\makeatletter
\def\PY@reset{\let\PY@it=\relax \let\PY@bf=\relax%
\let\PY@ul=\relax \let\PY@tc=\relax%
\let\PY@bc=\relax \let\PY@ff=\relax}
\def\PY@tok#1{\csname PY@tok@#1\endcsname}
\def\PY@toks#1+{\ifx\relax#1\empty\else%
\PY@tok{#1}\expandafter\PY@toks\fi}
\def\PY@do#1{\PY@bc{\PY@tc{\PY@ul{%
\PY@it{\PY@bf{\PY@ff{#1}}}}}}}
\def\PY#1#2{\PY@reset\PY@toks#1+\relax+\PY@do{#2}}
\expandafter\def\csname PY@tok@gd\endcsname{\def\PY@tc##1{\textcolor[rgb]{0.63,0.00,0.00}{##1}}}
\expandafter\def\csname PY@tok@gu\endcsname{\let\PY@bf=\textbf\def\PY@tc##1{\textcolor[rgb]{0.50,0.00,0.50}{##1}}}
\expandafter\def\csname PY@tok@gt\endcsname{\def\PY@tc##1{\textcolor[rgb]{0.00,0.27,0.87}{##1}}}
\expandafter\def\csname PY@tok@gs\endcsname{\let\PY@bf=\textbf}
\expandafter\def\csname PY@tok@gr\endcsname{\def\PY@tc##1{\textcolor[rgb]{1.00,0.00,0.00}{##1}}}
\expandafter\def\csname PY@tok@cm\endcsname{\let\PY@it=\textit\def\PY@tc##1{\textcolor[rgb]{0.25,0.50,0.50}{##1}}}
\expandafter\def\csname PY@tok@vg\endcsname{\def\PY@tc##1{\textcolor[rgb]{0.10,0.09,0.49}{##1}}}
\expandafter\def\csname PY@tok@m\endcsname{\def\PY@tc##1{\textcolor[rgb]{0.40,0.40,0.40}{##1}}}
\expandafter\def\csname PY@tok@mh\endcsname{\def\PY@tc##1{\textcolor[rgb]{0.40,0.40,0.40}{##1}}}
\expandafter\def\csname PY@tok@go\endcsname{\def\PY@tc##1{\textcolor[rgb]{0.53,0.53,0.53}{##1}}}
\expandafter\def\csname PY@tok@ge\endcsname{\let\PY@it=\textit}
\expandafter\def\csname PY@tok@vc\endcsname{\def\PY@tc##1{\textcolor[rgb]{0.10,0.09,0.49}{##1}}}
\expandafter\def\csname PY@tok@il\endcsname{\def\PY@tc##1{\textcolor[rgb]{0.40,0.40,0.40}{##1}}}
\expandafter\def\csname PY@tok@cs\endcsname{\let\PY@it=\textit\def\PY@tc##1{\textcolor[rgb]{0.25,0.50,0.50}{##1}}}
\expandafter\def\csname PY@tok@cp\endcsname{\def\PY@tc##1{\textcolor[rgb]{0.74,0.48,0.00}{##1}}}
\expandafter\def\csname PY@tok@gi\endcsname{\def\PY@tc##1{\textcolor[rgb]{0.00,0.63,0.00}{##1}}}
\expandafter\def\csname PY@tok@gh\endcsname{\let\PY@bf=\textbf\def\PY@tc##1{\textcolor[rgb]{0.00,0.00,0.50}{##1}}}
\expandafter\def\csname PY@tok@ni\endcsname{\let\PY@bf=\textbf\def\PY@tc##1{\textcolor[rgb]{0.60,0.60,0.60}{##1}}}
\expandafter\def\csname PY@tok@nl\endcsname{\def\PY@tc##1{\textcolor[rgb]{0.63,0.63,0.00}{##1}}}
\expandafter\def\csname PY@tok@nn\endcsname{\let\PY@bf=\textbf\def\PY@tc##1{\textcolor[rgb]{0.00,0.00,1.00}{##1}}}
\expandafter\def\csname PY@tok@no\endcsname{\def\PY@tc##1{\textcolor[rgb]{0.53,0.00,0.00}{##1}}}
\expandafter\def\csname PY@tok@na\endcsname{\def\PY@tc##1{\textcolor[rgb]{0.49,0.56,0.16}{##1}}}
\expandafter\def\csname PY@tok@nb\endcsname{\def\PY@tc##1{\textcolor[rgb]{0.00,0.50,0.00}{##1}}}
\expandafter\def\csname PY@tok@nc\endcsname{\let\PY@bf=\textbf\def\PY@tc##1{\textcolor[rgb]{0.00,0.00,1.00}{##1}}}
\expandafter\def\csname PY@tok@nd\endcsname{\def\PY@tc##1{\textcolor[rgb]{0.67,0.13,1.00}{##1}}}
\expandafter\def\csname PY@tok@ne\endcsname{\let\PY@bf=\textbf\def\PY@tc##1{\textcolor[rgb]{0.82,0.25,0.23}{##1}}}
\expandafter\def\csname PY@tok@nf\endcsname{\def\PY@tc##1{\textcolor[rgb]{0.00,0.00,1.00}{##1}}}
\expandafter\def\csname PY@tok@si\endcsname{\let\PY@bf=\textbf\def\PY@tc##1{\textcolor[rgb]{0.73,0.40,0.53}{##1}}}
\expandafter\def\csname PY@tok@s2\endcsname{\def\PY@tc##1{\textcolor[rgb]{0.73,0.13,0.13}{##1}}}
\expandafter\def\csname PY@tok@vi\endcsname{\def\PY@tc##1{\textcolor[rgb]{0.10,0.09,0.49}{##1}}}
\expandafter\def\csname PY@tok@nt\endcsname{\let\PY@bf=\textbf\def\PY@tc##1{\textcolor[rgb]{0.00,0.50,0.00}{##1}}}
\expandafter\def\csname PY@tok@nv\endcsname{\def\PY@tc##1{\textcolor[rgb]{0.10,0.09,0.49}{##1}}}
\expandafter\def\csname PY@tok@s1\endcsname{\def\PY@tc##1{\textcolor[rgb]{0.73,0.13,0.13}{##1}}}
\expandafter\def\csname PY@tok@kd\endcsname{\let\PY@bf=\textbf\def\PY@tc##1{\textcolor[rgb]{0.00,0.50,0.00}{##1}}}
\expandafter\def\csname PY@tok@sh\endcsname{\def\PY@tc##1{\textcolor[rgb]{0.73,0.13,0.13}{##1}}}
\expandafter\def\csname PY@tok@sc\endcsname{\def\PY@tc##1{\textcolor[rgb]{0.73,0.13,0.13}{##1}}}
\expandafter\def\csname PY@tok@sx\endcsname{\def\PY@tc##1{\textcolor[rgb]{0.00,0.50,0.00}{##1}}}
\expandafter\def\csname PY@tok@bp\endcsname{\def\PY@tc##1{\textcolor[rgb]{0.00,0.50,0.00}{##1}}}
\expandafter\def\csname PY@tok@c1\endcsname{\let\PY@it=\textit\def\PY@tc##1{\textcolor[rgb]{0.25,0.50,0.50}{##1}}}
\expandafter\def\csname PY@tok@kc\endcsname{\let\PY@bf=\textbf\def\PY@tc##1{\textcolor[rgb]{0.00,0.50,0.00}{##1}}}
\expandafter\def\csname PY@tok@c\endcsname{\let\PY@it=\textit\def\PY@tc##1{\textcolor[rgb]{0.25,0.50,0.50}{##1}}}
\expandafter\def\csname PY@tok@mf\endcsname{\def\PY@tc##1{\textcolor[rgb]{0.40,0.40,0.40}{##1}}}
\expandafter\def\csname PY@tok@err\endcsname{\def\PY@bc##1{\setlength{\fboxsep}{0pt}\fcolorbox[rgb]{1.00,0.00,0.00}{1,1,1}{\strut ##1}}}
\expandafter\def\csname PY@tok@mb\endcsname{\def\PY@tc##1{\textcolor[rgb]{0.40,0.40,0.40}{##1}}}
\expandafter\def\csname PY@tok@ss\endcsname{\def\PY@tc##1{\textcolor[rgb]{0.10,0.09,0.49}{##1}}}
\expandafter\def\csname PY@tok@sr\endcsname{\def\PY@tc##1{\textcolor[rgb]{0.73,0.40,0.53}{##1}}}
\expandafter\def\csname PY@tok@mo\endcsname{\def\PY@tc##1{\textcolor[rgb]{0.40,0.40,0.40}{##1}}}
\expandafter\def\csname PY@tok@kn\endcsname{\let\PY@bf=\textbf\def\PY@tc##1{\textcolor[rgb]{0.00,0.50,0.00}{##1}}}
\expandafter\def\csname PY@tok@mi\endcsname{\def\PY@tc##1{\textcolor[rgb]{0.40,0.40,0.40}{##1}}}
\expandafter\def\csname PY@tok@gp\endcsname{\let\PY@bf=\textbf\def\PY@tc##1{\textcolor[rgb]{0.00,0.00,0.50}{##1}}}
\expandafter\def\csname PY@tok@o\endcsname{\def\PY@tc##1{\textcolor[rgb]{0.40,0.40,0.40}{##1}}}
\expandafter\def\csname PY@tok@kr\endcsname{\let\PY@bf=\textbf\def\PY@tc##1{\textcolor[rgb]{0.00,0.50,0.00}{##1}}}
\expandafter\def\csname PY@tok@s\endcsname{\def\PY@tc##1{\textcolor[rgb]{0.73,0.13,0.13}{##1}}}
\expandafter\def\csname PY@tok@kp\endcsname{\def\PY@tc##1{\textcolor[rgb]{0.00,0.50,0.00}{##1}}}
\expandafter\def\csname PY@tok@w\endcsname{\def\PY@tc##1{\textcolor[rgb]{0.73,0.73,0.73}{##1}}}
\expandafter\def\csname PY@tok@kt\endcsname{\def\PY@tc##1{\textcolor[rgb]{0.69,0.00,0.25}{##1}}}
\expandafter\def\csname PY@tok@ow\endcsname{\let\PY@bf=\textbf\def\PY@tc##1{\textcolor[rgb]{0.67,0.13,1.00}{##1}}}
\expandafter\def\csname PY@tok@sb\endcsname{\def\PY@tc##1{\textcolor[rgb]{0.73,0.13,0.13}{##1}}}
\expandafter\def\csname PY@tok@k\endcsname{\let\PY@bf=\textbf\def\PY@tc##1{\textcolor[rgb]{0.00,0.50,0.00}{##1}}}
\expandafter\def\csname PY@tok@se\endcsname{\let\PY@bf=\textbf\def\PY@tc##1{\textcolor[rgb]{0.73,0.40,0.13}{##1}}}
\expandafter\def\csname PY@tok@sd\endcsname{\let\PY@it=\textit\def\PY@tc##1{\textcolor[rgb]{0.73,0.13,0.13}{##1}}}
\def\PYZbs{\char`\\}
\def\PYZus{\char`\_}
\def\PYZob{\char`\{}
\def\PYZcb{\char`\}}
\def\PYZca{\char`\^}
\def\PYZam{\char`\&}
\def\PYZlt{\char`\<}
\def\PYZgt{\char`\>}
\def\PYZsh{\char`\#}
\def\PYZpc{\char`\%}
\def\PYZdl{\char`\$}
\def\PYZhy{\char`\-}
\def\PYZsq{\char`\'}
\def\PYZdq{\char`\"}
\def\PYZti{\char`\~}
% for compatibility with earlier versions
\def\PYZat{@}
\def\PYZlb{[}
\def\PYZrb{]}
\makeatother
% Exact colors from NB
\definecolor{incolor}{rgb}{0.0, 0.0, 0.5}
\definecolor{outcolor}{rgb}{0.545, 0.0, 0.0}
% Prevent overflowing lines due to hard-to-break entities
\sloppy
% Setup hyperref package
\hypersetup{
breaklinks=true, % so long urls are correctly broken across lines
colorlinks=true,
urlcolor=blue,
linkcolor=darkorange,
citecolor=darkgreen,
}
% Slightly bigger margins than the latex defaults
\geometry{verbose,tmargin=1in,bmargin=1in,lmargin=1in,rmargin=1in}
\begin{document}
\maketitle
\section{Curve fitting in python}\label{curve-fitting-in-python}
\subsection{A.M.C. Dawes - 2015}\label{a.m.c.-dawes---2015}
An introduction to various curve fitting routines useful for physics
work.
The first cell is used to import additional features so they are
available in our notebook. \texttt{matplotlib} provides plotting
functions and \texttt{numpy} provides math and array functions.
\begin{Verbatim}[commandchars=\\\{\}]
{\color{incolor}In [{\color{incolor}1}]:} \PY{k+kn}{import} \PY{n+nn}{matplotlib.pyplot} \PY{k+kn}{as} \PY{n+nn}{plt}
\PY{k+kn}{import} \PY{n+nn}{numpy} \PY{k+kn}{as} \PY{n+nn}{np}
\PY{o}{\PYZpc{}}\PY{k}{matplotlib} inline
\end{Verbatim}
Next we define \texttt{x} as a linear space with 100 points that range
from 0 to 10.
\begin{Verbatim}[commandchars=\\\{\}]
{\color{incolor}In [{\color{incolor}2}]:} \PY{n}{x} \PY{o}{=} \PY{n}{np}\PY{o}{.}\PY{n}{linspace}\PY{p}{(}\PY{l+m+mi}{0}\PY{p}{,}\PY{l+m+mi}{10}\PY{p}{,}\PY{l+m+mi}{100}\PY{p}{)}
\end{Verbatim}
\texttt{y} is mock data that we create from a linear function with a slope
of 1.45. We also add a small amount of random data to simulate noise as
if this were a measured quantity.
\begin{Verbatim}[commandchars=\\\{\}]
{\color{incolor}In [{\color{incolor}3}]:} \PY{n}{y} \PY{o}{=} \PY{l+m+mf}{1.45} \PY{o}{*} \PY{n}{x} \PY{o}{+} \PY{l+m+mf}{1.3}\PY{o}{*}\PY{n}{np}\PY{o}{.}\PY{n}{random}\PY{o}{.}\PY{n}{random}\PY{p}{(}\PY{n+nb}{len}\PY{p}{(}\PY{n}{x}\PY{p}{)}\PY{p}{)}
\end{Verbatim}
\begin{Verbatim}[commandchars=\\\{\}]
{\color{incolor}In [{\color{incolor}4}]:} \PY{n}{plt}\PY{o}{.}\PY{n}{plot}\PY{p}{(}\PY{n}{x}\PY{p}{,}\PY{n}{y}\PY{p}{,}\PY{l+s}{\PYZdq{}}\PY{l+s}{.}\PY{l+s}{\PYZdq{}}\PY{p}{)}
\end{Verbatim}
\begin{Verbatim}[commandchars=\\\{\}]
{\color{outcolor}Out[{\color{outcolor}4}]:} [<matplotlib.lines.Line2D at 0x10640dc10>]
\end{Verbatim}
\begin{center}
\adjustimage{max size={0.9\linewidth}{0.9\paperheight}}{Curve fitting_files/Curve fitting_6_1.png}
\end{center}
{ \hspace*{\fill} \\}
The data is pretty clearly linear, but we can fit a line to determine
the slope. A 1st order polynomial is a line, so we use \texttt{polyfit}:
\begin{Verbatim}[commandchars=\\\{\}]
{\color{incolor}In [{\color{incolor}5}]:} \PY{c}{\PYZsh{} execute the fit on the data; a 1\PYZhy{}dim fit (line)}
\PY{n}{fit} \PY{o}{=} \PY{n}{np}\PY{o}{.}\PY{n}{polyfit}\PY{p}{(}\PY{n}{x}\PY{p}{,} \PY{n}{y}\PY{p}{,} \PY{l+m+mi}{1}\PY{p}{,}\PY{n}{full}\PY{o}{=}\PY{n+nb+bp}{True}\PY{p}{)}
\end{Verbatim}
The fit is stored in a variable called \texttt{fit} which has several
elements. We can print them out with nice labels using the following
cell:
\begin{Verbatim}[commandchars=\\\{\}]
{\color{incolor}In [{\color{incolor}6}]:} \PY{k}{print} \PY{l+s}{\PYZdq{}}\PY{l+s}{coefficients:}\PY{l+s}{\PYZdq{}}\PY{p}{,} \PY{n}{fit}\PY{p}{[}\PY{l+m+mi}{0}\PY{p}{]}
\PY{k}{print} \PY{l+s}{\PYZdq{}}\PY{l+s}{residuals:}\PY{l+s}{\PYZdq{}}\PY{p}{,} \PY{n}{fit}\PY{p}{[}\PY{l+m+mi}{1}\PY{p}{]}
\PY{k}{print} \PY{l+s}{\PYZdq{}}\PY{l+s}{rank:}\PY{l+s}{\PYZdq{}}\PY{p}{,} \PY{n}{fit}\PY{p}{[}\PY{l+m+mi}{2}\PY{p}{]}
\PY{k}{print} \PY{l+s}{\PYZdq{}}\PY{l+s}{singular\PYZus{}values:}\PY{l+s}{\PYZdq{}}\PY{p}{,} \PY{n}{fit}\PY{p}{[}\PY{l+m+mi}{3}\PY{p}{]}
\PY{k}{print} \PY{l+s}{\PYZdq{}}\PY{l+s}{rcond:}\PY{l+s}{\PYZdq{}}\PY{p}{,} \PY{n}{fit}\PY{p}{[}\PY{l+m+mi}{4}\PY{p}{]}
\end{Verbatim}
\begin{Verbatim}[commandchars=\\\{\}]
coefficients: [ 1.42781042  0.75319408]
residuals: [ 16.0992094]
rank: 2
singular\_values: [ 1.36522772 0.36898954]
rcond: 2.22044604925e-14
\end{Verbatim}
The main thing we want is the list of coefficients. These are the values
in the polynomial that was a best fit. We can create a function (called
\texttt{f}) that is the best fit polynomial. Then it is easy to plot
both together and see that the fit is reasonable.
\begin{Verbatim}[commandchars=\\\{\}]
{\color{incolor}In [{\color{incolor}7}]:} \PY{n}{f} \PY{o}{=} \PY{n}{np}\PY{o}{.}\PY{n}{poly1d}\PY{p}{(}\PY{n}{fit}\PY{p}{[}\PY{l+m+mi}{0}\PY{p}{]}\PY{p}{)} \PY{c}{\PYZsh{} create a function using the fit parameters}
\end{Verbatim}
\begin{Verbatim}[commandchars=\\\{\}]
{\color{incolor}In [{\color{incolor}8}]:} \PY{n}{plt}\PY{o}{.}\PY{n}{plot}\PY{p}{(}\PY{n}{x}\PY{p}{,}\PY{n}{y}\PY{p}{)}
\PY{n}{plt}\PY{o}{.}\PY{n}{plot}\PY{p}{(}\PY{n}{x}\PY{p}{,}\PY{n}{f}\PY{p}{(}\PY{n}{x}\PY{p}{)}\PY{p}{)}
\end{Verbatim}
\begin{Verbatim}[commandchars=\\\{\}]
{\color{outcolor}Out[{\color{outcolor}8}]:} [<matplotlib.lines.Line2D at 0x1063d23d0>]
\end{Verbatim}
\begin{center}
\adjustimage{max size={0.9\linewidth}{0.9\paperheight}}{Curve fitting_files/Curve fitting_13_1.png}
\end{center}
{ \hspace*{\fill} \\}
\subsection{General function fitting}\label{general-function-fitting}
\subsubsection{For more than just
polynomials}\label{for-more-than-just-polynomials}
\begin{quote}
``When choosing a fit, Polynomial is almost always the wrong answer'' -
TVIMH
\end{quote}
Often there is a better model that describes the data. In most cases
this is a known function; something like a power law or an exponential.
In these cases, there are two options: 1. Convert the variables so that
a plot will be linear (i.e.~plot the \texttt{log} of your data, or the
square root, or the square, etc.). This is highly effective because a
linear fit is always (yes always) more accurate than a fit of another
function. 2. Perform a nonlinear fit to the function that models your
data. We'll illustrate this below and show how even a ``decent'' fit
gives several \% error.
First, we import the functions that do nonlinear fitting:
\begin{Verbatim}[commandchars=\\\{\}]
{\color{incolor}In [{\color{incolor}9}]:} \PY{k+kn}{from} \PY{n+nn}{scipy.optimize} \PY{k+kn}{import} \PY{n}{curve\PYZus{}fit}
\end{Verbatim}
Then define a function that we expect models our system. In this case,
exponential decay with an offset.
\begin{Verbatim}[commandchars=\\\{\}]
{\color{incolor}In [{\color{incolor}10}]:} \PY{k}{def} \PY{n+nf}{func}\PY{p}{(}\PY{n}{x}\PY{p}{,} \PY{n}{a}\PY{p}{,} \PY{n}{b}\PY{p}{,} \PY{n}{c}\PY{p}{)}\PY{p}{:}
\PY{k}{return} \PY{n}{a} \PY{o}{*} \PY{n}{np}\PY{o}{.}\PY{n}{exp}\PY{p}{(}\PY{o}{\PYZhy{}}\PY{n}{b} \PY{o}{*} \PY{n}{x}\PY{p}{)} \PY{o}{+} \PY{n}{c}
\end{Verbatim}
Create a pure (i.e.~exact) set of data with some parameters, and then
simulate some data of the same system (by adding random noise).
\begin{Verbatim}[commandchars=\\\{\}]
{\color{incolor}In [{\color{incolor}11}]:} \PY{n}{y} \PY{o}{=} \PY{n}{func}\PY{p}{(}\PY{n}{x}\PY{p}{,} \PY{l+m+mf}{2.5}\PY{p}{,} \PY{l+m+mf}{0.6}\PY{p}{,} \PY{l+m+mf}{0.5}\PY{p}{)}
\PY{n}{ydata} \PY{o}{=} \PY{n}{y} \PY{o}{+} \PY{l+m+mf}{0.2} \PY{o}{*} \PY{n}{np}\PY{o}{.}\PY{n}{random}\PY{o}{.}\PY{n}{normal}\PY{p}{(}\PY{n}{size}\PY{o}{=}\PY{n+nb}{len}\PY{p}{(}\PY{n}{x}\PY{p}{)}\PY{p}{)}
\end{Verbatim}
Now carry out the fit. \texttt{curve\_fit} returns two outputs, the fit
parameters, and the covariance matrix. We won't use the covariance
matrix yet, but it's good practice to save it into a variable.
\begin{Verbatim}[commandchars=\\\{\}]
{\color{incolor}In [{\color{incolor}12}]:} \PY{n}{parameters}\PY{p}{,} \PY{n}{covariance} \PY{o}{=} \PY{n}{curve\PYZus{}fit}\PY{p}{(}\PY{n}{func}\PY{p}{,} \PY{n}{x}\PY{p}{,} \PY{n}{ydata}\PY{p}{)}
\PY{n}{parameters} \PY{c}{\PYZsh{}the fit results for a, b, c}
\end{Verbatim}
\begin{Verbatim}[commandchars=\\\{\}]
{\color{outcolor}Out[{\color{outcolor}12}]:} array([ 2.40082231, 0.54439262, 0.48824428])
\end{Verbatim}
We can see the parameters are a reasonable match to the pure function we
created above. Next, we want to create a ``best fit'' data set but using
the parameters in the model function \texttt{func}. The ``splat''
operator is handy for this, it unpacks the \texttt{parameters} array
into function arguments \texttt{a}, \texttt{b}, and \texttt{c}.
\begin{Verbatim}[commandchars=\\\{\}]
{\color{incolor}In [{\color{incolor}13}]:} \PY{n}{yfit} \PY{o}{=} \PY{n}{func}\PY{p}{(}\PY{n}{x}\PY{p}{,} \PY{o}{*}\PY{n}{parameters}\PY{p}{)}
\PY{c}{\PYZsh{} the splat operator unpacks an array into function arguments}
\end{Verbatim}
\begin{Verbatim}[commandchars=\\\{\}]
{\color{incolor}In [{\color{incolor}14}]:} \PY{n}{plt}\PY{o}{.}\PY{n}{plot}\PY{p}{(}\PY{n}{x}\PY{p}{,}\PY{n}{ydata}\PY{p}{,}\PY{l+s}{\PYZdq{}}\PY{l+s}{.}\PY{l+s}{\PYZdq{}}\PY{p}{)}
\PY{n}{plt}\PY{o}{.}\PY{n}{plot}\PY{p}{(}\PY{n}{x}\PY{p}{,}\PY{n}{yfit}\PY{p}{)}
\PY{n}{plt}\PY{o}{.}\PY{n}{plot}\PY{p}{(}\PY{n}{x}\PY{p}{,}\PY{n}{y}\PY{p}{)}
\end{Verbatim}
\begin{Verbatim}[commandchars=\\\{\}]
{\color{outcolor}Out[{\color{outcolor}14}]:} [<matplotlib.lines.Line2D at 0x1075a8610>]
\end{Verbatim}
\begin{center}
\adjustimage{max size={0.9\linewidth}{0.9\paperheight}}{Curve fitting_files/Curve fitting_24_1.png}
\end{center}
{ \hspace*{\fill} \\}
Looks pretty good as far as fits go. Let's check out the error:
\begin{Verbatim}[commandchars=\\\{\}]
{\color{incolor}In [{\color{incolor}15}]:} \PY{n}{plt}\PY{o}{.}\PY{n}{plot}\PY{p}{(}\PY{n}{x}\PY{p}{,}\PY{p}{(}\PY{p}{(}\PY{n}{yfit}\PY{o}{\PYZhy{}}\PY{n}{y}\PY{p}{)}\PY{o}{/}\PY{n}{y}\PY{p}{)}\PY{o}{*}\PY{l+m+mi}{100}\PY{p}{)}
\PY{n}{plt}\PY{o}{.}\PY{n}{title}\PY{p}{(}\PY{l+s}{\PYZdq{}}\PY{l+s}{Fit error }\PY{l+s}{\PYZpc{}}\PY{l+s}{\PYZdq{}}\PY{p}{)}
\end{Verbatim}
\begin{Verbatim}[commandchars=\\\{\}]
{\color{outcolor}Out[{\color{outcolor}15}]:} <matplotlib.text.Text at 0x1077bb410>
\end{Verbatim}
\begin{center}
\adjustimage{max size={0.9\linewidth}{0.9\paperheight}}{Curve fitting_files/Curve fitting_26_1.png}
\end{center}
{ \hspace*{\fill} \\}
To further illustrate the variation in this fit, repeat all the cells
(to get new random noise in the data) and you'll see the fit changes.
Sometimes, the error is as large as 10\%. Compare this to a linear fit
of log data and I bet you see much less variation in the fit!
\subsection{Modeling by rescaling
data}\label{modeling-by-rescaling-data}
\subsubsection{\texorpdfstring{The ``fit a line to anything''
approach}{The fit a line to anything approach}}\label{the-fit-a-line-to-anything-approach}
\begin{quote}
``With a small enough data set, you can always fit it to a line''
\end{quote}
\begin{Verbatim}[commandchars=\\\{\}]
{\color{incolor}In [{\color{incolor}16}]:} \PY{n}{ylog} \PY{o}{=} \PY{n}{np}\PY{o}{.}\PY{n}{log}\PY{p}{(}\PY{n}{ydata}\PY{p}{[}\PY{p}{:}\PY{l+m+mi}{25}\PY{p}{]} \PY{o}{\PYZhy{}} \PY{n}{ydata}\PY{p}{[}\PY{o}{\PYZhy{}}\PY{l+m+mi}{1}\PY{p}{]}\PY{p}{)}
\PY{n}{plt}\PY{o}{.}\PY{n}{plot}\PY{p}{(}\PY{n}{x}\PY{p}{[}\PY{p}{:}\PY{l+m+mi}{25}\PY{p}{]}\PY{p}{,}\PY{n}{ylog}\PY{p}{,}\PY{l+s}{\PYZdq{}}\PY{l+s}{.}\PY{l+s}{\PYZdq{}}\PY{p}{)}
\end{Verbatim}
\begin{Verbatim}[commandchars=\\\{\}]
{\color{outcolor}Out[{\color{outcolor}16}]:} [<matplotlib.lines.Line2D at 0x107900a90>]
\end{Verbatim}
\begin{center}
\adjustimage{max size={0.9\linewidth}{0.9\paperheight}}{Curve fitting_files/Curve fitting_29_1.png}
\end{center}
{ \hspace*{\fill} \\}
\begin{Verbatim}[commandchars=\\\{\}]
{\color{incolor}In [{\color{incolor}17}]:} \PY{n}{fitlog} \PY{o}{=} \PY{n}{np}\PY{o}{.}\PY{n}{polyfit}\PY{p}{(}\PY{n}{x}\PY{p}{[}\PY{p}{:}\PY{l+m+mi}{25}\PY{p}{]}\PY{p}{,} \PY{n}{ylog}\PY{p}{[}\PY{p}{:}\PY{l+m+mi}{25}\PY{p}{]}\PY{p}{,} \PY{l+m+mi}{1}\PY{p}{,}\PY{n}{full}\PY{o}{=}\PY{n+nb+bp}{True}\PY{p}{)}
\end{Verbatim}
\begin{Verbatim}[commandchars=\\\{\}]
{\color{incolor}In [{\color{incolor}18}]:} \PY{n}{fitlog}
\end{Verbatim}
\begin{Verbatim}[commandchars=\\\{\}]
{\color{outcolor}Out[{\color{outcolor}18}]:} (array([-0.5888762 , 0.82399638]),
array([ 0.44348398]),
2,
array([ 1.36277029, 0.37796447]),
5.5511151231257827e-15)
\end{Verbatim}
\begin{Verbatim}[commandchars=\\\{\}]
{\color{incolor}In [{\color{incolor}19}]:} \PY{n}{ylog}\PY{o}{.}\PY{n}{shape}
\end{Verbatim}
\begin{Verbatim}[commandchars=\\\{\}]
{\color{outcolor}Out[{\color{outcolor}19}]:} (25,)
\end{Verbatim}
\begin{Verbatim}[commandchars=\\\{\}]
{\color{incolor}In [{\color{incolor}20}]:} \PY{n}{flog} \PY{o}{=} \PY{n}{np}\PY{o}{.}\PY{n}{poly1d}\PY{p}{(}\PY{n}{fitlog}\PY{p}{[}\PY{l+m+mi}{0}\PY{p}{]}\PY{p}{)}
\PY{n}{plt}\PY{o}{.}\PY{n}{plot}\PY{p}{(}\PY{n}{x}\PY{p}{[}\PY{p}{:}\PY{l+m+mi}{25}\PY{p}{]}\PY{p}{,}\PY{n}{ylog}\PY{p}{)}
\PY{n}{plt}\PY{o}{.}\PY{n}{plot}\PY{p}{(}\PY{n}{x}\PY{p}{[}\PY{p}{:}\PY{l+m+mi}{25}\PY{p}{]}\PY{p}{,}\PY{n}{flog}\PY{p}{(}\PY{n}{x}\PY{p}{[}\PY{p}{:}\PY{l+m+mi}{25}\PY{p}{]}\PY{p}{)}\PY{p}{)}
\end{Verbatim}
\begin{Verbatim}[commandchars=\\\{\}]
{\color{outcolor}Out[{\color{outcolor}20}]:} [<matplotlib.lines.Line2D at 0x10783fa10>]
\end{Verbatim}
\begin{center}
\adjustimage{max size={0.9\linewidth}{0.9\paperheight}}{Curve fitting_files/Curve fitting_33_1.png}
\end{center}
{ \hspace*{\fill} \\}
Now to finally back out the exponential from the linear fit:
\begin{Verbatim}[commandchars=\\\{\}]
{\color{incolor}In [{\color{incolor}21}]:} \PY{n}{ylogfit} \PY{o}{=} \PY{n}{np}\PY{o}{.}\PY{n}{exp}\PY{p}{(}\PY{n}{flog}\PY{p}{(}\PY{n}{x}\PY{p}{)}\PY{p}{)}
\PY{n}{plt}\PY{o}{.}\PY{n}{plot}\PY{p}{(}\PY{n}{x}\PY{p}{,}\PY{n}{ylogfit}\PY{o}{+}\PY{n}{ydata}\PY{p}{[}\PY{o}{\PYZhy{}}\PY{l+m+mi}{1}\PY{p}{]}\PY{p}{)}
\PY{n}{plt}\PY{o}{.}\PY{n}{plot}\PY{p}{(}\PY{n}{x}\PY{p}{,}\PY{n}{ydata}\PY{p}{)}
\end{Verbatim}
\begin{Verbatim}[commandchars=\\\{\}]
{\color{outcolor}Out[{\color{outcolor}21}]:} [<matplotlib.lines.Line2D at 0x10784db50>]
\end{Verbatim}
\begin{center}
\adjustimage{max size={0.9\linewidth}{0.9\paperheight}}{Curve fitting_files/Curve fitting_35_1.png}
\end{center}
{ \hspace*{\fill} \\}
Clearly the tail is a bit off. The next iteration is to average the tail
end and use that as the y shift, instead of using just the last point.
\begin{Verbatim}[commandchars=\\\{\}]
{\color{incolor}In [{\color{incolor}22}]:} \PY{n}{yshift} \PY{o}{=} \PY{n}{np}\PY{o}{.}\PY{n}{average}\PY{p}{(}\PY{n}{ydata}\PY{p}{[}\PY{o}{\PYZhy{}}\PY{l+m+mi}{20}\PY{p}{:}\PY{p}{]}\PY{p}{)}
\PY{n}{yshift}
\end{Verbatim}
\begin{Verbatim}[commandchars=\\\{\}]
{\color{outcolor}Out[{\color{outcolor}22}]:} 0.51212140091282721
\end{Verbatim}
\begin{Verbatim}[commandchars=\\\{\}]
{\color{incolor}In [{\color{incolor}23}]:} \PY{n}{ylog} \PY{o}{=} \PY{n}{np}\PY{o}{.}\PY{n}{log}\PY{p}{(}\PY{n}{ydata}\PY{p}{[}\PY{p}{:}\PY{l+m+mi}{25}\PY{p}{]} \PY{o}{\PYZhy{}} \PY{n}{yshift}\PY{p}{)}
\PY{n}{fitlog} \PY{o}{=} \PY{n}{np}\PY{o}{.}\PY{n}{polyfit}\PY{p}{(}\PY{n}{x}\PY{p}{[}\PY{p}{:}\PY{l+m+mi}{25}\PY{p}{]}\PY{p}{,} \PY{n}{ylog}\PY{p}{[}\PY{p}{:}\PY{l+m+mi}{25}\PY{p}{]}\PY{p}{,} \PY{l+m+mi}{1}\PY{p}{,}\PY{n}{full}\PY{o}{=}\PY{n+nb+bp}{True}\PY{p}{)}
\PY{n}{flog} \PY{o}{=} \PY{n}{np}\PY{o}{.}\PY{n}{poly1d}\PY{p}{(}\PY{n}{fitlog}\PY{p}{[}\PY{l+m+mi}{0}\PY{p}{]}\PY{p}{)}
\PY{n}{plt}\PY{o}{.}\PY{n}{plot}\PY{p}{(}\PY{n}{x}\PY{p}{[}\PY{p}{:}\PY{l+m+mi}{25}\PY{p}{]}\PY{p}{,}\PY{n}{ylog}\PY{p}{)}
\PY{n}{plt}\PY{o}{.}\PY{n}{plot}\PY{p}{(}\PY{n}{x}\PY{p}{[}\PY{p}{:}\PY{l+m+mi}{25}\PY{p}{]}\PY{p}{,}\PY{n}{flog}\PY{p}{(}\PY{n}{x}\PY{p}{[}\PY{p}{:}\PY{l+m+mi}{25}\PY{p}{]}\PY{p}{)}\PY{p}{)}
\end{Verbatim}
\begin{Verbatim}[commandchars=\\\{\}]
{\color{outcolor}Out[{\color{outcolor}23}]:} [<matplotlib.lines.Line2D at 0x107ace390>]
\end{Verbatim}
\begin{center}
\adjustimage{max size={0.9\linewidth}{0.9\paperheight}}{Curve fitting_files/Curve fitting_38_1.png}
\end{center}
{ \hspace*{\fill} \\}
\begin{Verbatim}[commandchars=\\\{\}]
{\color{incolor}In [{\color{incolor}24}]:} \PY{n}{ylogfit} \PY{o}{=} \PY{n}{np}\PY{o}{.}\PY{n}{exp}\PY{p}{(}\PY{n}{flog}\PY{p}{(}\PY{n}{x}\PY{p}{)}\PY{p}{)}
\PY{n}{plt}\PY{o}{.}\PY{n}{plot}\PY{p}{(}\PY{n}{x}\PY{p}{,}\PY{n}{ylogfit}\PY{o}{+}\PY{n}{yshift}\PY{p}{)}
\PY{n}{plt}\PY{o}{.}\PY{n}{plot}\PY{p}{(}\PY{n}{x}\PY{p}{,}\PY{n}{ydata}\PY{p}{)}
\end{Verbatim}
\begin{Verbatim}[commandchars=\\\{\}]
{\color{outcolor}Out[{\color{outcolor}24}]:} [<matplotlib.lines.Line2D at 0x107b4d310>]
\end{Verbatim}
\begin{center}
\adjustimage{max size={0.9\linewidth}{0.9\paperheight}}{Curve fitting_files/Curve fitting_39_1.png}
\end{center}
{ \hspace*{\fill} \\}
Very nice.
\begin{Verbatim}[commandchars=\\\{\}]
{\color{incolor}In [{\color{incolor} }]:}
\end{Verbatim}
% Add a bibliography block to the postdoc
\end{document}
\documentclass{article}
\usepackage[utf8]{inputenc}
\usepackage[english]{babel}
\usepackage[cache=false]{minted}
\usepackage{makecell}
\title{Data Structures in Python}
\author{Guillaume Macneil}
\begin{document}
\maketitle
\begin{abstract}
This is the fourth lesson in the 'Introduction to Python' course. This course is loosely based around the 'MIT Introduction to Computer Science - Fall 2016' course, and is more focused on the Python programming side of things.
\end{abstract}
\section{Introduction to Data Structures}
In prior lessons, we have shown how variables can be very useful as they can: \textbf{1.} store values \textbf{2.} have values re-assigned to them multiple times \textbf{3.} act as a further layer of abstraction above just operating with the plain values. We quickly run into an issue, though: what if I want to store \textit{multiple values} under one name? To do this, we'll need to use some other Python data structures.
\subsection{Tuples}
Tuples are an ordered sequence of elements, each of which can have a different type. Each element is \textit{immutable} - the value of each element cannot be edited after assignment.
\begin{minted}{python}
assortment = (623, "Hello", 42.1, True)
print(assortment[0])
# Prints 623
print(assortment[1:3])
# Prints ("Hello", 42.1)
len(assortment)
# Returns 4
assortment = assortment + (False, "Another")
# The "+=" operator could (and should) be used here
print(assortment)
# Prints (623, "Hello", 42.1, True, False, "Another")
\end{minted}
This may at first seem pretty confusing (that's normal) but the code is actually relatively simple. First, we created a tuple called \textit{"assortment"} and assigned it 4 values. After that, we used a method of accessing the elements of the tuple called \textit{indexing} - we specify the name of the data structure we want to index and then enclose the index we want to access in square brackets, like this: \textit{a[3]}. Thirdly, we used another method of accessing elements in the tuple called \textit{slicing}. This is done in a similar way to indexing, only instead of specifying a single index within the square brackets, we specify a range, like this: \textit{a[2:5]}. After that, we used the \textit{len()} function to return the length of the tuple and concatenated two tuples together, making one large tuple. It is important to note that these operations are not unique to tuples; they can be used on many data structures.
\subsection{Lists}
Much like tuples, lists are ordered sequences of elements accessible by index. The elements in a list are usually homogeneous (of the same type), though they do not have to be. Unlike tuples, the elements of a list are mutable.
\begin{minted}{python}
a = [23, 54, 76, 12, 5, 7, 23, 342]
b = [True, False, "Yes", 0, ["A", ["B", 56.7]]]
len(a)
# Returns 8
len(b)
# Returns 5
print(a[3])
# Prints 12
print(b[3:])
# This '[3:]' notation says 'slice from the fourth element onward'
# Prints [0, ["A", ["B", 56.7]]]
a[0] = "An obvious change"
print(a)
# Prints ["An obvious change", 54, 76, 12, 5, 7, 23, 342]
\end{minted}
As you can see, lists look quite similar to tuples, with only a few minor differences. Lists are bound by square brackets and, as you can see, can contain any combination of objects. They can be indexed to access individual elements and can be sliced just like tuples. However, there are a few differences. Elements can be reassigned just like a normal variable, by specifying the desired index and using the \textit{'='} operator. Another interesting thing is that you can nest lists (you can actually do the same with tuples too) - you can have a list within a list, or in list \textit{'b'}, a list within a list within a list.
\subsection{Dictionaries}
Dictionaries are slightly different to the other data structures in the sense that instead of just holding a sequence of elements, dictionaries hold a series of key-value pairs. This means you can query a key and access a value.
\begin{minted}{python}
fruit_stock = {"apples": 43, "bananas": 76, "oranges": 90}
print(fruit_stock["apples"])
# Prints 43
print(fruit_stock["oranges"])
# Prints 90
print("apples" in fruit_stock)
# Prints True
print("cherries" in fruit_stock)
# Prints False
fruit_stock["apples"] = 42
print(fruit_stock["apples"])
# Prints 42
\end{minted}
Now this does, at first glance, look quite different. The dictionary is bound by curly brackets and, instead of there being one object per element, there are two, separated by a colon! This is an important feature of dictionaries: the object to the left of the colon is called the \textit{key} and the object to the right is called the \textit{value}. These function in much the same way as actual, physical dictionaries. You search for a word in a certain language, you find it and then get the equivalent word in the other language. In Python dictionaries, you index the dictionary with the key (\textit{ a["key"] }) and it returns the value associated with that key. Another thing to note is that dictionaries are also mutable, so you can re-associate a given key with a new value.
\section{Data Structure Methods}
The fun with data structures doesn't stop there though! Let's suppose that we wanted to add a new element to a list - would we have to completely re-define the list with a new element on the end? That seems pretty inefficient, no? Luckily, there is a solution to this - methods. \medskip
\subsection{Tuple Methods}
Tuples only have two methods because they are fundamentally simple data structures that, honestly, can't do very much due to their immutability. \medskip
\newpage
\begin{center}
\begin{tabular}{l|l|l|l}
Method: & Parameters: & Example: & Purpose:\\
\hline
\textbf{a.count()} & \makecell{element} & \makecell{a.count("apple")} & \makecell{Counts the number of \\instances of a given \\element in a tuple} \\
\hline
\textbf{a.index()} & \makecell{element\\(start)\\(end)} & \makecell{a.index("apple")} & \makecell{Returns the index\\ of a given element\\ in a tuple}
\end{tabular}
\end{center}
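To make these two methods concrete, here is a short, hypothetical example (the tuple \textit{fruits} is invented purely for illustration):
\begin{minted}{python}
fruits = ("apple", "banana", "apple", "cherry")
fruits.count("apple")
# Returns 2
fruits.index("banana")
# Returns 1
fruits.index("apple", 1, 4)
# Returns 2 - the optional start/end parameters restrict the search to indices 1 to 3
\end{minted}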
\subsection{List Methods}
Lists, unlike tuples, have many methods (as you can see). In many respects, most of the functionality of lists is due to the methods that can be performed on them. \medskip
\begin{center}
\begin{tabular}{l|l|l|l}
Method: & Parameters: & Example: & Purpose:\\
\hline
\textbf{a.append()} & \makecell{item} & \makecell{a.append("apple")} & \makecell{Adds a given item to\\the end of the list} \\
\hline
\textbf{a.clear()} & & \makecell{a.clear()} & \makecell{Empties a given list} \\
\hline
\textbf{a.copy()} & & \makecell{a.copy()} & \makecell{Duplicates a given list} \\
\hline
\textbf{a.count()} & \makecell{element} & \makecell{a.count("apple")} & \makecell{Counts the number of\\instances of an element\\ in a given list} \\
\hline
\textbf{a.extend()} & \makecell{iterable} & \makecell{a.extend(b)} & \makecell{Adds the elements\\of an iterable to the\\end of the list} \\
\hline
\textbf{a.index()} & \makecell{element\\(start)\\(end)} & \makecell{a.index("apple")} & \makecell{Returns the index of\\a given element in\\a list} \\
\hline
\textbf{a.insert()} & \makecell{index\\element} & \makecell{a.insert(2, "apple")} & \makecell{Inserts an element to\\the list at a specified\\ index} \\
\hline
\textbf{a.pop()} & \makecell{index} & \makecell{a.pop(3)} & \makecell{Removes and returns\\an element from a\\list at a given index} \\
\hline
\textbf{a.remove()} & \makecell{element} & \makecell{a.remove("apple")} & \makecell{Removes the first\\matching element\\from a list } \\
\hline
\textbf{a.reverse()} & & \makecell{a.reverse()} & \makecell{Reverses the elements\\of a list} \\
\hline
\textbf{a.sort()} & \makecell{(reverse)\\(key)} & \makecell{a.sort()} & \makecell{Sorts the elements of a\\list in ascending or\\descending order} \\
\end{tabular}
\end{center}
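As a quick, illustrative sketch (the list \textit{basket} is invented for this example), several of these methods can be combined:
\begin{minted}{python}
basket = ["apple", "banana", "cherry"]
basket.append("date")
# basket is now ["apple", "banana", "cherry", "date"]
basket.insert(1, "blueberry")
# basket is now ["apple", "blueberry", "banana", "cherry", "date"]
basket.pop(0)
# Returns "apple" and removes it from the list
basket.sort(reverse=True)
# basket is now ["date", "cherry", "blueberry", "banana"]
basket.extend(["elderberry", "fig"])
# basket is now ["date", "cherry", "blueberry", "banana", "elderberry", "fig"]
\end{minted}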
\subsection{Dictionary Methods}
Dictionaries, much like lists, also have many methods which allow for the manipulation and collection of data from the dictionary. \medskip
\begin{center}
\begin{tabular}{l|l|l|l}
Method: & Parameters: & Example: & Purpose: \\
\hline
\textbf{a.clear()} & & \makecell{a.clear()} & \makecell{Removes all items from\\the dictionary} \\
\hline
\textbf{a.copy()} & & \makecell{a.copy()} & \makecell{Makes a shallow copy\\of the dictionary} \\
\hline
\textbf{a.fromkeys()} & \makecell{sequence\\(value)} & \makecell{a.fromkeys(k)} & \makecell{Creates a new dictionary\\from the given sequence\\of elements} \\
\hline
\textbf{a.get()} & \makecell{key\\(value)} & \makecell{a.get("apple")} & \makecell{Returns the value for\\the specified key} \\
\hline
\textbf{a.items()} & & \makecell{a.items()} & \makecell{Displays a list of a\\dictionary's key-value\\tuple pairs} \\
\hline
\textbf{a.keys()} & & \makecell{a.keys()} & \makecell{Displays a list of a\\dictionary's keys} \\
\hline
\textbf{a.pop()} & \makecell{key\\(default)} & \makecell{a.pop("apple")} & \makecell{Removes and returns a\\given element from a\\dictionary} \\
\hline
\textbf{a.popitem()} & & \makecell{a.popitem()} & \makecell{Removes and returns\\the key-value pair in\\LIFO order} \\
\hline
\textbf{a.setdefault()} & \makecell{key\\(default)} & \makecell{a.setdefault(\\"apple")} & \makecell{Returns the value of\\a key if present,\\inserts a key-value\\pair otherwise} \\
\hline
\textbf{a.update()} & \makecell{dictionary} & \makecell{a.update(b)} & \makecell{Updates the dictionary\\with elements from\\another dictionary} \\
\hline
\textbf{a.values()} & & \makecell{a.values()} & \makecell{Displays a list of a\\dictionary's values} \\
\end{tabular}
\end{center}
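Again, a short, hypothetical sketch (re-using a \textit{fruit\_stock} dictionary like the one above) shows a few of these methods together:
\begin{minted}{python}
fruit_stock = {"apples": 42, "bananas": 76, "oranges": 90}
fruit_stock.get("cherries", 0)
# Returns 0 - "cherries" is not a key, so the default value is returned instead
list(fruit_stock.keys())
# Returns ["apples", "bananas", "oranges"]
fruit_stock.update({"bananas": 80, "cherries": 12})
# fruit_stock is now {"apples": 42, "bananas": 80, "oranges": 90, "cherries": 12}
fruit_stock.pop("oranges")
# Returns 90 and removes the key-value pair from the dictionary
\end{minted}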
\end{document}
\documentclass[12pt]{article}
\setlength{\evensidemargin}{-0.25in}
\setlength{\oddsidemargin} {-0.25in}
\setlength{\textwidth} {+7.00in}
\setlength{\topmargin} {+0.00in}
\setlength{\textheight} {+8.50in}
\usepackage{hyperref}
\hypersetup{
colorlinks=true,
linkcolor=blue,
filecolor=magenta,
urlcolor=cyan,
}
\title{recsforme: A media recommendation web app\thanks{\url{https://github.com/bobsmith947/recsforme}}}
\author{Lucas Kitaev}
\date{\today}
\begin{document}
\maketitle
\section*{Introduction}
I started working on this in 2017 as my high school senior project. Over the years, I have found myself coming back and making improvements with the new skills I have learned. The website allows users to search for music, TV shows, and movies (these are referred to as ``groups''). Users can vote on a group by either ``liking'' or ``disliking'' it. Users can view their list of likes and dislikes, and generate recommendations based on the groups they have liked and disliked. The backend of the application is built on the since-renamed Java EE platform for Java 8. The frontend of the application uses a Model-View-ViewModel (MVVM) architecture provided by \href{https://knockoutjs.com/}{Knockout} and styling is done through \href{https://getbootstrap.com/}{Bootstrap}.
\section*{The Problem}
Turns out the na\"{\i}ve recommendation engine implementation I wrote wasn't the most efficient! To generate recommendations for a user, the algorithm would essentially compare that user to all other users, and rank them based on similarity. The recommendations would then be taken from the users with the highest similarity. You can only imagine this approach would quickly become slow as the number of users increased. Given that this website is mostly for my own academic interest rather than to actually provide a service to real people, efficiency is not much of a concern, but I was looking to see if machine learning could help speed up the process.
\newpage
\section*{The Solution}
Since Java doesn't have the best support for machine learning toolkits, I figured that I should stick with Python to implement the new recommendation engine. The proposed solution is a Python script that can be periodically run to generate recommendations for all users. The script can communicate with the backend Postgres database using \href{https://www.psycopg.org/}{Psycopg}.
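As a rough sketch of what that communication could look like (the connection settings, table, and column names below are purely hypothetical, not the actual schema):
\begin{verbatim}
import psycopg2

conn = psycopg2.connect(dbname="recsforme", user="recsforme", host="localhost")
with conn, conn.cursor() as cur:
    # read the users' likes/dislikes ...
    cur.execute("SELECT user_id, group_id, liked FROM votes")
    votes = cur.fetchall()
    # ... and, after computing recommendations, write them back
    recommendations = []  # filled in by the model described below
    cur.executemany(
        "INSERT INTO recommendations (user_id, group_id) VALUES (%s, %s)",
        recommendations)
conn.close()
\end{verbatim}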
\subsection*{Data}
Group information is taken from the \href{https://musicbrainz.org/doc/MusicBrainz_Database/}{MusicBrainz Database}. As the name suggests, this only covers music groups, like artists and albums. The website previously used an alternative API similar to IMDb to get information on movies and TV shows; however, that has since been removed for the purposes of this project. After importing the MusicBrainz sample dataset, I created a list of users and randomly assigned groups as either likes or dislikes of that user. Obviously, randomly picking things to like and dislike may not accurately represent the taste of most real people, but given the absence of real people to use, this will serve as the training data. No feature selection or extraction methods were used.
\subsection*{Model}
The chosen model is the \href{https://github.com/iskandr/fancyimpute/blob/master/fancyimpute/similarity_weighted_averaging.py}{\tt SimilarityWeightedAveraging} class from the {\tt fancyimpute} package for matrix completion. This model seemed like a good choice because it expects sparse input, which is reasonable since most users will only ever add a small percentage of the total number of groups to their lists. The input to the model is an $n \times m$ incomplete matrix $A$ where $n$ is the number of users and $m$ is the number of groups. $A_{ij}=1$ if user $i$ likes group $j$, and $A_{ij}=-1$ if user $i$ dislikes group $j$, otherwise $A_{ij}=\mathtt{NaN}$. In order to complete the matrix, the model actually uses a similar approach to the algorithm I wrote myself; it calculates similarity between rows (users) and uses those similarities to calculate missing entries. The model is run twice: once for artists and once for albums. Using the completed matrices, at most 10 of the highest scoring groups of both types are added to each user's recommendations. Scores are real numbers between $-1$ and $1$; groups must have a score of at least 0 to be added.
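The following sketch illustrates that setup on toy data; the exact completion call is an assumption (the solver interface in {\tt fancyimpute} has changed between releases), so this is illustrative rather than the exact script.
\begin{verbatim}
import numpy as np
from fancyimpute import SimilarityWeightedAveraging

# toy data: 4 users, 5 groups; +1 = like, -1 = dislike
votes = [(0, 0, 1), (0, 2, -1), (1, 0, 1), (1, 3, 1), (2, 1, -1), (3, 4, 1)]
n_users, n_groups = 4, 5

A = np.full((n_users, n_groups), np.nan)   # unknown entries stay NaN
for user, group, vote in votes:
    A[user, group] = vote

# complete the matrix; recent fancyimpute releases expose fit_transform on solvers
completed = SimilarityWeightedAveraging().fit_transform(A)

# keep at most 10 unseen groups per user with a non-negative predicted score
for user in range(n_users):
    unseen = np.isnan(A[user])
    scores = np.where(unseen, completed[user], -np.inf)
    ranked = np.argsort(scores)[::-1][:10]
    recommendations = [g for g in ranked if scores[g] >= 0]
\end{verbatim}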
\subsection*{Evaluation}
Since it's impossible to know the true completed matrix, and obscuring entries isn't particularly valuable for randomly generated data, there is no scoring metric I'm interested in using to evaluate the model. Instead, I'm more interested in the computational performance of the model. For testing this, I created 1,000 users each with 100 artists and 100 albums on their lists. I ran it on my laptop with a 2 GHz CPU to get an idea of the lower end of performance; only one CPU core was utilized; memory usage peaked at 1.4 GB; 13,430 recommendations were generated in total. The verbose output and execution time (using {\tt time(1)}) is shown on the next page.
\newpage
\begin{verbatim}
Album recommendations:
[SimilarityWeightedAveraging] Creating dictionary from matrix with shape (1000, 18595)
[SimilarityWeightedAveraging] # rows = 1000
[SimilarityWeightedAveraging] # columns = 18513
[SimilarityWeightedAveraging] Computed 61416 similarities between rows
Artist recommendations:
[SimilarityWeightedAveraging] Creating dictionary from matrix with shape (1000, 184055)
[SimilarityWeightedAveraging] # rows = 1000
[SimilarityWeightedAveraging] # columns = 77367
[SimilarityWeightedAveraging] Computed 1205 similarities between rows
real 22m49.871s
user 20m35.516s
sys 0m13.719s
\end{verbatim}
\section*{Conclusion}
The implementation of this machine learning system has gone relatively well with no major challenges. There are no particular ethical implications (outside of storing usernames/passwords, which is not included in the scope of this report). The next step for the project will be setting up the website to get recommendations from the database. This will allow me to do further testing by manually creating users that have more representative tastes, and seeing if I can get reasonable recommendations for those users. I can also do more performance tuning, and potentially implement multithreading to allow utilization of multiple CPU cores.
\end{document}
\subsection{LCD}
\documentclass[sigconf]{acmart}
\usepackage{booktabs} % For formal tables
\usepackage{listings}
\usepackage{amsmath} % AMS Math Package
\usepackage{amsthm} % Theorem Formatting
\usepackage{amssymb} % Math symbols such as \mathbb
\usepackage{tikz}
\usepackage{siunitx}
\usepackage{graphicx}
\usepackage{minted} % code
\usepackage{pgfplots}
\usetikzlibrary {positioning}
\usetikzlibrary {datavisualization}
\newcommand{\curl}[1]{{\nabla} \times #1} % for curl
% Copyright
\setcopyright{none}
% DOI
\acmDOI{}
% ISBN
\acmISBN{}
%Conference
\acmConference{}{}{}
%\acmBooktitle{}
%\settopmatter{printacmref=false}
\begin{document}
\title{Simulating Turbulence with Recurrent Neural Networks }
\subtitle{}
\author{Robert Jendersie}
\maketitle
\section{Introduction}
In fluid dynamics, the behaviour of turbulence remains an unsolved problem. While the dynamics are described by the Navier-Stokes equations, a numerical simulation of sufficiently high resolution, where turbulence occurs, remains infeasible. On a statistical level however, the different scales of frequencies are known to follow the Kolmogorov energy cascade, an observation which has been used to generate turbulence with plausible energy distributions and temporal coherence \cite{kim2008wavelet}. \\
Neural networks have enjoyed success in an increasingly wide range of problems beyond classification, including long term predictions and image synthesis.
Recently, recurrent neural networks (RNNs) have been considered to predict chaotic physical systems; see \cite{vlachas2019forecasting} for a comparison of different approaches.
The possibility of having a RNN learn to simulate a turbulent flow is explored in this report.
\section{Network Architecture}
The objective of the network is to simulate the high resolution vector-field $\vec{u}$ for an arbitrary number of steps in a scene where turbulence occurs.
Instead of directly operating on $\vec{u}$, the fluid's vorticity $\zeta$
\[
\zeta = \curl{\vec{u}},
\]
is used. Assuming that $\vec{u}$ is divergence free, $\zeta$ is sufficient to reconstruct the complete vector-field. In 2D, the vorticity is a scalar-field, thus reducing the number of dimensions to operate on and making it easy to visualize.
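For a velocity field sampled on a regular grid, $\zeta$ can be approximated with finite differences; the following sketch assumes arrays indexed as $[y, x]$ and unit grid spacing, which is an illustrative choice rather than the exact discretization used here.
\begin{minted}{python}
import numpy as np

def vorticity_2d(u_x, u_y, dx=1.0, dy=1.0):
    # zeta = d(u_y)/dx - d(u_x)/dy on a regular grid, arrays indexed as [y, x]
    duy_dx = np.gradient(u_y, dx, axis=1)
    dux_dy = np.gradient(u_x, dy, axis=0)
    return duy_dx - dux_dy
\end{minted}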
\subsection{Inputs and Outputs}
As input, both the full $\zeta$ from the previous time step and the current state of a lower-resolution simulation with scale $s$ are considered.
In addition, parameters describing the variable parts of the scene, such as the size of the obstacle and inflow velocity may be given.
As output, the high resolution $\zeta$ is expected.
When operating on the spatial data, a stack of convolutional layers is used to extract important features and deconvolutional layers to construct the full resolution output. Alternatively, inputs and outputs can also be given directly in a frequency domain, in which case the outputs are handled by multiple dense layers of increasing size with hyperbolic tangent activations and a single dense layer with linear activation at the end to match the correct output size.
Since tensorflow has limited support for complex numbers, real and imaginary parts are separated into two float channels. \\
Additional scene parameters given as scalar inputs are first processed by a small LSTM unit and then concatenated with the other processed inputs before the recurrent layers.
\subsection{Recurrent Layers}
\begin{figure}
\begin{tikzpicture}
[align=center,node distance=0.5cm, add/.style={circle,draw},layer/.style={rectangle,draw},placeholder/.style={}]
\node[placeholder] (input) {};
\node[add] (add2) [right=of input] {+};
\node[layer] (rnn0) [right=of add2] {LSTM};
\node[add] (add0) [right=of rnn0] {+};
\node[layer] (rnn1) [right=of add0] {LSTM};
\node[add] (add1) [right=of rnn1] {+};
\node[placeholder] (rnn2) [right=of add1] {};
\draw [->] (input.east) -- (add2.west);
\draw [->] (add2.east) -- (rnn0.west);
\draw [->] (add2) to [bend left=45] (add0);
\draw [->] (rnn0.east) -- (add0.west);
\draw [->] (add0.east) -- (rnn1.west);
\draw [->] (add0) to [bend left=45] (add1);
\draw [->] (rnn1.east) -- (add1.west);
\draw [->] (add1.east) -- (rnn2.west);
\end{tikzpicture}
\caption{Residual Layers.}
\label{residualLayers}
\end{figure}
The main work is done by the recurrent layers, for which both Long Short-Term Memory (LSTM) and Gated Recurrent Units (GRU) are suitable. Where dimensions are compatible, residual connections are inserted, adding together the inputs and outputs of the current layer as shown by Figure~\ref{residualLayers}. The idea is that the task of learning just modifications instead of the full mapping is simpler. This generally improves the training success, especially for longer networks, at low costs for both training and execution since no extra weights are needed and tensor addition operations are cheap \cite{he2016deep}. \\
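A minimal sketch of such a residual recurrent stack, written with the Keras functional API, is shown below; the layer sizes and the assumption that the feature dimension matches the unit size are illustrative choices rather than the exact configuration used in the experiments.
\begin{minted}{python}
from tensorflow.keras import layers, models

def residual_lstm_stack(x, num_layers=3, units=80):
    # x has shape (batch, time, units); each LSTM output is added to its input
    for i in range(num_layers):
        h = layers.LSTM(units, return_sequences=True, stateful=True)(x)
        x = layers.Add()([x, h])
    return x

inp = layers.Input(batch_shape=(4, 8, 80))  # (batch, window, features), sizes assumed
model = models.Model(inp, residual_lstm_stack(inp))
\end{minted}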
An important parameter of the recurrent layers is their state-fullness. Usually RNNs are fed a fixed number of steps to predict the next step, after which the internal memory of each unit is reset to $0$. For a simulation, processing just one time step should yield the next one. Also, the training can impose some practical limits on the number of time steps given as input, which may be shorter than some long term dependencies in the data. Thus, only state-full networks are considered here.
%todo refs
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
\section{Training Setup}\label{sec:training}
\begin{figure}
\begin{tikzpicture}
\node[anchor=south west,inner sep=0] (image) at (0,0) {\includegraphics[width=0.45\textwidth]{imgs/scene.png}};
\begin{scope}[x={(image.south east)},y={(image.north west)}]
\draw[blue,ultra thick] (0.0,0.419) rectangle (0.05,0.581) node[above] {source};
\draw[blue,ultra thick] (0.2,0.5) ellipse [x radius=0.05, y radius=0.1] (0.2,0.6) node[above] {obstacle};
\end{scope}
\end{tikzpicture}
\caption{The Scene.}
\label{trainingScene}
\end{figure}
The training setup, as shown rotated by $\ang{90}$ in Figure~\ref{trainingScene}, consists of a source at the bottom, where smoke flows in, and a circular obstacle above it. Through buoyancy, the smoke flows around the obstacle on both sides, causing turbulence in the area above when the streams merge. To add some variation, noise is applied to the inflow. Also, the obstacle size can be changed and some random initial velocity may be applied to the input periodically.
Advection is computed with a second order semi Lagrangian scheme.\\
The low resolution inputs for the purpose of training are extracted from the 2D discrete Fourier Transform
\[
Z = \mathcal{F} (\zeta),
\]
which is also depicted in Figure~\ref{lowFreqs}.
Since $\zeta$ is real, symmetries in $Z$ allow the use of only the upper quadrants. The resulting real part is even $Z(u,v)=Z(-u,-v)$ and the imaginary part odd $Z(u,v)=-Z(-u,-v)$. The input consists of all frequencies up to $s$, the expected output of all larger frequencies.
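The sketch below illustrates how such an input/output pair could be extracted from a single vorticity field with \texttt{numpy}; the cutoff handling and the flattened channel layout are simplified assumptions for illustration.
\begin{minted}{python}
import numpy as np

def split_frequencies(zeta, s):
    Z = np.fft.fftshift(np.fft.fft2(zeta))
    h, w = Z.shape
    upper = Z[h // 2:, :]               # v >= 0 suffices since zeta is real
    cx = w // 2                         # column of the zero frequency
    low_mask = np.zeros(upper.shape, dtype=bool)
    low_mask[:s, cx - s:cx + s] = True  # |u| < s and 0 <= v < s
    low, high = upper[low_mask], upper[~low_mask]
    as_channels = lambda a: np.stack([a.real, a.imag], axis=-1)
    return as_channels(low), as_channels(high)  # network input, expected output
\end{minted}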
\begin{figure}
\begin{tikzpicture}
\draw[fill=green!20] (-1.0,0.0) rectangle (1.0,1.0) node[right] {output};
\draw[fill=blue!20] (-0.5,0.0) rectangle (0.5,0.5) node[right] {input};
\draw (-0.5,-0.5) rectangle (0.5,0.5);
\draw (-0.5,-0.5) rectangle (0.5,0.5);
\draw[thin, gray] (-1.0,-1.0) rectangle (1.0,1.0);
\draw[->] (-1.5,0) -- (1.5,0) node[below] {$u$};
\draw[->] (0,-1.5) -- (0,1.5) node[right] {$v$};
\draw(-1.0,0.0) node[below left] {$-1$} (1.0,0.0) node[below right] {$1$};
\draw(0.0,-1.0) node[below left] {$-1$} (0.0,1.0) node[above left] {$1$};
\draw(0.5,0.0) node[below right] {$s$};
\end{tikzpicture}
\caption{Input and outputs from $Z(u,v)$.}
\label{lowFreqs}
\end{figure}
As optimizer, RMSprop with default arguments \cite{RMSprop} is used. Other options such as stochastic gradient descent and Adam were briefly considered but seem to perform worse. As loss functions, the mean square error
\begin{equation}\label{eq:mse}
\frac{1}{n_s}\sum_{x}\sum_{y} (\zeta(x,y) - \hat{\zeta}(x,y))^2,
\end{equation}
for spatial outputs and
\begin{equation}\label{eq:ferr}
\frac{1}{n_f}\sum_{u}\sum_{v} |Z(u,v) - \hat{Z}(u,v)|^2,
\end{equation}
for frequency outputs are taken.\\
Instead of a fixed size data set, the generator pattern is used. The numerical simulation can provide a continuous stream of new data, minimizing the risk of over-fitting. Also, common data preprocessing such as shuffling and batching after each epoch pose problems with the persistent state of the network over multiple training steps.
\subsection{Stateful Recurrent Neural Networks}
As mentioned before, RNN layers use a special training process displayed in Figure~\ref{rnnTraining}. Instead of passing through one sample and back propagating the error, $k$ time steps are processed by a loop and only the final output is forwarded to the error computation. During back propagation, the loop is unrolled, effectively stacking the same layer multiple times.
\begin{figure}
\begin{tikzpicture}
[gap/.style={},layer/.style={rectangle,draw}, error/.style={red}]
\node[gap] (inp0) {$t_0$};
\node[gap] (inp1) [right=of inp0] {$t_1$};
\node[layer] (rnn0) [below=of inp0] {LSTM};
\node[layer] (rnn1) [right=of rnn0, below=of inp1] {LSTM};
\node[gap] (rnn2) [right=of rnn1] {$\dots$};
\node[layer] (rnn3) [right=of rnn2] {LSTM};
\node[gap] (inp2) [right=of inp1, above=of rnn3] {$t_{k-1}$};
\node[gap] (outp) [below=of rnn3] {$t_k$};
\draw [->] (inp0.south) -- (rnn0.north);
\draw [->] (inp1.south) -- (rnn1.north);
\draw [->] (inp2.south) -- (rnn3.north);
\draw[transform canvas={yshift=0.5ex},->] (rnn0.east) -- (rnn1.west);
\draw[transform canvas={yshift=-0.5ex},error,->](rnn1.west) -- (rnn0.east);
\draw[transform canvas={yshift=0.5ex},->] (rnn1.east) -- (rnn2.west);
\draw[transform canvas={yshift=-0.5ex},error,->](rnn2.west) -- (rnn1.east);
\draw[transform canvas={yshift=0.5ex},->] (rnn2.east) -- (rnn3.west);
\draw[transform canvas={yshift=-0.5ex},error,->](rnn3.west) -- (rnn2.east);
\draw[transform canvas={xshift=0.5ex},->] (rnn3.south) -- (outp.north);
\draw[transform canvas={xshift=-0.5ex},error,->](outp.north) -- node[auto] {error} (rnn3.south);
\end{tikzpicture}
\caption{A single LSTM layer in training.}
\label{rnnTraining}
\end{figure}
While a larger window size $k$ may allow the network to pick up long term dependencies, the increased training time sets practical bounds and the impact of different choices is tested.\\
Processing multiple samples together as a batch and updating weights from the accumulated error is important both to speed up the training and to prevent over-fitting. However, for state-full networks, continuity across batches is required. Thus, batches of size $b$ would need to be built from $b$ different time-series. Instead of running multiple simulations, a ring buffer of previous steps is kept. By choosing a sufficient distance $d$, a history of size $b \cdot d$ can be used to create batches of seemingly independent simulations. Let $p$ be the pointer to the current position of the ring buffer. Then after just one simulation step the next batch is constructed from the elements with indices
\[
ind_i = (p + i \cdot d) \mod (b \cdot d),
\]
where $i=0,\dots,b-1$.
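As an illustration, the batch construction could be sketched as follows; the buffer layout and the field resolution are assumptions made for this example.
\begin{minted}{python}
import numpy as np

b, d = 16, 50                                # batch size and series distance
field_shape = (64, 64)                       # assumed resolution of one time step
buffer = np.zeros((b * d,) + field_shape)    # ring buffer of previous steps

def store(step, buffer, p):
    # write the newest simulation step and advance the ring buffer pointer
    p = (p + 1) % len(buffer)
    buffer[p] = step
    return p

def next_batch(buffer, p):
    # pick b entries spaced d steps apart so they behave like independent series
    idx = [(p + i * d) % len(buffer) for i in range(b)]
    return buffer[idx]
\end{minted}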
Since tensorflow requires for state-full RNNs that the batch size is a fixed parameter, the trained model is impractical for the actual use case where just one time series should be simulated. Fortunately this issue, as well as the awkwardness of having to input multiple time steps to receive just one output, can be circumvented by building another model with the same architecture but $b=k=1$. Then the training results from \textit{modelT} can be transferred to \textit{modelP} via
\begin{minted}{python}
modelP.set_weights(modelT.get_weights())
\end{minted}
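Assuming both models come from the same (hypothetical) builder function, only the fixed batch and window dimensions differ, so the weight shapes match:
\begin{minted}{python}
from tensorflow.keras import layers, models

def build_model(batch_size, window, features=80):
    # illustrative architecture; only the fixed batch/window dimensions differ
    inp = layers.Input(batch_shape=(batch_size, window, features))
    h = layers.LSTM(features, stateful=True)(inp)
    return models.Model(inp, layers.Dense(features)(h))

modelT = build_model(batch_size=16, window=8)  # trained on windows and batches
modelP = build_model(batch_size=1, window=1)   # simulates one step at a time
modelP.set_weights(modelT.get_weights())       # weights are shape-compatible
\end{minted}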
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
\section{Evaluation}
Quality of the different networks is evaluated on validation sets of size $1024$ generated with the same setup as the training process, but varying seeds for the random number generator.
\subsection{Network Objective}\label{sec:eva:objective}
\begin{figure}
\begin{tikzpicture}
\begin{axis}[ymode=log, legend pos = south east, xlabel=timesteps $t$, ylabel=$(\zeta - \hat{\zeta})^2$]
\addplot+[mark size=1pt] table[x=t, y=FullRes, col sep=comma]{data/fullres.csv};
\addlegendentry{FullRes}
\addplot+[mark size=1pt] table[x=t, y=Conv, col sep=comma]{data/fullres.csv};
\addlegendentry{Conv}
\addplot+[mark size=1pt] table[x=t, y=LowFreq, col sep=comma]{data/fullres.csv};
\addlegendentry{LowFreq}
\end{axis}
\end{tikzpicture}
\caption{Error over time for different approaches.}
\label{objectiveError}
\end{figure}
First, we take a look at two different possible objectives. Either to perform the full simulation where the outputs are fed back as input of the next step, or to generate details from a lower frequency simulation ($s=8$).
In Figure~\ref{objectiveError} the mse for these approaches is shown, with the addition of a purely convolutional network without recurrent units for the former approach (see Appendix A). Unsurprisingly, the full resolution networks perform better initially, since they mostly have to reproduce the input. However, the outputs quickly diverge from the expected results until settling in a stable loop.
\begin{figure}
\includegraphics[width=0.5\textwidth]{imgs/fullres_series.png}
\caption{Closed loop network after 1, 8, 32, 64 steps.}
\label{closedLoop}
\end{figure}
A closer look at the outputs of FullRes in Figure~\ref{closedLoop} shows that while some major features of the scene are preserved, details are lost and artefacts have formed. On the other hand, the upscaling approach improves for over 40 steps while filling its internal memory and then continues to produce approximations of the expected state. Thus, in following experiments we attempt to improve on the second approach.
\subsection{Frequency and Spatial IO}
To compare spatial (S) and frequency (F) inputs/outputs, the same inner RNN is combined with appropriate outer processing. Since these additional layers can have a significant impact on the achieved quality, networks with similar execution times are considered. In Table~\ref{tab:freqIO}, average times for a single step are shown in addition to the average errors \eqref{eq:mse} and the square root of \eqref{eq:ferr}.
\begin{table}
\centering
\caption{Error for different IO approaches.}
\label{tab:freqIO}
\begin{tabular}{lccc}
\hline
& S error & F error & t in s \\ \hline
input & 0.0277 & 7.9214 & 0.0000 \\
S$\rightarrow$S & 0.0077 & 4.6925 & 0.0046 \\
F$\rightarrow$S & 0.0081 & 4.7448 & 0.0045 \\
F$\rightarrow$F & 0.0080 & 4.3151 & 0.0037 \\
S$\rightarrow$F & 0.0074 & 4.1384 & 0.0037 \\ \hline
\end{tabular}
\end{table}
All variants minimize both error measures to a certain extent when compared to the upscaled input.
From these aggregate values it does seem, however, that extracting relevant features works better from the spatial view. For the outputs, expecting frequency leads to far better frequency results and a similar spatial error.
A more detailed look at the power spectrum, summed up over the v-axis in Figure~\ref{powerspectrum} and averaged over a whole validation set, reveals that both output variants manage to reproduce some details across all frequencies. However, the deconvolutional versions exhibit larger deviations in higher frequencies.
\begin{figure}
\includegraphics[width=0.5\textwidth]{imgs/powerspectrum.pdf}
\caption{Power spectrum of different IO approaches.}
\label{powerspectrum}
\end{figure}
\subsection{Network Size}
How the size of the recurrent layers impacts the results is explored in Table~\ref{tab:size}. Both the number of layers and the size of each unit appear to have only minor effects and diminishing returns. Going from $1$ layer to $5$ layers decreases the measured spatial error by just $2.5\%$ and from $3$ to $5$ the frequency error does not improve at all.
Similar observations are made for the unit size, where going from $40$ to $120$ gives a $5\%$ improvement in spatial error while the difference from $80$ to $120$ is only $1\%$.
\begin{table}
\centering
\caption{Size variations of the recurrent layers.}
\label{tab:size}
\begin{tabular}{lccccc}
\hline
& layers & unit size & S error & F error & t in s \\ \hline
Len1 & 1 & 80 & 0.0077 & 4.2271 & 0.0037 \\
Len2 & 3 & 80 & 0.0076 & 4.1755 & 0.0042 \\
Len3 & 5 & 80 & 0.0075 & 4.1773 & 0.0046 \\
Size1 & 3 & 40 & 0.0079 & 4.2675 & 0.0040 \\
Size3 & 3 & 120 & 0.0075 & 4.1768 & 0.0042 \\ \hline
\end{tabular}
\end{table}
\subsection{Window and Batch Size}
To determine the impact of training parameters on the success, we train the same model architecture for $50$ epochs on the same simulation but organize the data with varying window (w) and batch (b) sizes. How the loss on the validation set changes is plotted in Figure~\ref{bwsize}.
\begin{figure}
\begin{tikzpicture}
\begin{axis}[legend style={at={(1.05,1.0)}, anchor=north}, xlabel=epochs $t$]
\addplot+[mark size=1pt] table[x=epoch, y=W1B1, col sep=comma]{data/wbsize.csv};
\addlegendentry{1 1}
\addplot+[mark size=1pt] table[x=epoch, y=W1B32, col sep=comma]{data/wbsize.csv};
\addlegendentry{1 32}
\addplot+[mark size=1pt] table[x=epoch, y=W4B4, col sep=comma]{data/wbsize.csv};
\addlegendentry{4 4}
\addplot+[mark size=1pt] table[x=epoch, y=W8B4, col sep=comma]{data/wbsize.csv};
\addlegendentry{8 4}
\addplot+[mark size=1pt] table[x=epoch, y=W16B4, col sep=comma]{data/wbsize.csv};
\addlegendentry{16 4}
\addplot+[mark size=1pt] table[x=epoch, y=W16B16, col sep=comma]{data/wbsize.csv};
\addlegendentry{16 16}
\end{axis}
\end{tikzpicture}
\caption{Frequency error during training with inputs (w b).}
\label{bwsize}
\end{figure}
To have the recurrent layers learn anything meaningful, windows of size larger than $1$ seem necessary. Both (1 1) and (1 32) show no improvement after the initial epoch, while (4 4) has an overall downward trend. Further increasing the size to (8 4) or even (16 4) leads to even better results, but with diminishing returns. Also, the training time per epoch increases by more than $50\%$ each time, as shown in Table~\ref{tab:bwsize}.\\
For the batch size, mini batches of size $4$ help prevent the over-fitting present due to the similarity of consecutive time steps. Furthermore, a larger batch size leads to convergence in fewer epochs at no cost to the quality of the result. Since back-propagation takes the majority of the time during the training process, as seen by a duration increase of less than $1\%$ from (16 4) to (16 16), this is also a huge practical advantage in terms of training time required. With our training setup however, the time it takes to fill the data buffer is proportional to $w \cdot b$, which imposes an upper bound on the effective speed up.
\begin{table}
\centering
\caption{Training times depending on batch- and window size.}
\label{tab:bwsize}
\begin{tabular}{ccc}
\hline
window & batch & t in ms \\ \hline
1 & 32 & 53 \\
4 & 4 & 154 \\
8 & 4 & 232 \\
16 & 4 & 370 \\
16 & 16 & 373 \\ \hline
\end{tabular}
\end{table}
\subsection{Additional Inputs}
Finally, we explore the robustness with respect to a changing scene and how additional information given to the network impacts the performance. \\
A minor improvement of $\sim 1\%$ is seen from providing the inflow as extra input, which is shown in Table~\ref{tab:errobs} (Ref+Inflow).
\begin{table}
\centering
\caption{Error with (1) and without (2) changing obstacle size.}
\label{tab:errobs}
\begin{tabular}{lcccc}
\hline
& (1) S error & F error & (2) S error & F error \\ \hline
Reference & 0.0192 & 7.5257 & 0.0076 & 4.2010 \\
Ref+Inflow & 0.0198 & 7.5895 & 0.0075 & 4.1664 \\
TrainObs & 0.0100 & 4.8752 & 0.0086 & 4.5852 \\
TrainObs+ & 0.0099 & 4.8177 & 0.0085 & 4.4513 \\
TrainVel & 0.0184 & 7.0310 & 0.0201 & 7.6623 \\
TrainVel+ & 0.0178 & 6.5250 & 0.0160 & 6.0167 \\ \hline
\end{tabular}
\end{table}
Both with and without it, however, the models largely fail when variations in the range $[0.8,1.2]$ of the obstacle size are introduced, with observed errors close to the low resolution simulation.
By also adjusting the training process with a periodically changing obstacle (TrainObs), this effect is somewhat mitigated, at a cost of $\sim 12\%$ increased error for the simple setting. The loss in quality can also be seen in Figure~\ref{addinputs_img} from picture 2 to 3, with the right stream appearing notably less sharp above the obstacle.
\begin{figure}
\includegraphics[width=0.5\textwidth]{imgs/addinputs_comparison.png}
\caption{Output when trained with different scenes: original simulation, static, obstacle size, obstacle size + inflow velocity.}
\label{addinputs_img}
\end{figure}
As with the inflow, introducing the obstacle size as input (TrainObs+; see Appendix A) only slightly improves the results.\\
Now modifying the initial velocity has a large impact on the dynamics. The models trained in this environment (TrainVel and TrainVel+) display huge errors on both previous validation sets. With consideration of the fourth picture from Figure~\ref{addinputs_img}, this discrepancy seems to be caused not primarily by a lack of detail, but rather by stronger amplitudes. Also, the addition of velocity input has a larger impact here, with a difference of $25\%$ in the spatial error on validation set (2).
Meanwhile, this gap does not appear in Figure~\ref{inflVelSteps}, where we take a closer look at the error over time with respect to a scene where both obstacle size and inflow velocity are subject to change.
In fact, although the model trained without inflow velocity performs much worse overall, the peaks and valleys remain largely the same across all versions and no consistent deviations caused by the extra inputs can be seen.
\begin{figure}
\begin{tikzpicture}
\begin{axis}[legend style={at={(0.45,0.98)}, anchor=north}]%ymin=2, ymax=7
\addplot+[mark=none, each nth point=1] table[x=step, y=inflowVel, col sep=comma]{data/general.csv};
\addlegendentry{TrainVel+};
\addplot+[mark=none, each nth point=1] table[x=step, y=train, col sep=comma]{data/general.csv};
\addlegendentry{TrainVel};
\addplot+[mark=none, each nth point=1] table[x=step, y=ref, col sep=comma]{data/general.csv};
\addlegendentry{TrainObs+};
% \addplot +[mark=none] coordinates {({axis cs:200,0}|-{rel axis cs:0,0}) ({axis cs:200,0}|-{rel axis cs:0,1})};
% inflow
\draw[color=blue!40] ({axis cs:32,0}|-{rel axis cs:0,0}) -- ({axis cs:32,0}|-{rel axis cs:0,1});
\draw[color=blue!40] ({axis cs:398,0}|-{rel axis cs:0,0}) -- ({axis cs:398,0}|-{rel axis cs:0,1});
\draw[color=blue!40] ({axis cs:746,0}|-{rel axis cs:0,0}) -- ({axis cs:746,0}|-{rel axis cs:0,1});
% obstacle
\draw[color=gray!40] ({axis cs:100,0}|-{rel axis cs:0,0}) -- ({axis cs:100,0}|-{rel axis cs:0,1});
\draw[color=gray!40] ({axis cs:377,0}|-{rel axis cs:0,0}) -- ({axis cs:377,0}|-{rel axis cs:0,1});
\draw[color=gray!40] ({axis cs:654,0}|-{rel axis cs:0,0}) -- ({axis cs:654,0}|-{rel axis cs:0,1});
\draw[color=gray!40] ({axis cs:931,0}|-{rel axis cs:0,0}) -- ({axis cs:931,0}|-{rel axis cs:0,1});
\addlegendentry{Test}
\end{axis}
\end{tikzpicture}
\caption{Spatial error over time with changing obstacle size (gray) and inflow velocity (blue).}
\label{inflVelSteps}
\end{figure}
\section{Conclusion}
In conclusion, even a moderately sized recurrent neural network can learn some turbulent fluid dynamics. When guided by a low resolution simulation, the prediction quality remains stable over a long time.
Similar dynamics can be learned by operating in either the spatial or frequency domain, although best results were achieved with spatial input and frequency output.
In the training of a state-full RNN for this task, large windows and the use of batches are essential, with larger batches helping to accelerate the process.
The architecture can deal with changes to the scene if trained with them in mind, at the cost of some visible loss in details; just augmenting it with these parameters as additional inputs is beneficial but not sufficient to alleviate this. \\
Overall, all the networks have trouble reproducing the smaller details of the flow and simply scaling up the approach is not possible.
%\subsection{Limitations}
The main limitation is that the internal state is far too small to preserve details in the scene. Since the number of weights in the dense layers scales quadratically with their size, simply enlarging the state to the scene resolution is not feasible.
In particular, if frequency output is targeted, just upscaling the outputs from the inner layers already requires a large number of weights, since neither locality nor similarities can easily be exploited.
\subsection{Future Work}
There are a number of potential approaches and tweaks which circumvent the above mentioned problems. Instead of the usual LSTM units, convolutional LSTMs \cite{shi2015convolutional}, which are already implemented in tensorflow, could be employed. If one wants to keep using an internal frequency representation, a wavelet basis might be desirable to preserve locality. \\
By dividing the scene into smaller tiles, each simulated by another instance of the same network, only local influences are taken into account, considerably reducing the required size. One difficulty would be to propagate changes from a tile to its neighbours in each step over a shared border.\\
Another way could be to more closely consider the physical model of eddies scattering into smaller ones in an incompressible fluid according to Kolmogorov's spectrum.
With this in mind, multiple units could be trained to each produce only a narrow frequency band given a band of lower frequencies. \\
With a look back at Section~\ref{sec:eva:objective}, the first attempt using a closed loop network does not have the problem of remembering details, as the full state is kept externally. The training process would need to be adjusted to keep the state stable and physically plausible over time. One way to do this could consist of incorporating the loop into the network during training, only providing a low resolution simulation as input and occasionally resetting this internal state to the actual high resolution simulation values.\\
Finally, all described approaches could profit from a more elaborate loss function as found in the training of generative adversarial networks. In addition to the generator producing the desired output, a second network, the discriminator, is trained to identify real and generated outputs. Such an approach has been applied successfully to generate a super-resolution fluid flow in \cite{xingjian2015convolutional}.
\bibliographystyle{ieeetr}
%\bibliographystyle{ACM-Reference-Format}
%\nocite{*}
\bibliography{references}
\appendix
\section{Example Networks}
\begin{figure*}
\includegraphics[height=\textheight]{imgs/model_full.pdf}
\caption{Convolutional network for full resolution inputs ("FullRes").}
\label{fullNet}
\end{figure*}
\begin{figure*}
\includegraphics[width=\textwidth]{imgs/model_allinps.pdf}
\caption{Network with spatial input, frequency output and additional scene information ("TrainObs+").}
\label{allInputsNet}
\end{figure*}
\end{document}
\subsection{Message signing}
% ============
% = Concepts =
% ============
\clearpage
\section{Program Creation Concepts} % (fold)
\label{sec:program_creation_concepts}
Our first program is going to display some text to the Terminal. In this section you will be introduced to the programming artefacts and terminology you will need to use to create this program. This first step is important and will require you to have installed a C++ or Pascal compiler, see \cref{cha:building programs} \nameref{cha:building programs} for instructions.
A programming \textbf{artefact} is something that can be created and used within your code. In this chapter we will look at creating programs, and using a number of other artefacts. The following artefacts will be covered in this chapter:
\begin{itemize}
\item \nameref{sub:program}: A program is a sequence of instructions that when compiled creates an executable file that a user can run.
\item \nameref{sub:procedure}: A procedure is a named sequence of instructions that will get the computer to perform a task. When you want the task performed you can call the procedure.
\item \nameref{sub:library}: The program can use code from other Libraries. These libraries contain reusable Procedures and Types.
\item \nameref{sub:type}: A type defines how data is interpreted by the program. The programming language will support a number of basic types by default, and libraries can add other types.
\end{itemize}
In addition to these artefacts, you will need to understand some programming \textbf{terminology}. The following terms are discussed in this section:
\begin{itemize}
\item \nameref{sub:statement}: An \textbf{instruction} within the program.
\item \nameref{sub:expression}: A \textbf{value} used in a statement.
\item \nameref{sub:identifier}: The \textbf{name} of an artefact.
% \item Literal: A part of an \textbf{expression} where the value is entered directly into the code.
\end{itemize}
This section also introduces the following kinds of instructions. You can use these to get the computer to perform certain \textbf{actions} within your program.
\begin{itemize}
\item \nameref{sub:procedure call}: The instruction to run a procedure.
\end{itemize}
We can then use these concepts, artefacts, and instructions to create a program that will write some text to the Terminal as shown in Figure \ref{fig:program-creation-helloworld}.
\begin{figure}[h]
\centering
\includegraphics[width=0.8\textwidth]{./topics/program-creation/images/HelloWorld}
\caption[Hello World Terminal]{Hello World run from the Terminal}
\label{fig:program-creation-helloworld}
\end{figure}
\input{topics/program-creation/concepts/program}
\input{topics/program-creation/concepts/statement}
\input{topics/program-creation/concepts/procedure-call}
\input{topics/program-creation/concepts/procedure}
\input{topics/program-creation/concepts/expression}
\input{topics/program-creation/concepts/literal}
\input{topics/program-creation/concepts/type}
\input{topics/program-creation/concepts/identifier}
\input{topics/program-creation/concepts/library}
\input{topics/program-creation/concepts/comments}
\input{topics/program-creation/concepts/procedure-decl}
% section program_creation_concepts (end)
\clearpage
\subsection{Summary} % (fold)
\label{sub:program_creation_concepts_summary}
This section has introduced a number of programming artefacts, some programming terminology, and one kind of instruction. An overview of these concepts is shown in Figure \ref{fig:program-creation-summary}. The next section will look at how you can use these concepts to design some small programs.
\begin{figure}[h]
\centering
\includegraphics[width=\textwidth]{./topics/program-creation/diagrams/Summary}
\caption[Chapter Concepts]{Key Concepts introduced in this Chapter}
\label{fig:program-creation-summary}
\end{figure}
\mynote{
\begin{itemize}
\item \textbf{Artefacts} are things you can \emph{create} and \emph{use}.
\item \textbf{Terms} are things you need to \emph{understand}.
\item \textbf{Actions} are things you can \emph{command} the computer to perform.
\end{itemize}
}
% subsection summary (end)
\chapter*{Acknowledgements}
Firstly, I want to thank my dissertation advisors, Pedro Salgueiro and Vítor
Beires Nogueira, for their patience, their availability and their dedication
to this project.
I want to thank my family, who helped me finish this project with their
unending support and words of wisdom, and who always pushed me to do better.
I also want to thank my work colleagues, who always supported me, helped me
grow as a professional and as a person, challenged me to improve, and whom I
consider a second family.
I need to thank my close friends for always believing in me and for always
being available when I needed them the most, showing they are true friends.
Lastly, to every person who supported me in some manner: I am truly grateful
for your support.
\subsection{Machine learning models}
\label{sec:models}
The main goal of this work is to assess whether traditional machine learning models can ``separate'' documents according to BP citations. We will explore two approaches: unsupervised and supervised learning, with a focus on the latter.
Unsupervised learning means extracting patterns from data that is not labeled, \eg, our raw documents. Considering this, we will present the raw documents to some algorithms, without explaining which precedents are being cited, and we will check their outputs to see if there is some pattern.
We have vectors representing our texts as $X$ and the cited BPs as labels $y$, which is precisely the setting of supervised learning. With this in mind, we will also fit several supervised models to this data.
\paragraph{Latent Dirichlet allocation.} Because we are dealing with texts, it is very convenient to experiment with latent Dirichlet allocation (LDA), the most common topic modeling technique. Assuming the existence of $K$ topics, each document has a distribution over the topics, and each topic has a distribution over the words. Mathematically, the formulation is:
\begin{itemize}
\item Each topic $k \in \{1, \cdots, K\}$ has distribution $\beta_k \sim \text{Dirichlet}(\eta)$ over the words;
\item Each document $d \in \{1, \cdots, D\}$ has distribution $\theta_d \sim \text{Dirichlet}(\alpha)$ over the topics;
\item Given a document $d$, the topics have distribution $z | \theta_d \sim \text{Multinomial}(\theta_d)$;
\item Given a topic $k$, the words have distribution $w | \beta_k \sim \text{Multinomial}(\beta_k)$.
\end{itemize}
The idea of using this model is to verify whether LDA can recover the topic of the BP being cited in a document. For example, if there were only two cited precedents in the dataset, one could fit LDA with two topics and verify whether the most important words for each topic are representative of the precedents themselves. Furthermore, it is possible to verify whether this topic-to-precedent assignment is good.
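As a minimal illustration (a sketch on a toy corpus with scikit-learn, not the exact pipeline used in this work), LDA can be fitted on a bag-of-words representation and the most important words of each topic can then be inspected:
\begin{verbatim}
from sklearn.decomposition import LatentDirichletAllocation
from sklearn.feature_extraction.text import CountVectorizer

# toy stand-in for the raw documents
docs = ["the court cited precedent one", "precedent one applies to tax law",
        "labor law follows precedent two", "the court cited precedent two"]

vectorizer = CountVectorizer()
counts = vectorizer.fit_transform(docs)

lda = LatentDirichletAllocation(n_components=2, random_state=0).fit(counts)
words = vectorizer.get_feature_names_out()
for k, topic in enumerate(lda.components_):
    top = [words[i] for i in topic.argsort()[::-1][:5]]
    print(f"topic {k}:", top)
\end{verbatim}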
\paragraph{Truncated SVD dimensionality reduction.} We will reduce the dimensionality of the TF-IDF vectors to visualize whether they are in some sense separable. The dimensionality reduction technique will be the (truncated) singular value decomposition, already explained in \autoref{sec:document_embedding}. We will experiment with dimensionalities 2 and 3, which can be visualized on a 2-dimensional screen.
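A minimal sketch of this step (again on a toy corpus; in practice the documents and labels would be our texts and cited BPs) is:
\begin{verbatim}
import matplotlib.pyplot as plt
from sklearn.decomposition import TruncatedSVD
from sklearn.feature_extraction.text import TfidfVectorizer

docs = ["court cited precedent one", "tax law follows precedent one",
        "labor law follows precedent two", "court cited precedent two"]
y = [1, 1, 2, 2]   # toy labels: which BP is cited

tfidf = TfidfVectorizer().fit_transform(docs)
coords = TruncatedSVD(n_components=2, random_state=0).fit_transform(tfidf)

plt.scatter(coords[:, 0], coords[:, 1], c=y)
plt.show()
\end{verbatim}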
\paragraph{K-nearest neighbors.} Since TF-IDF vectors lie in a vector space, the decision of which BP is being cited could be made by looking at which precedents are cited by a document's neighbors. This is what the k-nearest neighbors (k-NN) model does. The number of neighbors (parameter $k$) is chosen by cross-validation.
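A sketch of the corresponding model selection, using synthetic stand-in data in place of our TF-IDF vectors $X$ and labels $y$:
\begin{verbatim}
from sklearn.datasets import make_classification
from sklearn.model_selection import GridSearchCV
from sklearn.neighbors import KNeighborsClassifier

# synthetic stand-in for the TF-IDF matrix X and the cited-BP labels y
X, y = make_classification(n_samples=300, n_features=50, n_informative=10,
                           n_classes=3, random_state=0)

knn = GridSearchCV(KNeighborsClassifier(),
                   {"n_neighbors": [1, 3, 5, 11, 21]}, cv=5).fit(X, y)
print(knn.best_params_, knn.best_score_)
\end{verbatim}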
\paragraph{Linear regression.} This model is well known, but for regression. For classification with a binary target, it is easy to fit a regression model using $y \in \{-1, 1\}$ and taking the predicted class to be the sign of the prediction. For multiclass problems, one regression is fitted per class, and the class with the highest value is chosen. We will also use Ridge regularization, with the hyperparameter chosen by cross-validation.
Although using linear regression for classification is not very convenient, mainly because the output cannot be interpreted as a probability, we can use this model to assess whether our vectors are \textbf{linearly separable} in the very high dimensional space.
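A sketch of this approach, where \texttt{RidgeClassifierCV} fits one regularized regression per class and picks the penalty by cross-validation (synthetic stand-in data):
\begin{verbatim}
from sklearn.datasets import make_classification
from sklearn.linear_model import RidgeClassifierCV

X, y = make_classification(n_samples=300, n_features=50, n_informative=10,
                           n_classes=3, random_state=0)

# predicted class = argmax over the per-class regression outputs
clf = RidgeClassifierCV(alphas=[0.01, 0.1, 1.0, 10.0]).fit(X, y)
print(clf.alpha_, clf.score(X, y))
\end{verbatim}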
\paragraph{Logistic regression.} Logistic regression is like a linear regression for classification, so it is more suitable for our task. To fit the model for multiple classes, we will minimize the multinomial logistic regression loss \cite{bishop2006pattern}:
\[- \sum_{n=1}^N \sum_{k=1}^K y_{nk} \ln p_{nk},\]
where $y_{nk}$ indicates that sample $n$ belongs to class $k$ and $p_{nk}$ is the estimated probability of sample $n$ belonging to class $k$ (calculated with a softmax of linear functions of $X_n$). We also experiment with $\ell^2$ regularization, with the hyperparameter chosen by cross-validation.
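A sketch of this model (synthetic stand-in data; the inverse regularization strength is selected by cross-validation):
\begin{verbatim}
from sklearn.datasets import make_classification
from sklearn.linear_model import LogisticRegressionCV

X, y = make_classification(n_samples=300, n_features=50, n_informative=10,
                           n_classes=3, random_state=0)

# multinomial (softmax) logistic regression with an l2 penalty
clf = LogisticRegressionCV(Cs=10, cv=5, max_iter=2000).fit(X, y)
print(clf.predict_proba(X[:2]))
\end{verbatim}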
\paragraph{Linear discriminant analysis.} Linear discriminant analysis fits a probability distribution for each class, taking the prior to be the proportion of that class in the data and modeling the distribution of the data given the class as a multivariate Gaussian with a covariance matrix shared across classes. The resulting decision boundary is linear, so we can also assess the \textbf{linear separability} of the documents using this model.
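A sketch of this classifier on synthetic stand-in data; the class priors and the shared Gaussian are estimated from the training data:
\begin{verbatim}
from sklearn.datasets import make_classification
from sklearn.discriminant_analysis import LinearDiscriminantAnalysis

X, y = make_classification(n_samples=300, n_features=50, n_informative=10,
                           n_classes=3, random_state=0)

lda = LinearDiscriminantAnalysis().fit(X, y)
print(lda.score(X, y))
\end{verbatim}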
\paragraph{Random forest.} From the decision tree-based models, we experiment with random forests. A random forest aggregates many decision trees, each fitted on a bootstrap sample of the dataset and considering only a random sample of the predictors. For a regression task, we have learned that, once the decision space is divided into nodes, the output of the model is the mean of the training samples inside the corresponding node.
It is natural to extend regression decision trees to classification decision trees: with an already fitted tree, the predicted class is the class with the most samples inside the node, and the probabilities are the class sample proportions.
There are some hyperparameters to be chosen, \eg, the depth of the trees and the number of trees. These are chosen using cross-validation.
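A sketch of the model and of the hyperparameter search (synthetic stand-in data):
\begin{verbatim}
from sklearn.datasets import make_classification
from sklearn.ensemble import RandomForestClassifier
from sklearn.model_selection import GridSearchCV

X, y = make_classification(n_samples=300, n_features=50, n_informative=10,
                           n_classes=3, random_state=0)

grid = {"n_estimators": [100, 300], "max_depth": [None, 10, 30]}
rf = GridSearchCV(RandomForestClassifier(random_state=0), grid, cv=5).fit(X, y)
print(rf.best_params_)
\end{verbatim}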
\paragraph{Support vector machines.} Support vector machines (SVM) are very powerful, despite having a simple mathematical formulation. Intuitively, they fit an optimal hyperplane dividing (a transformation of) the dataset, while allowing some points to violate this constraint. The optimization problem is:
\[\begin{aligned}
\min_{w, b, \zeta} \quad & \frac{1}{2}w^\intercal w + C \sum_{i = 1}^{N}{\zeta_i} \\
\textrm{s.t.} \quad & y_i (w^\intercal \phi(x_i) + b) \ge 1 - \zeta_i \\
& \zeta_i \ge 0, \ \ i = 1, \cdots, n. \\
\end{aligned}\]
The regularization parameter $C$ is chosen by cross-validation. The kernel function is also chosen by CV, between linear and radial basis function (RBF), as is the $\gamma$ hyperparameter of the RBF kernel.
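A sketch of the corresponding cross-validated search (synthetic stand-in data; \texttt{gamma} is ignored by the linear kernel):
\begin{verbatim}
from sklearn.datasets import make_classification
from sklearn.model_selection import GridSearchCV
from sklearn.svm import SVC

X, y = make_classification(n_samples=300, n_features=50, n_informative=10,
                           n_classes=3, random_state=0)

grid = {"kernel": ["linear", "rbf"], "C": [0.1, 1, 10],
        "gamma": ["scale", 0.01, 0.1]}
svm = GridSearchCV(SVC(), grid, cv=5).fit(X, y)
print(svm.best_params_)
\end{verbatim}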
\section{0Competitive}
\subsection{ TemplateCpp}
\raggedbottom\lstinputlisting[style=cpp]{0Competitive/TemplateCpp.cpp}
\hrulefill
\subsection{ TemplatePy}
\raggedbottom\lstinputlisting[style=py]{0Competitive/TemplatePy.py}
\hrulefill
\section{Arrays}
\subsection{ Combination}
\raggedbottom\lstinputlisting[style=py]{Arrays/Combination.py}
\hrulefill
\subsection{ Kadane}
\raggedbottom\lstinputlisting[style=cpp]{Arrays/Kadane.cpp}
\hrulefill
\subsection{ MapFunctions}
\raggedbottom\lstinputlisting[style=py]{Arrays/MapFunctions.py}
\hrulefill
\subsection{ Operations}
\raggedbottom\lstinputlisting[style=py]{Arrays/Operations.py}
\hrulefill
\subsection{ PermutationCPP}
\raggedbottom\lstinputlisting[style=cpp]{Arrays/PermutationCPP.cpp}
\hrulefill
\subsection{ PermutationPYTHON}
\raggedbottom\lstinputlisting[style=py]{Arrays/PermutationPYTHON.py}
\hrulefill
\section{BasicOperations}
\subsection{ Exponentiation}
\raggedbottom\lstinputlisting[style=cpp]{BasicOperations/Exponentiation.cpp}
\hrulefill
\subsection{ SumArray}
\raggedbottom\lstinputlisting[style=cpp]{BasicOperations/SumArray.cpp}
\hrulefill
\section{Combinatory}
\subsection{ BinomialCPP}
\raggedbottom\lstinputlisting[style=cpp]{Combinatory/BinomialCPP.cpp}
\hrulefill
\subsection{ BinomialPYTHON}
\raggedbottom\lstinputlisting[style=py]{Combinatory/BinomialPYTHON.py}
\hrulefill
\section{Geometry}
\subsection{ Astruct}
\raggedbottom\lstinputlisting[style=cpp]{Geometry/Astruct.cpp}
\hrulefill
\subsection{ CircleCenter}
\raggedbottom\lstinputlisting[style=cpp]{Geometry/CircleCenter.cpp}
\hrulefill
\subsection{ ConvexHull}
\raggedbottom\lstinputlisting[style=cpp]{Geometry/ConvexHull.cpp}
\hrulefill
\subsection{ EulerFormule}
\raggedbottom\lstinputlisting[style=cpp]{Geometry/EulerFormule.cpp}
\hrulefill
\subsection{ Line2Point}
\raggedbottom\lstinputlisting[style=cpp]{Geometry/Line2Point.cpp}
\hrulefill
\subsection{ LineIntersect2}
\raggedbottom\lstinputlisting[style=cpp]{Geometry/LineIntersect2.cpp}
\hrulefill
\subsection{ PickTheorem}
\raggedbottom\lstinputlisting[style=cpp]{Geometry/PickTheorem.cpp}
\hrulefill
\subsection{ PolygonArea}
\raggedbottom\lstinputlisting[style=cpp]{Geometry/PolygonArea.cpp}
\hrulefill
\subsection{ RayCasting}
\raggedbottom\lstinputlisting[style=cpp]{Geometry/RayCasting.cpp}
\hrulefill
\subsection{ Segment2Point}
\raggedbottom\lstinputlisting[style=cpp]{Geometry/Segment2Point.cpp}
\hrulefill
\section{Graphs}
\subsection{BestPath BellmanFord}
\raggedbottom\lstinputlisting[style=cpp]{Graphs/BestPath/BellmanFord.cpp}
\hrulefill
\subsection{BestPath Dijkstra}
\raggedbottom\lstinputlisting[style=cpp]{Graphs/BestPath/Dijkstra.cpp}
\hrulefill
\subsection{BestPath DijkstraHeap}
\raggedbottom\lstinputlisting[style=cpp]{Graphs/BestPath/DijkstraHeap.cpp}
\hrulefill
\subsection{BestPath FloydWarshal}
\raggedbottom\lstinputlisting[style=cpp]{Graphs/BestPath/FloydWarshal.cpp}
\hrulefill
\subsection{Traverse BFS}
\raggedbottom\lstinputlisting[style=cpp]{Graphs/Traverse/BFS.cpp}
\hrulefill
\subsection{Traverse DFS}
\raggedbottom\lstinputlisting[style=cpp]{Graphs/Traverse/DFS.cpp}
\hrulefill
\section{Math}
\subsection{Matrix GaussianElimination}
\raggedbottom\lstinputlisting[style=cpp]{Math/Matrix/GaussianElimination.cpp}
\hrulefill
\subsection{NumberSystems ChangeBases}
\raggedbottom\lstinputlisting[style=cpp]{Math/NumberSystems/ChangeBases.cpp}
\hrulefill
\subsection{NumberSystems ChangeBases}
\raggedbottom\lstinputlisting[style=py]{Math/NumberSystems/ChangeBases.py}
\hrulefill
\subsection{NumberTheory DivisorsCPP}
\raggedbottom\lstinputlisting[style=cpp]{Math/NumberTheory/DivisorsCPP.cpp}
\hrulefill
\subsection{NumberTheory DivisorsPYTHON}
\raggedbottom\lstinputlisting[style=py]{Math/NumberTheory/DivisorsPYTHON.py}
\hrulefill
\subsection{NumberTheory GCD_LCM}
\raggedbottom\lstinputlisting[style=cpp]{Math/NumberTheory/GCD_LCM.cpp}
\hrulefill
\subsection{NumberTheory Josephus}
\raggedbottom\lstinputlisting[style=cpp]{Math/NumberTheory/Josephus.cpp}
\hrulefill
\subsection{Polynomial HornersRule}
\raggedbottom\lstinputlisting[style=cpp]{Math/Polynomial/HornersRule.cpp}
\hrulefill
\subsection{Pow FastPow}
\raggedbottom\lstinputlisting[style=cpp]{Math/Pow/FastPow.cpp}
\hrulefill
\section{NP_Problem}
\subsection{ Knapsack}
\raggedbottom\lstinputlisting[style=cpp]{NP_Problem/Knapsack.cpp}
\hrulefill
\section{Primes}
\subsection{ Factorize}
\raggedbottom\lstinputlisting[style=cpp]{Primes/Factorize.cpp}
\hrulefill
\subsection{ IsPrime}
\raggedbottom\lstinputlisting[style=java]{Primes/IsPrime.java}
\hrulefill
\subsection{ MillerTest}
\raggedbottom\lstinputlisting[style=cpp]{Primes/MillerTest.cpp}
\hrulefill
\subsection{ PollarRhoCPP}
\raggedbottom\lstinputlisting[style=cpp]{Primes/PollarRhoCPP.cpp}
\hrulefill
\subsection{ PollarRhoPYTHON}
\raggedbottom\lstinputlisting[style=py]{Primes/PollarRhoPYTHON.py}
\hrulefill
\subsection{ PrimalyTest}
\raggedbottom\lstinputlisting[style=cpp]{Primes/PrimalyTest.cpp}
\hrulefill
\subsection{ Sieve}
\raggedbottom\lstinputlisting[style=cpp]{Primes/Sieve.cpp}
\hrulefill
\section{Probability}
\subsection{ ComposedProbability}
\raggedbottom\lstinputlisting[style=cpp]{Probability/ComposedProbability.cpp}
\hrulefill
\section{Search}
\subsection{ BinarySearch}
\raggedbottom\lstinputlisting[style=cpp]{Search/BinarySearch.cpp}
\hrulefill
\section{Sequences}
\subsection{ MatrixFibo}
\raggedbottom\lstinputlisting[style=cpp]{Sequences/MatrixFibo.cpp}
\hrulefill
\section{Snippets}
\subsection{ Assert}
\raggedbottom\lstinputlisting[style=cpp]{Snippets/Assert.cpp}
\hrulefill
\subsection{ CompareDoubles}
\raggedbottom\lstinputlisting[style=cpp]{Snippets/CompareDoubles.cpp}
\hrulefill
\subsection{ For}
\raggedbottom\lstinputlisting[style=cpp]{Snippets/For.cpp}
\hrulefill
\subsection{ Foreach}
\raggedbottom\lstinputlisting[style=cpp]{Snippets/Foreach.cpp}
\hrulefill
\subsection{ FreOpen}
\raggedbottom\lstinputlisting[style=cpp]{Snippets/FreOpen.cpp}
\hrulefill
\subsection{ IsOdd}
\raggedbottom\lstinputlisting[style=cpp]{Snippets/IsOdd.cpp}
\hrulefill
\subsection{ Show}
\raggedbottom\lstinputlisting[style=cpp]{Snippets/Show.cpp}
\hrulefill
\subsection{ Size}
\raggedbottom\lstinputlisting[style=cpp]{Snippets/Size.cpp}
\hrulefill
\subsection{ StringStream}
\raggedbottom\lstinputlisting[style=cpp]{Snippets/StringStream.cpp}
\hrulefill
\subsection{ StructPriorityQueue}
\raggedbottom\lstinputlisting[style=cpp]{Snippets/StructPriorityQueue.cpp}
\hrulefill
\subsection{ Swap}
\raggedbottom\lstinputlisting[style=cpp]{Snippets/Swap.cpp}
\hrulefill
\subsection{ Time}
\raggedbottom\lstinputlisting[style=cpp]{Snippets/Time.cpp}
\hrulefill
\subsection{ toBin}
\raggedbottom\lstinputlisting[style=cpp]{Snippets/toBin.cpp}
\hrulefill
\subsection{ UpperLowerBound}
\raggedbottom\lstinputlisting[style=cpp]{Snippets/UpperLowerBound.cpp}
\hrulefill
\subsection{Utilities ArrayPointers}
\raggedbottom\lstinputlisting[style=cpp]{Snippets/Utilities/ArrayPointers.cpp}
\hrulefill
\subsection{Utilities ClassPointers}
\raggedbottom\lstinputlisting[style=cpp]{Snippets/Utilities/ClassPointers.cpp}
\hrulefill
\subsection{Utilities CommaOperator}
\raggedbottom\lstinputlisting[style=cpp]{Snippets/Utilities/CommaOperator.cpp}
\hrulefill
\subsection{Utilities Debug}
\raggedbottom\lstinputlisting[style=cpp]{Snippets/Utilities/Debug.cpp}
\hrulefill
\subsection{Utilities Directives1}
\raggedbottom\lstinputlisting[style=cpp]{Snippets/Utilities/Directives1.cpp}
\hrulefill
\subsection{Utilities Directives2}
\raggedbottom\lstinputlisting[style=cpp]{Snippets/Utilities/Directives2.cpp}
\hrulefill
\subsection{Utilities Namespace1}
\raggedbottom\lstinputlisting[style=cpp]{Snippets/Utilities/Namespace1.cpp}
\hrulefill
\subsection{Utilities Namespace2}
\raggedbottom\lstinputlisting[style=cpp]{Snippets/Utilities/Namespace2.cpp}
\hrulefill
\subsection{Utilities PointersDeclaration}
\raggedbottom\lstinputlisting[style=cpp]{Snippets/Utilities/PointersDeclaration.cpp}
\hrulefill
\subsection{Utilities PredefinedMacros}
\raggedbottom\lstinputlisting[style=cpp]{Snippets/Utilities/PredefinedMacros.cpp}
\hrulefill
\subsection{Utilities Template}
\raggedbottom\lstinputlisting[style=cpp]{Snippets/Utilities/Template.cpp}
\hrulefill
\section{Sorting}
\subsection{BubbleSort Bubble}
\raggedbottom\lstinputlisting[style=cpp]{Sorting/BubbleSort/Bubble.cpp}
\hrulefill
\subsection{InsertionSort InsertionSortCPP}
\raggedbottom\lstinputlisting[style=cpp]{Sorting/InsertionSort/InsertionSortCPP.cpp}
\hrulefill
\subsection{InsertionSort InsertionSortPYTHON}
\raggedbottom\lstinputlisting[style=py]{Sorting/InsertionSort/InsertionSortPYTHON.py}
\hrulefill
\subsection{MergeSort MergeSortCPP}
\raggedbottom\lstinputlisting[style=cpp]{Sorting/MergeSort/MergeSortCPP.cpp}
\hrulefill
\subsection{MergeSort MergeSortPY}
\raggedbottom\lstinputlisting[style=py]{Sorting/MergeSort/MergeSortPY.py}
\hrulefill
\subsection{SelectionSort SelectionSortCPP}
\raggedbottom\lstinputlisting[style=cpp]{Sorting/SelectionSort/SelectionSortCPP.cpp}
\hrulefill
\subsection{SelectionSort SelectionSortPYTHON}
\raggedbottom\lstinputlisting[style=py]{Sorting/SelectionSort/SelectionSortPYTHON.py}
\hrulefill
\subsection{ StandardSort}
\raggedbottom\lstinputlisting[style=cpp]{Sorting/StandardSort.cpp}
\hrulefill
\section{Strings}
\subsection{ FunctionsOverChart}
\raggedbottom\lstinputlisting[style=cpp]{Strings/FunctionsOverChart.cpp}
\hrulefill
\subsection{ KMP}
\raggedbottom\lstinputlisting[style=cpp]{Strings/KMP.cpp}
\hrulefill
\subsection{ LCI}
\raggedbottom\lstinputlisting[style=cpp]{Strings/LCI.cpp}
\hrulefill
\subsection{ LCS}
\raggedbottom\lstinputlisting[style=cpp]{Strings/LCS.cpp}
\hrulefill
\subsection{ Palindrome}
\raggedbottom\lstinputlisting[style=cpp]{Strings/Palindrome.cpp}
\hrulefill
\subsection{ Regex}
\raggedbottom\lstinputlisting[style=cpp]{Strings/Regex.cpp}
\hrulefill
\subsection{ Split}
\raggedbottom\lstinputlisting[style=cpp]{Strings/Split.cpp}
\hrulefill
\subsection{ SuffixArray}
\raggedbottom\lstinputlisting[style=cpp]{Strings/SuffixArray.cpp}
\hrulefill
\section{Structures}
\subsection{ BinaryTree}
\raggedbottom\lstinputlisting[style=cpp]{Structures/BinaryTree.cpp}
\hrulefill
\subsection{ DisjointSets}
\raggedbottom\lstinputlisting[style=cpp]{Structures/DisjointSets.cpp}
\hrulefill
\subsection{ FenwickTree}
\raggedbottom\lstinputlisting[style=cpp]{Structures/FenwickTree.cpp}
\hrulefill
\subsection{ Kruskals}
\raggedbottom\lstinputlisting[style=cpp]{Structures/Kruskals.cpp}
\hrulefill
\subsection{ MaxFlow}
\raggedbottom\lstinputlisting[style=cpp]{Structures/MaxFlow.cpp}
\hrulefill
\subsection{ MaxMinPHeap}
\raggedbottom\lstinputlisting[style=cpp]{Structures/MaxMinPHeap.cpp}
\hrulefill
\subsection{ Prim}
\raggedbottom\lstinputlisting[style=cpp]{Structures/Prim.cpp}
\hrulefill
\subsection{ RecoveryTree}
\raggedbottom\lstinputlisting[style=cpp]{Structures/RecoveryTree.cpp}
\hrulefill
\subsection{ SegmentTree}
\raggedbottom\lstinputlisting[style=cpp]{Structures/SegmentTree.cpp}
\hrulefill
\subsection{ Trie}
\raggedbottom\lstinputlisting[style=cpp]{Structures/Trie.cpp}
\hrulefill
% -------------------------------------------------------- %
% DEQueue Lock
% by: Isai Barajas Cicourel
% -------------------------------------------------------- %
% Document Start
\section{\textbf{Double Ended Queue}}
% -------------------------------------------------------- %
% Particular Caes
\subsection{Particular Case}
\par
In this experiment each thread keeps a pool of tasks waiting to be executed in the form of a double-ended queue (DEQueue), providing \textit{pushBottom()}, \textit{popBottom()}, and \textit{popTop()} methods.
\par
% -------------------------------------------------------- %
% Solution Information
\subsection{Solution}
\par
According to the theory, when a thread creates a new task, it calls \textit{pushBottom()} to push that task onto its \textit{DEQueue}. When a thread needs a task to work on, it calls \textit{popBottom()} to remove a task from its own \textit{DEQueue}. If the thread discovers its queue is empty, then it becomes a thief: it chooses a victim thread at random and calls that thread’s DEQueue’s \textit{popTop()} method to steal a task for itself.
\par
\begin{lstlisting}[frame=single,breaklines=true]
public Runnable popTop() {
  int[] stamp = new int[1];
  // read the current top index together with its version stamp
  int oldTop = top.get(stamp), newTop = oldTop + 1;
  int oldStamp = stamp[0], newStamp = oldStamp + 1;
  if (bottom <= oldTop) // empty
    return null;
  Runnable r = tasks[oldTop];
  // claim the task only if no other thief (or the owner) changed top;
  // the stamp guards against the ABA problem
  if (top.compareAndSet(oldTop, newTop, oldStamp, newStamp))
    return r;
  return null; // lost the race, the caller may try another victim
}
\end{lstlisting}
% -------------------------------------------------------- %
% Experiment
\subsection{Experiment Description}
\par
The test creates $16$ threads, eight of which need to coordinate in order to \textit{pushBottom()} and \textit{popBottom()} an array of values. All threads have to cooperate to add and remove elements from the queue, while eight \textit{Dummy()} threads steal work from the running ones. After each thread finishes, if everything works according to the test, the map will have all values set to true.
If that is not the case, the test fails with a duplicate or missing value.
\par
% -------------------------------------------------------- %
% Results
\subsection{Observations and Interpretations}
\par
The tests executed as expected and no errors were found.
\begin{lstlisting}[frame=single,breaklines=true]
Tests run: 2, Failures: 0, Errors: 0, Skipped: 0, Time elapsed: 1.162 sec
------------- Standard Output ---------------
sequential pushBottom and popBottom
concurrent pushBottom and popBottom
------------- ---------------- ---------------
test-single:
BUILD SUCCESSFUL (total time: 2 seconds)
\end{lstlisting}
\section{Academic}
\cventry{05/13 -- 11/16}{PhD in Computing Science}{Simon Fraser University}{British Columbia, Canada}{\textit{4.0/4.0}}
{Supervisor: Dr. Arrvindh Shriraman \newline{}
-- Adapted program analysis techniques to understand what to specialize in a workload. \newline{}
-- Designed an abstraction for partial specialization of workloads. \newline{}
-- Implemented automated, scalable characterization and program transformation tools in LLVM. \newline{}
-- Designed and evaluated a hybrid coherence protocol for accelerator rich architectures. \newline{}
-- Designed and evaluated a hardware accelerator for software data structures. \newline{}
Publications: \textbf{HPCA'17, IISWC'16, MICRO'16, ICS'16, ISCA'15, ICS'15}}
\vspace{9pt}
\cventry{01/11 -- 04/13}{MSc in Computing Science}{Simon Fraser University}{British Columbia, Canada}{\textit{3.8/4.0}}
{Supervisor: Dr. Arrvindh Shriraman \newline{}
-- Designed and evaluated a variable granularity cache memory system. \newline{}
-- Evaluated a variable granularity coherence protocol for multiprocessor systems. \newline{}
Publications: \textbf{ISCA'13, MICRO'12}}
\vspace{9pt}
\cventry{08/06 -- 04/10}{B. Tech in Computer Engineering}{Biju Patnaik University of Technology}{Orissa, India}{\textit{8.3/10.0}}{Supervisor: Dr. Satyananda Champati Rai}
\chapter{Summary and Conclusion}
\label{chap:summary-and-conclusion}
In this chapter, we summarize how we performed the analysis of word embeddings and recap our findings from \cref{chap:analysis-of-word-embeddings} in \cref{sec:summary}. We then conclude the thesis in \cref{sec:conclusion}.
% Include sections
\input{chapters/summary_and_conclusion/summary}
\input{chapters/summary_and_conclusion/conclusion}
"alphanum_fraction": 0.8171021378,
"avg_line_length": 60.1428571429,
"ext": "tex",
"hexsha": "aee6d3b53f4a82fbccec8df742b3997762781238",
"lang": "TeX",
"max_forks_count": 1,
"max_forks_repo_forks_event_max_datetime": "2022-02-05T18:05:56.000Z",
"max_forks_repo_forks_event_min_datetime": "2022-02-05T18:05:56.000Z",
"max_forks_repo_head_hexsha": "78b3c971e4ffd51875d19bb2cb4b75e9d7be905c",
"max_forks_repo_licenses": [
"MIT"
],
"max_forks_repo_name": "JonasTriki/masters-thesis-ml",
"max_forks_repo_path": "thesis/chapters/summary_and_conclusion/summary_and_conclusion.tex",
"max_issues_count": null,
"max_issues_repo_head_hexsha": "78b3c971e4ffd51875d19bb2cb4b75e9d7be905c",
"max_issues_repo_issues_event_max_datetime": null,
"max_issues_repo_issues_event_min_datetime": null,
"max_issues_repo_licenses": [
"MIT"
],
"max_issues_repo_name": "JonasTriki/masters-thesis-ml",
"max_issues_repo_path": "thesis/chapters/summary_and_conclusion/summary_and_conclusion.tex",
"max_line_length": 233,
"max_stars_count": 6,
"max_stars_repo_head_hexsha": "78b3c971e4ffd51875d19bb2cb4b75e9d7be905c",
"max_stars_repo_licenses": [
"MIT"
],
"max_stars_repo_name": "JonasTriki/masters-thesis-ml",
"max_stars_repo_path": "thesis/chapters/summary_and_conclusion/summary_and_conclusion.tex",
"max_stars_repo_stars_event_max_datetime": "2021-07-31T03:51:32.000Z",
"max_stars_repo_stars_event_min_datetime": "2021-05-24T08:05:12.000Z",
"num_tokens": 106,
"size": 421
} |
\documentclass[journal]{vgtc} % final (journal style)
%\let\ifpdf\relax
%\documentclass[review,journal]{vgtc} % review (journal style)
%\documentclass[widereview]{vgtc} % wide-spaced review
%\documentclass[preprint,journal]{vgtc} % preprint (journal style)
%% Uncomment one of the lines above depending on where your paper is
%% in the conference process. ``review'' and ``widereview'' are for review
%% submission, ``preprint'' is for pre-publication, and the final version
%% doesn't use a specific qualifier.
%% Please use one of the ``review'' options in combination with the
%% assigned online id (see below) ONLY if your paper uses a double blind
%% review process. Some conferences, like IEEE Vis and InfoVis, have NOT
%% in the past.
%% Please use the ``preprint'' option when producing a preprint version
%% for sharing your article on an open access repository
%% Please note that the use of figures other than the optional teaser is not permitted on the first page
%% of the journal version. Figures should begin on the second page and be
%% in CMYK or Grey scale format, otherwise, colour shifting may occur
%% during the printing process. Papers submitted with figures other than the optional teaser on the
%% first page will be refused. Also, the teaser figure should only have the
%% width of the abstract as the template enforces it.
%% These few lines make a distinction between latex and pdflatex calls and they
%% bring in essential packages for graphics and font handling.
%% Note that due to the \DeclareGraphicsExtensions{} call it is no longer necessary
%% to provide the the path and extension of a graphics file:
%% \includegraphics{diamondrule} is completely sufficient.
%%
\ifpdf% % if we use pdflatex
\pdfoutput=1\relax % create PDFs from pdfLaTeX
\pdfcompresslevel=9 % PDF Compression
\pdfoptionpdfminorversion=7 % create PDF 1.7
\ExecuteOptions{pdftex}
\usepackage{graphicx} % allow us to embed graphics files
\DeclareGraphicsExtensions{.pdf,.png,.jpg,.jpeg} % for pdflatex we expect .pdf, .png, or .jpg files
\else% % else we use pure latex
\ExecuteOptions{dvips}
\usepackage{graphicx} % allow us to embed graphics files
\DeclareGraphicsExtensions{.eps} % for pure latex we expect eps files
\fi%
%% it is recommended to use ``\autoref{sec:bla}'' instead of ``Fig.~\ref{sec:bla}''
\graphicspath{{figures/}{pictures/}{images/}{./}} % where to search for the images
\usepackage{microtype} % use micro-typography (slightly more compact, better to read)
\PassOptionsToPackage{warn}{textcomp} % to address font issues with \textrightarrow
\usepackage{textcomp} % use better special symbols
\usepackage{mathptmx} % use matching math font
\usepackage{times} % we use Times as the main font
\renewcommand*\ttdefault{txtt} % a nicer typewriter font
%\usepackage{cite} % needed to automatically sort the references
\usepackage{tabu} % only used for the table example
\usepackage{booktabs} % only used for the table example
\usepackage[numbers]{natbib} % for citations
\usepackage{anyfontsize} %
%% We encourage the use of mathptmx for consistent usage of times font
%% throughout the proceedings. However, if you encounter conflicts
%% with other math-related packages, you may want to disable it.
%% In preprint mode you may define your own headline. If not, the default IEEE copyright message will appear in preprint mode.
%\preprinttext{To appear in IEEE Transactions on Visualization and Computer Graphics.}
%% In preprint mode, this adds a link to the version of the paper on IEEEXplore
%% Uncomment this line when you produce a preprint version of the article
%% after the article receives a DOI for the paper from IEEE
%\ieeedoi{xx.xxxx/TVCG.201x.xxxxxxx}
%% If you are submitting a paper to a conference for review with a double
%% blind reviewing process, please replace the value ``0'' below with your
%% OnlineID. Otherwise, you may safely leave it at ``0''.
\onlineid{0}
%% declare the category of your paper, only shown in review mode
\vgtccategory{Research}
%% please declare the paper type of your paper to help reviewers, only shown in review mode
%% choices:
%% * algorithm/technique
%% * application/design study
%% * evaluation
%% * system
%% * theory/model
\vgtcpapertype{$vgtcpapertype$}
%% Paper title.
\title{$title$}
%% This is how authors are specified in the journal style
%% indicate IEEE Member or Student Member in form indicated below
\author{$authors$}
\authorfooter{
%% insert punctuation at end of each item
\item{$author_footer1$}
\item{$author_footer2$}
\item{$author_footer3$}
}
%other entries to be set up for journal
\shortauthortitle{$lead_author$ \MakeLowercase{\textit{et al.}}: $shorttitle$}
%\shortauthortitle{Firstauthor \MakeLowercase{\textit{et al.}}: Paper Title}
%% Abstract section.
\abstract{$abstract$}
%% Keywords that describe your work. Will show as 'Index Terms' in journal
%% please capitalize first letter and insert punctuation after last keyword
\keywords{$index_terms$}
%% ACM Computing Classification System (CCS).
%% See <http://www.acm.org/class/1998/> for details.
%% The ``\CCScat'' command takes four arguments.
\CCScatlist{ % not used in journal version
\CCScat{$ACM_classification_code$}{$ACM_class_title$}%
}
%% A teaser figure can be included as follows
%\teaser{
% \centering
% \includegraphics[width=\linewidth]{CypressView}
% \caption{$intro_figure_caption$}
% \label{fig:teaser}
%}
%% Uncomment below to disable the manuscript note
%\renewcommand{\manuscriptnotetxt}{}
%% Copyright space is enabled by default as required by guidelines.
%% It is disabled by the 'review' option or via the following command:
% \nocopyrightspace
\vgtcinsertpkg
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%%%%%%%%%%%%%%%%%%%%%% START OF THE PAPER %%%%%%%%%%%%%%%%%%%%%%
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
\begin{document}
%% The ``\maketitle'' command must be the first command after the
%% ``\begin{document}'' command. It prepares and prints the title block.
%% the only exception to this rule is the \firstsection command
\firstsection{Introduction}
\maketitle
%% \section{Introduction} %for journal use above \firstsection{..} instead
{$introduction$}
$body$
\setlength{\bibsep}{0.0pt}
%\bibliographystyle{bib_styles/abbrv}
\bibliographystyle{bib_styles/abbrvnatdoi}
%\bibliographystyle{bib_styles/abbrv-doi}
%\bibliographystyle{bib_styles/abbrv-doi-narrow}
%\bibliographystyle{bib_styles/abbrv-doi-hyperref}
%\bibliographystyle{bib_styles/abbrv-doi-hyperref-narrow}
{\fontsize{8pt}{9.6pt}\selectfont \bibliography{bibliography}}
\end{document}
%-------------------------------------------------------------------------------
% Kyle Westfall
% [email protected]
% UCO/Lick Observatory
% University of California, Santa Cruz
% 1156 High St.
% Santa Cruz, CA 95064
% USA
%
% SUBMITTED: 3 Jan 2019
%-------------------------------------------------------------------------------
\documentclass[onecolumn,floatfix,tighten]{aastex62}
\bibliographystyle{aasjournal}
% Units
%\input{shorthands}
% Some fancy commenting
\definecolor{todo}{RGB}{200,0,0}
\newcommand{\comment}[2][todo]{{\color{#1}[[{\bf #2}]]}}
\received{}
\revised{}
\accepted{}
\submitjournal{AJ}
%\shortauthors{Westfall et al.}
%\shorttitle{SDSS-IV/MaNGA Data Analysis Pipeline: Overview}
\begin{document}
\title{ FIDDLES: Fibers in DEIMOS demonstrating light extraction and stability }
%\correspondingauthor{Kyle B. Westfall}
%\email{[email protected]}
%
%\author[0000-0003-1809-6920]{Kyle B. Westfall}
%\affiliation{University of California Observatories, University of California, Santa Cruz, 1156 High St., Santa Cruz, CA 95064, USA}
\section{Motivation}
We want to test the performance of coupling a fiber imaging system to
the focal plane of a slow (f/15) telescope in preparation for building
FOBOS for Keck.
Originally we proposed to do this by constructing and testing a
test-bench assembly at UCO and then designing a system that can be
slotted into the mask holder in DEIMOS. However, there will be very
limited time in which we can perform this experiment at Keck, and we
would like to have observations from another telescope as a relative
comparison; e.g., to gauge how much our understanding of the focal plane
affects our ability to couple fibers to it. APF would be good for
this, if we can get some engineering time on it.
\section{Signal-to-Noise Calculation}
I have done some initial S/N calculations for the fiber near-field
profile as measured by FIDDLES mounted on both Lick/APF and
MaunaKea/Keck.
\bigskip
\noindent {\bf Inputs \& Assumptions:}
\begin{itemize}
\item For the source, I use a reference spectrum with a constant AB
magnitude (i.e., constant $F_\nu$) normalized to a given $g$-band
magnitude. I adopt the ``no atmosphere'' response functions for the
$g$-band filter for SDSS taken from
\url{http://www.sdss.org/wp-content/uploads/2017/04/filter_curves.fits}.
\item I assume we take both on- and off-source (sky-only)
observations, where the source is a point source with a Gaussian
point-spread function. These could be dithered or simultaneous;
however, I have also assumed that I can perfectly subtract the
off-source flux from the the on-source observation (see below).
\item The Maunakea sky spectrum is approximated by taking an observed
dark-sky spectrum observed by MaNGA and rescaling it to match a
DEIMOS dark-sky spectrum over the same wavelength range. The dark-sky
surface brightness of the spectrum is $\mu_g$ = 22.14 mag/arcsec$^2$.
\item For bright-sky conditions, I just scale the dark-sky spectrum
up by 3 mag. This is not correct in that the bright-sky spectrum is
certainly {\bf not} just a scaled up version of the dark-sky
spectrum. However, this was the simplest thing to do for now. Scaling
up by 3 mag may also be a bit extreme.
\item I adopt the Maunakea atmospheric transmission curve used in the
IDL-based Keck ETCs.
\item I adopt the same sky emission and attenuation for both Lick and
Maunakea; light pollution is more significant at Lick meaning that
the S/N estimates at Lick are likely overestimates, at least in dark
conditions.
\item I assume the FIDDLES system performs identically at APF and
Keck.
\item I assume the fiber perfectly scrambles the light in the output
near-field.
\item I assume the off-source near-field image can be perfectly
subtracted from the on-source image.
\item I assume there is no scattered light in the system.
\item The telescope properties relevant to the calculation that I
assumed are in Table 1. The telescope-specific and generic properties
of FIDDLES are provided in Table 2.
\item I assume microlens foreoptics convert the telescope f/15 beam
to f/3 for input to the fiber, that there are no coupling losses, and
that there is no focal-ratio degradation.
\item The fiber attenuation is taken from a WFOS spreadsheet with a
10m fiber run of a Polymicro fiber.
\item I assume the near-field image is a perfectly in focus with no
image-quality degradation by the imaging optics or camera.
\item I use the measurements of the detector properties provided by
Molly for the test-bench camera.
\item I adopt the quantum efficiency curves from
\url{https://www.thorlabs.com/newgrouppage9.cfm?objectgroup_id=7900}
for their 4-megapixel monochrome camera.
\end{itemize}
\noindent {\bf S/N calculation comments:}
\begin{itemize}
\item The detector QE and filter are killer efficiency hits. This
probably does not matter for this experiment, though. I thought
astro-grade detectors had efficiencies of more like 90\%, and it's
worth noting Thorlabs has filters that are $\sim$400~\AA\ wide with peak
efficiencies of $\sim$75\%.
\item There is enough overlap between Keck and APF in the S/N plots
that we could feasibly observe the same stars in both (albeit with
the Keck observation at much higher S/N at fixed $m_g$).
\end{itemize}
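For reference, the aperture efficiencies quoted in Table 2 are roughly what one obtains by assuming the fiber is a circular aperture of angular diameter $d$ centered on the Gaussian PSF (my reading of the assumptions above, not an additional input):
\[
f_{\rm ap} = 1 - \exp\left(-\frac{d^2}{8\sigma^2}\right), \qquad \sigma = \frac{\rm FWHM}{2\sqrt{2\ln 2}},
\]
which gives $f_{\rm ap} \simeq 0.68$ for the $1\farcs03$ Keck fiber in $0\farcs8$ seeing and $f_{\rm ap} \simeq 1$ for the $4\farcs3$ APF fiber.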
\noindent {\bf Considerations for development of FIDDLES and observing plan:}
\begin{itemize}
\item We want to directly measure the point-spread function during
the observations.
\item We want to measure both the near and far-field output of the
fiber; these S/N calculations are for the near-field only. I have not
tried to simulate the far-field.
\item We want to observe with different angles with respect to the
moon to estimate scattered-light effects.
\item We need to consider how differential atmospheric refraction may
affect the observations (e.g., acquisition and guiding).
\end{itemize}
%\section{Details of the experiment}
%
%\subsection{System}
%
%\subsection{Lab calibration}
%
%\section{On-sky tests}
%\input{authors}
%\bibliography{master}
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
\begin{deluxetable}{ l c c}
%\tabletypesize{\scriptsize}
\tablewidth{0pt}
\tablecaption{Telescope Properties}
\tablehead{ & \colhead{APF} & \colhead{Keck} }
\startdata
Effective Area (m$^2$) & 4.47 & 72.37 \\
Plate Scale (mm/arcsec) & 0.175 & 0.725 \\
Focal Ratio & 15 & 15
\enddata
\label{tab:properties}
\end{deluxetable}
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
\begin{deluxetable}{ l c c}
%\tabletypesize{\scriptsize}
\tablewidth{0pt}
\tablecaption{FIDDLES Properties}
\tablehead{Property & & }
\startdata
Readnoise (e$-$) & \multicolumn{2}{c}{6.45} \\
Dark Current (e$-$/s) & \multicolumn{2}{c}{5} \\
Full-Well (e$-$) & \multicolumn{2}{c}{18133} \\
Pixel size ($\mu$m) & \multicolumn{2}{c}{7.4} \\
Fiber diameter ($\mu$m) & \multicolumn{2}{c}{150} \\
IO Focal Ratio & \multicolumn{2}{c}{3} \\
\hline
& \colhead{APF} & \colhead{Keck} \\
\hline
Pixel scale (arcsec/pixel) & 0.212 & 0.051 \\
Fiber diameter (arcsec) & 4.30 & 1.03 \\
Aperture Efficiency ($0\farcs8$ seeing) & 1.0 & 0.69 \\
\enddata
\label{tab:fiddles}
\end{deluxetable}
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
\begin{figure}
\begin{center}
\includegraphics[width=0.9\textwidth]{fiddles_etc_elements.pdf}
\end{center}
\caption{Example components of the S/N calculation. {\it Top} --- The
surface brightness of the sky after artificially scaling up a
dark-sky spectrum by 3 $g$-band magnitudes (red) and the reference
spectrum (constant $F_\nu$) with $m_g = 18$ used to describe the
spectrum of the source. {\it Bottom} --- The effeciency of various
components of the observation, including the fiber (blue), sky (red),
detector quantum efficiency (orange) and $g$-band filter.}
\label{fig:spectra}
\end{figure}
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
\begin{figure}
\begin{center}
\includegraphics[width=0.9\textwidth]{fiddles_etc_realization.pdf}
\end{center}
\caption{Example realization of the output near-field image both on
(left) and off (middle) the point source, as well as the difference
between the two (right). The S/N calculations provide the S/N per
pixel in the brightest part of the right image. The realization is
for an 18th magnitude star observed by Keck under bright conditions,
directly comparable to the data provided in Figure 3.}
\label{fig:images}
\end{figure}
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
\begin{figure}
\begin{center}
\includegraphics[width=0.45\textwidth]{fiddles_etc_dark.pdf}
\includegraphics[width=0.45\textwidth]{fiddles_etc_bright.pdf}
\end{center}
\caption{S/N estimates in the brightest part of the near-field
difference image (see Figure 2) as a function of point source
$g$-band magnitude; the left panel assumes dark sky conditions,
whereas the right panel artificially amplifies the dark-sky spectrum
by 3 magnitudes. Results are shown for both APF (blue) and Keck
(green) observations; the observation of the 15th magnitude star at
Keck should saturate the current test-bench detector in a 60 second
integration.}
\label{fig:snr}
\end{figure}
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
\end{document}
\section{Evaluation}
\label{sec:evaluation}
Unless otherwise specified, all experiments discussed in this section are run with the following parameters: $N = 500$, $n_{max} = 200$, $n_D = 100$ and $c = 0$.
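For concreteness, the following sketch summarizes how a single point of the curves below is obtained (assuming the standard sequential Rosenblatt update $\mathsf{\bm{w}} \leftarrow \mathsf{\bm{w}} + \xi^\mu S^\mu / N$ whenever $E^\mu \leq c$; the actual implementation may differ in details):
\begin{verbatim}
import numpy as np

def generate_data(P, N, rng):
    # random +-1 feature vectors with random +-1 labels
    xi = rng.choice([-1.0, 1.0], size=(P, N))
    S = rng.choice([-1.0, 1.0], size=P)
    return xi, S

def train_perceptron(xi, S, n_max, c=0.0):
    # sequential Rosenblatt training; returns True if all local
    # potentials E exceed c within n_max sweeps (storage success)
    P, N = xi.shape
    w = np.zeros(N)
    for _ in range(n_max):
        updated = False
        for mu in range(P):
            if w @ xi[mu] * S[mu] <= c:   # example not (yet) stored
                w += xi[mu] * S[mu] / N   # Hebbian update
                updated = True
        if not updated:                   # all P examples stored
            return True
    return False

def success_rate(alpha, N=500, n_max=200, n_D=100, c=0.0, seed=0):
    rng = np.random.default_rng(seed)
    P = int(alpha * N)
    runs = [train_perceptron(*generate_data(P, N, rng), n_max, c)
            for _ in range(n_D)]
    return float(np.mean(runs))
\end{verbatim}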
\subsection{Storage Capacity}
\label{subsec:capacity}
\begin{figure}[t]
\centering
\includegraphics[width=\columnwidth]{figures/base}
\caption{Storage success rate of a Rosenblatt perceptron as a function of $\alpha = P / N$. The experiments use $N = 500$, $n_{max} = 200$ and $n_D = 100$.}
\label{fig:base}
\end{figure}
\cref{fig:base} shows the results of the base experiment.
The x-axis represents different values of $\alpha = P / N$, while the y-axis shows the success rate $Q_{l.s.}$.
As expected, the function looks like a step function from $1$ to $0$.
For $\alpha \approx 1.7$, the success rate $Q_{l.s.}$ drops from $1$ to $0$ very quickly.
The value of $\alpha$ at which the function drops is called the storage capacity of the perceptron.
For $N \to \infty$ (very high input dimensionality) and $n_{max} \to \infty$ (no limit on the maximum number of training iterations), the theoretical storage capacity of the Rosenblatt perceptron is $\alpha = 2$.
\subsection{Number of Iterations}
\label{subsec:epochs}
\begin{figure}[t]
\centering
\includegraphics[width=\columnwidth]{figures/multiple_epochs}
\caption{Storage success rate of a Rosenblatt perceptron as a function of $\alpha = P / N$ for different values of $n_{max}$.}
\label{fig:multiple_epochs}
\end{figure}
The difference between the theoretical value and the experimental one is mainly due to the limited number of training iterations.
\cref{fig:multiple_epochs} gives an experimental proof of this statement:
for a very small number of iterations (e.g., $n_{max} = 10$), the step is close to $\alpha = 1$, while for larger numbers of iterations the step moves closer and closer to the theoretical value $\alpha = 2$ found with \cref{eq:prob-lin-sep-alpha}.
The experimental result still remains somewhat far from the theoretical one because of the limited input dimension $N$.
\subsection{Number of Dimensions}
\label{subsec:dimensions}
\begin{figure}[t]
\centering
\includegraphics[width=\columnwidth]{figures/multiple_n}
\caption{Storage success rate of a Rosenblatt perceptron as a function of $\alpha = P / N$ for different values of $N$.}
\label{fig:multiple_n}
\end{figure}
The theoretical results are valid for $N \to \infty$.
However, real datasets have a limited number of features.
\cref{fig:multiple_n} shows the behaviour of the perceptron for different values of $N$.
For high values of $N$, the shape of the success rate $Q_{l.s.}$ as a function of $\alpha$ is similar to a step function.
For small values of $N$, the function looks like a smoothed step function:
the smaller $N$, the stronger the smoothing.
\subsection{Weight Update Criterion}
\label{subsec:c}
\begin{figure}[t]
\centering
\includegraphics[width=\columnwidth]{figures/bonus_2_c}
\caption{Storage success rate of a Rosenblatt perceptron as a function of $\alpha = P / N$ for different values of $c$.}
\label{fig:multiple_c}
\end{figure}
\cref{fig:multiple_c} shows the effect of changing the values of $c$ in the training procedure of the perceptron.
For higher values of $c$, the curve is shifted to the left.
Since an example $\xi^\mu$ is considered correctly classified only when its local potential exceeds $c$ ($E = \mathsf{\bm{w}} \cdot \xi^\mu S^\mu > c$), and since, for a fixed $\xi^\mu$, the potential depends only on $\mathsf{\bm{w}}$ while the update of $\mathsf{\bm{w}}$ is fixed for a given misclassified example, the perceptron needs a larger number of updates to increase the norm of $\mathsf{\bm{w}}$ and push the local potentials above a threshold $c > 0$.
Formally, we can show that the value of $c > 0$ is irrelevant (provided the perceptron is trained long enough):
\begin{equation*}
\exists\, \mathsf{\bm{w}}_1 : \{E_1^\mu > 0\}_{\mu = 1}^{P} \quad\Leftrightarrow\quad \exists\, \mathsf{\bm{w}}_2 = \lambda \mathsf{\bm{w}}_1,\ \lambda > 0 : \{E_2^\mu \geq c\}_{\mu = 1}^{P}
\end{equation*}
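To spell out the scaling argument: if $\mathsf{\bm{w}}_1$ separates the examples with smallest local potential $\varepsilon = \min_\mu E_1^\mu > 0$, then rescaling by $\lambda = c / \varepsilon$ gives, for every $\mu$,
\begin{equation*}
E_2^\mu = \lambda\, \mathsf{\bm{w}}_1 \cdot \xi^\mu S^\mu = \frac{c}{\varepsilon}\, E_1^\mu \geq c,
\end{equation*}
so a solution with margin $c$ exists exactly when a linearly separating solution exists; only the number of updates needed to reach it changes.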
\begin{figure}[t]
\centering
\includegraphics[width=\columnwidth]{figures/bonus_2_epoch}
\caption{Storage success rate of a Rosenblatt perceptron as a function of $\alpha = P / N$ for different numbers of iterations with fixed value of $c=2$.}
\label{fig:fixed_c_multiple_epoch}
\end{figure}
To give an empirical proof of this, we fix the value of $c$ and train the perceptron for different numbers of iterations $n_{max}$.
We expect to see the curve shifted to the left for small $n_{max}$, and to approximate a step function centered at $\alpha = 2$ for large $n_{max}$.
\cref{fig:fixed_c_multiple_epoch} shows that the results of the experiment confirm our hypothesis.
\begin{figure}[t]
\centering
\includegraphics[width=\columnwidth]{figures/bonus_2_c_epoch}
\caption{Storage success rate of a Rosenblatt perceptron as a function of $\alpha = P / N$ for different numbers of iterations and $c$.}
\label{fig:multiple_c_multiple_epoch}
\end{figure}
\cref{fig:multiple_c_multiple_epoch} compares the curves for $c = 0$ and $c = 2.0$ for different values of $n_{max}$:
by increasing $n_{max}$, the curve is ``pushed'' towards the right, counteracting the effect of the increased value of $c$.
We can loosely view $c$ as a simple analogue of the learning rate used in more complex neural networks: it ``regulates'' the speed of the training.
\subsection{Inhomogeneous Hyperplanes}
\label{subsec:homogeneous}
\begin{figure}[t]
\centering
\includegraphics[width=\columnwidth]{figures/homogeneous}
\caption{Storage success rate of a Rosenblatt perceptron and its inhomogeneous version for $N = 500$ as a function of $\alpha = P / N$.}
\label{fig:homogeneous}
\end{figure}
We run an experiment to verify the behaviour of $Q_{l.s.}$ by allowing inhomogeneous hyperplanes:
we train both a normal and modified perceptron for $N = 500$.
\cref{fig:homogeneous} shows the results of the experiment.
As expected, the success rate $Q_{l.s.}$ of the inhomogeneous perceptron is slightly higher than that of the homogeneous one.
However, the difference is not significant, since data points follow a normal distribution $\xi^\mu_j \sim \mathcal{N}(0,\,1)$ and are therefore distributed around the origin.
The problem of finding an inhomogeneous solution in $R^{N}$ can be solved by finding a homogeneous solution in $R^{N + 1}$.
In a second experiment, we compare the success rate $Q_{l.s.}$ of an inhomogeneous perceptron for $N = 500$ with a homogeneous perceptron for $N = 501$.
\cref{fig:homogeneous_n_n1} shows the result of this experiment.
As expected, the success rates of the two perceptrons are very close to each other.
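The embedding into $R^{N+1}$ used for this comparison amounts to appending a constant component to every example; a minimal sketch (the helper name \texttt{augment} is ours):
\begin{verbatim}
import numpy as np

# Illustrative helper, not part of the original implementation.
def augment(xi):
    """Embed examples from R^N into R^(N+1) by appending a constant 1,
    so that an inhomogeneous hyperplane w.x = theta in R^N corresponds
    to the homogeneous hyperplane (w, -theta).(x, 1) = 0 in R^(N+1)."""
    P = xi.shape[0]
    return np.hstack([xi, np.ones((P, 1))])
\end{verbatim}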
\begin{figure}[t]
\centering
\includegraphics[width=\columnwidth]{figures/homogeneous_n_n1}
\caption{Storage success rate of a homogeneous Rosenblatt perceptron for $N = 501$ and its inhomogeneous version for $N = 500$ as a function of $\alpha = P / N$.}
\label{fig:homogeneous_n_n1}
\end{figure}
\chapter{Evaluation}
\label{evaluation}
DELETEME: The evaluation chapter is one of the most important chapters of your work. Here, you will prove the usability/efficiency of your approach by presenting and interpreting your results. You should discuss your results and interpret them, if possible. Drawing conclusions from the results will be one important point that your examiners will refer to when grading your work.
%###################################################################################
%###################### Results ########################################
%###################################################################################
\section{Results}
\label{results}
%###################################################################################
%###################### Discussions ########################################
%###################################################################################
\section{Discussions}
\label{discussions}
\chapter{Relativistic spin-0 particles}
\section{The Klein-Gordon equation}
The Schr{\"o}dinger equation is the quantum mechanical equivalent of the classical $E = p^2/2m$. Now we want a relativistic version. Start from the relationship between energy, momentum, and mass,
\begin{equation}
E^2 - p^2 = m^2.
\end{equation}
Replacing the appropriate values with operators,
\begin{equation*}
E \rightarrow i \pdv{t}, \quad p \rightarrow -i \vec{\nabla}
\end{equation*}
and applying them to some general wavefunction,
\begin{equation}
\left( -\pdv[2]{t} + \nabla^2 \right)\psi = m^2 \psi.
\end{equation}
Rearranging and using equation \eqref{eq:nablaSquared} gives the Klein-Gordon equation
\begin{equation}\boxed{
\left( \partial^2 + m^2 \right)\psi = 0\label{eq:KleinGordon}
}.
\end{equation}
This is the fully relativistic equation of motion for spin-0 particles.
\subsection{4-current density}
We now wish to derive the probability 4-current from the Klein-Gordon equation. Taking the complex conjugate of \eqref{eq:KleinGordon} and multiplying by $\psi$ gives
\begin{equation}
\psi \left( \partial^2 + m^2 \right)\psi^* = 0\label{eq:KG1}.
\end{equation}
Similarly, multiplying \eqref{eq:KleinGordon} by $\psi^*$ gives
\begin{equation}
\psi^* \left( \partial^2 + m^2 \right)\psi = 0\label{eq:KG2}.
\end{equation}
Subtracting \eqref{eq:KG1} from \eqref{eq:KG2} and multiplying by $i$,
\begin{align}
i\psi^*\partial^2\psi - i\psi\partial^2\psi^* = 0 \\
\Rightarrow\quad \pdv{t} \left( i\psi^*\pdv{\psi}{t} - i\pdv{\psi^*}{t}\psi \right) - \vec{\nabla}\cdot\left( i\psi^* \vec{\nabla}\psi - i(\vec{\nabla}\psi^*)\psi \right) = 0.
\end{align}
This result is a continuity equation,
\begin{equation}
\partial_\mu j^\mu = 0
\end{equation}
for the 4-current density
\begin{equation}
j^\mu = i\psi^* \partial^\mu \psi - i\psi \partial^\mu \psi^*
\end{equation}
\subsection{Application to a plane wave}
Consider a particle with wavefunction $\psi = \mathcal{N}e^{-iPX}$. Applying the above definition of 4-current density gives the values
\begin{align}
\rho = j^0 = 2\abs{\mathcal{N}}^2 \, E \\
\vec{j} = 2\abs{\mathcal{N}}^2 \, \vec{p}.
\end{align}
That $\rho$ is proportional to $E$ is to be expected. Under a Lorentz boost the volume element transforms as
\begin{equation*}
\dd[3]{\vec{x}} \rightarrow \frac{\dd[3]{\vec{x}}}{\gamma}
\end{equation*}
so in order to preserve $\rho \dd[3]{\vec{x}}$, $\rho$ must transform as $\rho\rightarrow \gamma\rho$, in the same way energy does.
If the covariant normalization to $2E$ particles per unit volume is used, the result is that $\mathcal{N} = 1$.
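The two plane-wave expressions above are straightforward to verify symbolically. The following sketch (using SymPy, restricted to $1+1$ dimensions, with $\mathcal{N}$ assumed real and positive; illustrative only and not part of these notes) reproduces $\rho = 2\abs{\mathcal{N}}^2 E$ and $j^x = 2\abs{\mathcal{N}}^2 p$.
\begin{verbatim}
import sympy as sp

# Symbolic check of the plane-wave 4-current in 1+1 dimensions.
t, x, E, p = sp.symbols('t x E p', real=True)
N = sp.symbols('N', positive=True)           # normalization, taken real

psi = N * sp.exp(-sp.I * (E * t - p * x))    # psi = N exp(-i P.X)

rho = sp.I * (sp.conjugate(psi) * sp.diff(psi, t)
              - psi * sp.diff(sp.conjugate(psi), t))
# spatial component: partial^x = -partial_x
jx = -sp.I * (sp.conjugate(psi) * sp.diff(psi, x)
              - psi * sp.diff(sp.conjugate(psi), x))

print(sp.simplify(rho))   # 2*E*N**2
print(sp.simplify(jx))    # 2*p*N**2
\end{verbatim}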
\section{Negative energy particles and the Feynman-Stueckelberg interpretation}
For the Klein-Gordon equation to be an accurate description of spin-0 particles, it should be able to describe antiparticles. Indeed, the Klein-Gordon equation does allow for particles with negative energy, i.e.~the negative solution of
\begin{equation}
E_\pm = \pm\sqrt{p^2 + m^2}.
\end{equation}
There are some problems associated with this. Firstly, the existence of negative energy states means that the energy of a particle can always be lowered. Secondly, the $E_-$ solutions are associated with a negative probability density $\rho$, which doesn't make sense and is not allowed.
Pauli and Weisskopf showed that it is possible to have a negative energy solution of the Klein-Gordon equation if the scalar electron charge $-e$ is included in $j^\mu$ and it is interpreted as a charge-current density,
\begin{equation}
j^\mu = -ie\left(\psi^* \partial^\mu \psi - \psi \partial^\mu \psi^*\right).
\end{equation}
Now $\rho=j^0$ represents the charge density which is allowed to be negative.
Now that the theory has been fixed to allow the $E_-$ particles, we need an interpretation for them. The Feynman-Stueckelberg interpretation states that the negative energy states correspond to antiparticles. Consider the 4-charge-current of a scalar electron with a plane wavefunction,
\begin{equation}
j^\mu (\Pelectron) = -2e\abs{\mathcal{N}}^2\, \mqty(E \\ \vec{p}).
\end{equation}
In the same way, a positron has the current
\begin{align}
j^\mu (\Ppositron) &= 2e\abs{\mathcal{N}}^2\, \mqty(E \\ \vec{p})\\
&= -2e\abs{\mathcal{N}}^2\, \mqty(-E \\ -\vec{p}).
\end{align}
This is the same as for an electron with $-E$, $-\vec{p}$. So far as a system is concerned, the emission of a positron with energy $E$ is the same as the absorption of an electron with energy $-E$. In other words, positive-energy antiparticles travelling forwards in time are negative-energy particles travelling backwards in time. This gives the $E_-$ solutions an interpretation.
\cleardoublepage
\chapter{Application game screens}
This appendix shows all the screens that make up the application. It goes through the three different game modes and shows all the screens contained within them.
\cleardoublepage
\section{Home}
\begin{figure}[ht!]
\centering
\includegraphics[width=350pt]{graphics/use-case/home_screen.jpg}
\vspace{0.05cm}
\caption{Application Home Screen}
\vspace{0.6cm}
\includegraphics[width=350pt]{graphics/use-case/help_home_screen.jpg}
\vspace{0.05cm}
\caption{Help Home Screen}
\end{figure}
\section{Playing game mode}
\begin{figure}[ht!]
\centering
\includegraphics[width=350pt]{graphics/use-case/playing_xylo_start_screen.jpg}
\vspace{0.05cm}
\caption{Xylophone playing instrument game mode}
\vspace{0.6cm}
\includegraphics[width=350pt]{graphics/use-case/help_playing_screen.jpg}
\vspace{0.05cm}
\caption{Help information playing instrument game mode}
\end{figure}
\begin{figure}[ht!]
\centering
\includegraphics[width=350pt]{graphics/use-case/playing_panpipes_screen.jpg}
\vspace{0.05cm}
\caption{Playing panpipes screen}
\vspace{0.6cm}
\includegraphics[width=350pt]{graphics/use-case/playing_trombone_screen.jpg}
\vspace{0.05cm}
\caption{Playing trombone screen}
\end{figure}
\begin{figure}[ht!]
\centering
\includegraphics[width=350pt]{graphics/use-case/playing_piano_screen.jpg}
\vspace{0.05cm}
\caption{Playing piano screen}
\vspace{0.6cm}
\includegraphics[width=350pt]{graphics/use-case/playing_harp_screen.jpg}
\vspace{0.05cm}
\caption{Playing harp screen}
\end{figure}
\section{Conducting game mode}
\begin{figure}[ht!]
\centering
\includegraphics[width=350pt]{graphics/use-case/conducting_home_screen.jpg}
\vspace{0.05cm}
\caption{Conducting game mode access screen}
\vspace{0.6cm}
\includegraphics[width=350pt]{graphics/use-case/help_conducting_screen.jpg}
\vspace{0.05cm}
\caption{Help information conducting orchestra game mode}
\end{figure}
\begin{figure}[ht!]
\centering
\includegraphics[width=350pt]{graphics/use-case/conducting_melodies_screen.jpg}
\vspace{0.05cm}
\caption{Melodies selection in the conducting orchestra game mode}
\vspace{0.6cm}
\includegraphics[width=350pt]{graphics/use-case/conducting_all_stop_screen.jpg}
\vspace{0.05cm}
\caption{Conducting orchestra screen with all instruments activated}
\end{figure}
\section{Discovering game mode}
\label{sec:discoveringscreens}
\begin{figure}[ht!]
\centering
\includegraphics[width=350pt]{graphics/additional-screens/help_discovering_screen.jpg}
\vspace{0.05cm}
\caption{Help Discovering Screen}
\vspace{0.6cm}
\includegraphics[width=350pt]{graphics/additional-screens/discovering_perc_drum_screen.jpg}
\vspace{0.05cm}
\caption{Discovering drum instrument}
\end{figure}
\begin{figure}[ht!]
\centering
\includegraphics[width=350pt]{graphics/additional-screens/discovering_perc_kettle_screen.jpg}
\vspace{0.05cm}
\caption{Discovering kettle instrument}
\vspace{0.6cm}
\includegraphics[width=350pt]{graphics/additional-screens/discovering_perc_cymbals_screen.jpg}
\vspace{0.05cm}
\caption{Discovering cymbals instrument}
\end{figure}
\begin{figure}[ht!]
\centering
\includegraphics[width=350pt]{graphics/additional-screens/discovering_perc_xylophone_screen.jpg}
\vspace{0.05cm}
\caption{Discovering xylophone instrument}
\vspace{0.6cm}
\includegraphics[width=350pt]{graphics/additional-screens/discovering_perc_marimba_screen.jpg}
\vspace{0.05cm}
\caption{Discovering marimba instrument}
\end{figure}
\begin{figure}[ht!]
\centering
\includegraphics[width=350pt]{graphics/additional-screens/discovering_perc_vibraphone_screen.jpg}
\vspace{0.05cm}
\caption{Discovering vibraphone instrument}
\vspace{0.6cm}
\includegraphics[width=350pt]{graphics/additional-screens/discovering_brass_trumpet_screen.jpg}
\vspace{0.05cm}
\caption{Discovering trumpet instrument}
\end{figure}
\begin{figure}[ht!]
\centering
\includegraphics[width=350pt]{graphics/additional-screens/discovering_brass_french_horn_screen.jpg}
\vspace{0.05cm}
\caption{Discovering French horn instrument}
\vspace{0.6cm}
\includegraphics[width=350pt]{graphics/additional-screens/discovering_brass_trombone_screen.jpg}
\vspace{0.05cm}
\caption{Discovering trombone instrument}
\end{figure}
\begin{figure}[ht!]
\centering
\includegraphics[width=350pt]{graphics/additional-screens/discovering_brass_tuba_screen.jpg}
\vspace{0.05cm}
\caption{Discovering tuba instrument}
\vspace{0.6cm}
\includegraphics[width=350pt]{graphics/additional-screens/discovering_brass_flugelhorn_screen.jpg}
\vspace{0.05cm}
\caption{Discovering flugelhorn instrument}
\end{figure}
\begin{figure}[ht!]
\centering
\includegraphics[width=350pt]{graphics/additional-screens/discovering_key_piano_screen.jpg}
\vspace{0.05cm}
\caption{Discovering piano instrument}
\vspace{0.6cm}
\includegraphics[width=350pt]{graphics/additional-screens/discovering_key_celesta_screen.jpg}
\vspace{0.05cm}
\caption{Discovering celesta instrument}
\end{figure}
\begin{figure}[ht!]
\centering
\includegraphics[width=350pt]{graphics/additional-screens/discovering_key_organ_screen.jpg}
\vspace{0.05cm}
\caption{Discovering organ instrument}
\vspace{0.6cm}
\includegraphics[width=350pt]{graphics/additional-screens/discovering_key_clavichord_screen.jpg}
\vspace{0.05cm}
\caption{Discovering clavichord instrument}
\end{figure}
\begin{figure}[ht!]
\centering
\includegraphics[width=350pt]{graphics/additional-screens/discovering_strings_violin_screen.jpg}
\vspace{0.05cm}
\caption{Discovering violin instrument}
\vspace{0.6cm}
\includegraphics[width=350pt]{graphics/additional-screens/discovering_strings_double_bass_screen.jpg}
\vspace{0.05cm}
\caption{Discovering double bass instrument}
\end{figure}
\begin{figure}[ht!]
\centering
\includegraphics[width=350pt]{graphics/additional-screens/discovering_strings_viola_screen.jpg}
\vspace{0.05cm}
\caption{Discovering viola instrument}
\vspace{0.6cm}
\includegraphics[width=350pt]{graphics/additional-screens/discovering_strings_chello_screen.jpg}
\vspace{0.05cm}
	\caption{Discovering cello instrument}
\end{figure}
\begin{figure}[ht!]
\centering
\includegraphics[width=350pt]{graphics/additional-screens/discovering_strings_lute_screen.jpg}
\vspace{0.05cm}
\caption{Discovering lute instrument}
\vspace{0.6cm}
\includegraphics[width=350pt]{graphics/additional-screens/discovering_strings_guitar_screen.jpg}
\vspace{0.05cm}
\caption{Discovering guitar instrument}
\end{figure}
\begin{figure}[ht!]
\centering
\includegraphics[width=350pt]{graphics/additional-screens/discovering_strings_harp_screen.jpg}
\vspace{0.05cm}
\caption{Discovering harp instrument}
\vspace{0.6cm}
\includegraphics[width=350pt]{graphics/additional-screens/discovering_wind_flute_screen.jpg}
\vspace{0.05cm}
\caption{Discovering flute instrument}
\end{figure}
\begin{figure}[ht!]
\centering
\includegraphics[width=350pt]{graphics/additional-screens/discovering_wind_clarinet_screen.jpg}
\vspace{0.05cm}
\caption{Discovering clarinet instrument}
\vspace{0.6cm}
\includegraphics[width=350pt]{graphics/additional-screens/discovering_wind_oboe_screen.jpg}
\vspace{0.05cm}
\caption{Discovering oboe instrument}
\end{figure}
\begin{figure}[ht!]
\centering
\includegraphics[width=350pt]{graphics/additional-screens/discovering_wind_bassoon_screen.jpg}
\vspace{0.05cm}
\caption{Discovering bassoon instrument}
\vspace{0.6cm}
\includegraphics[width=350pt]{graphics/additional-screens/discovering_wind_piccolo_screen.jpg}
\vspace{0.05cm}
\caption{Discovering piccolo instrument}
\end{figure}
\begin{figure}[ht!]
\centering
\includegraphics[width=350pt]{graphics/additional-screens/discovering_wind_panpipes_screen.jpg}
\vspace{0.05cm}
\caption{Discovering panpipes instrument}
\end{figure}
\subsection{Propositional logic}\label{subsec:propositional_logic}
\begin{remark}\label{rem:propositional_language_is_alphabet}
The \hyperref[def:propositional_language]{language of propositional logic} is, strictly speaking, an \hyperref[def:formal_language/alphabet]{alphabet} rather than a \hyperref[def:formal_language/language]{language}. Nonetheless, this is the established terminology.
\end{remark}
\begin{definition}\label{def:propositional_language}\mcite[sec. 7.2]{OpenLogicFull}
The \term{language of propositional logic} consists of:
\begin{thmenum}
  \thmitem{def:propositional_language/prop} A nonempty, \hyperref[def:set_countability/at_most_countable]{at most countable} set \( \boldop{Prop} \) of \term{propositional variables}. Technically, we can have different languages with different sets of variables, but it is safe to assume that there is a single language of propositional logic.
\thmitem{def:propositional_language/constants} Two \term{propositional constants} (also known as \term{truth values}):
\begin{thmenum}
\thmitem{def:propositional_language/constants/verum} The \term{verum} \( \top \).
\thmitem{def:propositional_language/constants/falsum} The \term{falsum} \( \bot \).
\end{thmenum}
\thmitem{def:propositional_language/negation} \term{Negation} \( \neg \).
\thmitem{def:propositional_language/connectives} The set \( \Sigma \) of \term{propositional connectives}, namely
\begin{thmenum}
\thmitem{def:propositional_language/connectives/conjunction} \term{Conjunction} \( \wedge \) (also known as \hyperref[def:standard_boolean_operators]{\term{and}} and \hyperref[def:semilattice/meet]{\term{meet}}).
\thmitem{def:propositional_language/connectives/disjunction} \term{Disjunction} \( \vee \) (also known as \hyperref[def:standard_boolean_operators]{\term{or}} and \hyperref[def:semilattice/join]{\term{join}}).
\thmitem{def:propositional_language/connectives/conditional} \term{Conditional} \( \rightarrow \) (also known as \term{if\ldots then} and \hyperref[def:material_implication]{\term{material implication}}).
\thmitem{def:propositional_language/connectives/biconditional} \term{Biconditional} \( \leftrightarrow \) (also known as \term{iff} and \term{material equivalence}).
\end{thmenum}
Note that \enquote{conditional} and \enquote{biconditional} are nouns in this context.
\thmitem{def:propositional_language/parentheses} Parentheses \( ( \) and \( ) \) for defining the order of operations unambiguously (see \fullref{rem:propositional_formula_parentheses}).
\end{thmenum}
\Fullref{rem:smaller_propositional_language} shows we can actually utilize a smaller propositional language without losing any of its semantics.
\end{definition}
\begin{definition}\label{def:propositional_syntax}
The following related definitions constitute what is called the \term{syntax of propositional logic}.
\begin{thmenum}
\thmitem{def:propositional_syntax/grammar_schema} Consider the following \hyperref[ex:natural_number_arithmetic_grammar/backus_naur_form]{grammar schema}:
\begin{bnf*}
\bnfprod{variable} {P \in \boldop{Prop}} \\
\bnfprod{connective} {\circ \in \Sigma} \\
\bnfprod{formula} {\bnfpn{variable} \bnfor} \\
\bnfmore {\bnfts{\( \top \)} \bnfor \bnfts{\( \bot \)} \bnfor} \\
\bnfmore {\bnfts{\( \neg \)} \bnfpn{formula} \bnfor} \\
\bnfmore {\bnfts{(} \bnfsp \bnfpn{formula} \bnfsp \bnfpn{connective} \bnfsp \bnfpn{formula} \bnfsp \bnfts{)}}
\end{bnf*}
Note that \( \boldop{Prop} \) may be infinite, in which case the grammars may have infinitely many rules. If needed, we can circumvent this by introducing an appropriate naming convention for variables, for example by allowing arbitrary strings of alphanumeric characters for variable names.
For the sake of readability, we will be using the conventions in \fullref{rem:propositional_formula_parentheses} regarding parentheses.
  \thmitem{def:propositional_syntax/formula} The set \( \boldop{Form} \) of \term{propositional formulas} is the language \hyperref[def:grammar_derivation/grammar_language]{generated} by this grammar schema with \( \bnfpn{formula} \) as a starting rule. Propositional formulas are also called sentences, unlike in first-order logic, where only specific formulas are called sentences --- see \fullref{def:first_order_syntax/ground_formula}.
The grammar of propositional formulas is unambiguous as shown by \fullref{thm:propositional_formulas_are_unambiguous}, which makes it possible to perform proofs via \fullref{thm:structural_induction_on_unambiguous_grammars}.
\thmitem{def:propositional_syntax/subformula} If \( \varphi \) and \( \psi \) are formulas and \( \psi \) is a \hyperref[def:formal_language/subword]{subword} of \( \varphi \), we say that \( \psi \) is a \term{subformula} of \( \varphi \).
\thmitem{def:propositional_syntax/variables} For each formula \( \varphi \), we inductively define its \term{variables} to be elements of the set
\begin{equation}\label{eq:def:propositional_syntax/varables}
\boldop{Var}(\varphi) \coloneqq \begin{cases}
\varnothing, &\varphi \in \set{ \top, \bot } \\
\set{ P }, &\varphi = P \in \boldop{Prop} \\
\boldop{Var}(\psi), &\varphi = \neg \psi \\
\boldop{Var}(\psi) \cup \boldop{Var}(\theta), &\varphi = \psi \bincirc \theta, \bincirc \in \Sigma.
\end{cases}
\end{equation}
Note that \( \boldop{Var}(\varphi) \) can naturally be totally ordered by the position of the first occurrence of a variable.
\end{thmenum}
\end{definition}
\begin{proposition}\label{thm:propositional_formulas_are_unambiguous}
The grammar of \hyperref[def:propositional_syntax/formula]{propositional formulas} is \hyperref[def:grammar_derivation/unambiguous]{unambiguous}.
\end{proposition}
\begin{proof}
The proof is analogous to \fullref{ex:natural_number_arithmetic_grammar/derivation}.
\end{proof}
\begin{remark}\label{rem:propositional_formula_parentheses}
We use the following two \enquote{abuse-of-notation} conventions regarding parentheses:
\begin{thmenum}
\thmitem{rem:propositional_formula_parentheses/outermost} We may skip the outermost parentheses in formulas with top-level \hyperref[def:propositional_language/connectives]{connectives}, e.g. we may write \( P \wedge Q \) rather than \( (P \wedge Q) \).
\thmitem{rem:propositional_formula_parentheses/associative} Because of the associativity of \( \wedge \) and \( \vee \) (see \fullref{def:propositional_formula_induced_function} and \fullref{def:standard_boolean_operators}), we may skip the parentheses in chains like
\begin{equation*}
( \ldots ((P_1 \wedge P_2) \wedge P_3) \wedge \ldots \wedge P_{n-1} ) \wedge P_n.
\end{equation*}
and instead write
\begin{equation*}
P_1 \wedge P_2 \wedge \ldots \wedge P_{n-1} \wedge P_n.
\end{equation*}
\thmitem{rem:first_order_formula_parentheses/additional} Although not formally necessary, for the sake of readability we may choose to add parentheses around certain formulas like
\begin{equation*}
\neg P \vee \neg Q.
\end{equation*}
and instead write
\begin{equation*}
(\neg P) \vee \neg Q.
\end{equation*}
This latter convention is more useful for quantifiers in \hyperref[def:first_order_syntax/formula]{first-order formulas}.
\end{thmenum}
These are only notations shortcuts in the \hyperref[rem:metalogic]{metalanguage} and the formulas themselves (as abstract mathematical objects) are still assumed to contain parentheses that help them avoid syntactic ambiguity (see \fullref{thm:propositional_formulas_are_unambiguous}).
\end{remark}
\begin{definition}\label{def:material_implication}
Theorems in mathematics usually have the form \( P \rightarrow Q \). Formulas of this form are called \term{material implications} in order to distinguish them from logical implication, which relates to the metatheoretic concept of \hyperref[def:propositional_semantics/entailment]{entailment} (see \cite{MathSE:material_vs_logical_implication}). Note that the term \enquote{material implication} sometimes also refers to the \hyperref[def:propositional_language/connectives/conditional]{conditional connective \( \rightarrow \)} itself.
We introduce terminology that is conventionally used when dealing with theorems.
\begin{thmenum}
\thmitem{def:material_implication/sufficient_condition} \( P \) is a \term{sufficient condition} for \( Q \).
\thmitem{def:material_implication/necessary_condition} \( Q \) is a \term{necessary condition} for \( P \).
  \thmitem{def:material_implication/antecedent} \( P \) is the \term{antecedent} of \( \varphi \).
  \thmitem{def:material_implication/consequent} \( Q \) is the \term{consequent} of \( \varphi \).
\thmitem{def:material_implication/inverse} The formula \( \neg P \rightarrow \neg Q \) is the \term[bg=противоположна,ru=противоположная]{inverse} of \( \varphi \).
\thmitem{def:material_implication/converse} The formula \( Q \rightarrow P \) is the \term[bg=обратна,ru=обратная]{converse} of \( \varphi \).
\thmitem{def:material_implication/contrapositive} The formula \( \neg Q \rightarrow \neg P \) is the \term{contrapositive} of \( \varphi \). In classical logic, it is \hyperref[def:propositional_semantics/equivalence]{equivalent} to the original formula due to \fullref{thm:boolean_equivalences/contrapositive}.
\end{thmenum}
\end{definition}
\begin{definition}\label{def:propositional_valuation}
We define \term[bg=оценка,ru=оценка]{valuations} for propositional formulas. It is possible to define different valuations, so in case of doubt, we will refer to the one defined here as the \term{classical valuation} giving \term{classical semantics}.
This valuation implicitly depends on the \hyperref[def:boolean_algebra]{Boolean algebra} fixed in \fullref{def:boolean_function}. When dealing with Heyting semantics (see \fullref{def:propositional_heyting_algebra_semantics}), we use more general Heyting algebras where not only the top and bottom, but also other values are utilized.
\begin{thmenum}
\thmitem{def:propositional_valuation/interpretation} A \term{propositional interpretation} is a function with signature \( I: \boldop{Prop} \to \set{ T, F } \). See \fullref{def:boolean_value} for remarks regarding the \hyperref[def:boolean_algebra]{Boolean algebra} \( \set{ T, F } \) and the \fullref{def:standard_boolean_operators} for a list of some standard Boolean operators.
\thmitem{def:propositional_valuation/formula_valuation} Given an interpretation \( I \), we define the \term{valuation} of a formula \( \varphi \) inductively as
\begin{equation}\label{eq:def:propositional_valuation/formula_interpretation}
\varphi\Bracks{I} \coloneqq \begin{cases}
T, &\varphi = \top \\
F, &\varphi = \bot \\
I(P), &\varphi = P \in \boldop{Prop} \\
\overline{\psi\Bracks{I}}, &\varphi = \neg \psi \\
        \psi_1\Bracks{I} \bincirc \psi_2\Bracks{I}, &\varphi = \psi_1 \bincirc \psi_2, \bincirc \in \Sigma,
\end{cases}
\end{equation}
\end{thmenum}
where \( \bincirc \) on the left denotes the \hyperref[def:standard_boolean_operators]{Boolean operator} corresponding to the connective \( \bincirc \) on the right.
\end{definition}
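The inductive clauses of \eqref{eq:def:propositional_valuation/formula_interpretation} translate directly into a recursive evaluator. The following sketch is purely illustrative: formulas are encoded as nested tuples (our own ad hoc representation, not anything from the literature cited here) and interpretations as dictionaries from variable names to Boolean values.
\begin{verbatim}
# Illustrative encoding: 'top', 'bot', ('var', 'P'), ('not', phi),
# and ('and' | 'or' | 'imp' | 'iff', phi, psi).
def value(phi, I):
    """Classical valuation phi[[I]], with I a dict mapping variable
    names to Boolean values."""
    if phi == 'top':
        return True
    if phi == 'bot':
        return False
    tag = phi[0]
    if tag == 'var':
        return I[phi[1]]
    if tag == 'not':
        return not value(phi[1], I)
    a, b = value(phi[1], I), value(phi[2], I)
    return {'and': a and b,
            'or': a or b,
            'imp': (not a) or b,
            'iff': a == b}[tag]
\end{verbatim}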
\begin{remark}\label{rem:propositional_formula_valuation_without_variable_assignment}
If we know that \( \boldop{Var}(\varphi) \subseteq \{ P_1, \ldots, P_n \} \), it follows that the \hyperref[def:first_order_valuation/formula_valuation]{valuation} \( \varphi\Bracks{I} \) only depends on the particular values \( I(P_1), \ldots, I(P_n) \) of \( I \).
Let \( x_1, \ldots, x_n \in \set{ F, T } \) and let \( I \) be such that \( I(P_k) = x_k \) for \( k = 1, \ldots, n \). We introduce the notation
\begin{equation}\label{eq:rem:propositional_formula_valuation_without_variable_assignment/short_semantic}
\varphi\Bracks{x_1, \ldots, x_n}
\end{equation}
for \( \varphi\Bracks{I} \) because the rest of the interpretation \( I \) plays no role here. We may also use
\begin{equation}\label{eq:rem:propositional_formula_valuation_without_variable_assignment/short_syntactic}
\varphi[\psi_1, \ldots, \psi_n]
\end{equation}
to denote \hyperref[def:propositional_substitution]{substitution}.
When using this notation, we implicitly assume that \( \boldop{Var}(\varphi) \subseteq \set{ P_1, \ldots, P_n } \).
\end{remark}
\begin{definition}\label{def:propositional_formula_induced_function}
Let \( \varphi \) be a propositional formula and let \( \boldop{Var}(\varphi) = \set{ P_1, \ldots, P_n } \) be an ordering of the free variables of \( \varphi \). We define the \hyperref[def:boolean_function]{Boolean function}
\begin{equation}\label{eq:def:propositional_formula_induced_function}
\begin{split}
&\fun_\varphi: \set{ T, F }^n \to \set{ T, F } \\
&\fun_\varphi(x_1, \ldots, x_n) \coloneqq \varphi\Bracks{x_1, \ldots, x_n}.
\end{split}
\end{equation}
\end{definition}
\begin{definition}\label{def:propositional_semantics}
We now define \term{semantical} properties of propositional formulas. Because of the connection with \hyperref[def:boolean_function]{Boolean functions} given in \fullref{def:propositional_formula_induced_function}, we also formulate some of the properties using Boolean functions.
\begin{thmenum}
\thmitem{def:propositional_semantics/satisfiability}\mcite[def. 7.14]{OpenLogicFull} Given an interpretation \( I \) and a set \( \Gamma \) of formulas, we say that \( I \) \term{satisfies} \( \Gamma \) if, for every formula \( \varphi \in \Gamma \) we have \( \varphi\Bracks{I} = T \).
We also say that \( I \) is a \term{model} of \( \Gamma \) and write \( I \vDash \Gamma \).
If \( \Gamma = \set{ \gamma_1, \ldots, \gamma_n } \) is a finite ordered set, we use the shorthand \( I \vDash \gamma_1, \ldots, \gamma_n \) rather than \( I \vDash \set{ \gamma_1, \ldots, \gamma_n } \). In particular, if \( \Gamma = \set{ \varphi } \) we write \( I \vDash \varphi \).
Note that every interpretation vacuously satisfies the empty set \( \Gamma = \varnothing \) of formulas.
We say that \( \Gamma \) is \term{satisfiable} if there exists a model for \( \Gamma \).
\thmitem{def:propositional_semantics/entailment} We say that the set of formulas \( \Gamma \) \term{entails} the set of formulas \( \Delta \) and write \( \Gamma \vDash \Delta \) if either of the following hold:
\begin{itemize}
\thmitem{def:propositional_semantics/entailment/direct} Every model of \( \Gamma \) is also a model of \( \Delta \).
\thmitem{def:propositional_semantics/entailment/functional} The following \hyperref[thm:def:function/properties/preimage]{preimage} inclusion holds:
\begin{equation*}
\bigcap_{\varphi \in \Gamma} \fun_\varphi^{-1}(T) \subseteq \bigcap_{\psi \in \Delta} \fun_\psi^{-1}(T).
\end{equation*}
\end{itemize}
\thmitem{def:propositional_semantics/tautology} The formula \( \varphi \) is a (semantic) \term{tautology} if either:
\begin{itemize}
\thmitem{def:propositional_semantics/tautology/interpretations} Every interpretation satisfies \( \varphi \).
\thmitem{def:propositional_semantics/tautology/entailment} The empty set \( \Gamma = \varnothing \) of formulas entails \( \varphi \), i.e. \( \vDash \varphi \).
\thmitem{def:propositional_semantics/tautology/functional} The function \( \fun_\varphi \) is canonically true.
\end{itemize}
We also say that \( \varphi \) is \term{valid}.
\thmitem{def:propositional_semantics/contradiction} Dually, \( \varphi \) is a (semantic) \term{contradiction} if either:
\begin{itemize}
\thmitem{def:propositional_semantics/contradiction/interpretations} No interpretation satisfies \( \varphi \).
\thmitem{def:propositional_semantics/contradiction/entailment} The formula \( \varphi \) entails \( \bot \), i.e. \( \varphi \vDash \bot \).
\thmitem{def:propositional_semantics/contradiction/functional} The function \( \fun_\varphi \) is canonically false.
\end{itemize}
\thmitem{def:propositional_semantics/equivalence} We say that \( \varphi \) and \( \psi \) are \term{semantically equivalent} and write \( \varphi \gleichstark \psi \) if either:
\begin{itemize}
\thmitem{def:propositional_semantics/equivalence/interpretations} We have \( \varphi\Bracks{I} = \psi\Bracks{I} \) for every interpretation \( I \).
\thmitem{def:propositional_semantics/equivalence/entailment} Both \( \varphi \vDash \psi \) and \( \psi \vDash \varphi \).
\end{itemize}
\thmitem{def:propositional_semantics/equisatisfiability} A weaker notion than that of semantic equivalence is that of \term{equisatisfiability}. We say that the families \( \Gamma \) and \( \Delta \) are equisatisfiable if the following holds: \enquote{\( \Gamma \) is satisfiable if and only if \( \Delta \) is satisfiable}. For single-formula families \( \Gamma = \set{ \varphi } \) and \( \Delta = \set{ \psi } \), the following are equivalent conditions for equisatisfiability:
\begin{itemize}
\thmitem{def:propositional_semantics/equisatisfiability/interpretations} There exist interpretations \( I \) and \( J \) such that \( \varphi\Bracks{I} = \psi\Bracks{J} \).
\thmitem{def:propositional_semantics/equisatisfiability/functional} We have \( \fun_\varphi = \fun_\psi \) for the induced functions.
\end{itemize}
  A trivial example of equisatisfiable but not equivalent formulas is given by \( \varphi = P \) and \( \psi = Q \) for \( P \neq Q \).
\end{thmenum}
\end{definition}
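Since \( \varphi\Bracks{I} \) depends only on the values of finitely many variables, validity and semantic equivalence can be decided by enumerating all interpretations of \( \boldop{Var}(\varphi) \). The following sketch reuses the illustrative \texttt{value} function from the previous snippet and is, again, only a sketch of the brute-force procedure.
\begin{verbatim}
from itertools import product

def variables(phi):
    """Var(phi): the propositional variables occurring in phi."""
    if phi in ('top', 'bot'):
        return set()
    if phi[0] == 'var':
        return {phi[1]}
    return set().union(*(variables(sub) for sub in phi[1:]))

def is_tautology(phi):
    """phi is valid iff every interpretation of Var(phi) satisfies it."""
    vs = sorted(variables(phi))
    return all(value(phi, dict(zip(vs, bits)))
               for bits in product([False, True], repeat=len(vs)))

def equivalent(phi, psi):
    """Semantic equivalence: the biconditional is a tautology."""
    return is_tautology(('iff', phi, psi))
\end{verbatim}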
\begin{theorem}\label{thm:lindenmaum_tarski_algebra_of_full_propositional_logic}
We give an explicit connection between \hyperref[def:propositional_syntax/formula]{propositional formulas} and \hyperref[def:boolean_function]{Boolean functions}.
\begin{thmenum}
\thmitem{thm:lindenmaum_tarski_algebra_of_full_propositional_logic/equivalence_classes} The \hyperref[def:propositional_semantics/equivalence]{semantic equivalence} \( \gleichstark \) is an equivalence relation on the set \( \boldop{Form} \) of all propositional formulas.
\thmitem{thm:lindenmaum_tarski_algebra_of_full_propositional_logic/bijection} The \hyperref[def:lindenbaum_tarski_algebra]{Lindenbaum-Tarski algebra} \( \boldop{Form} / {{}\gleichstark} \) of all propositional formulas with respect to semantic equivalence is bijective with the set of all \hyperref[def:boolean_function]{Boolean functions} of arbitrary arity.
Both are provably Boolean algebras, but with very different proofs --- the Lindenbaum-Tarski algebra is Boolean due to the purely syntactic \fullref{thm:intuitionistic_lindenbaum_tarski_algebra} and the set of all Boolean functions is a Boolean algebra due to the semantic \fullref{thm:functions_over_model_form_model}. This is another demonstration of \fullref{thm:classical_propositional_logic_is_sound_and_complete}.
See \fullref{rem:thm:intuitionistic_lindenbaum_tarski_algebra/syntactic_proof}.
\end{thmenum}
\end{theorem}
\begin{proof}
\SubProofOf{thm:lindenmaum_tarski_algebra_of_full_propositional_logic/equivalence_classes} Follows from the equivalences in \fullref{def:equivalence_relation}.
\SubProofOf{thm:lindenmaum_tarski_algebra_of_full_propositional_logic/bijection} Follows from the equivalences in \fullref{def:propositional_semantics/equivalence}.
\end{proof}
\begin{proposition}\label{thm:boolean_equivalences}
The following (and many more) are called \term{Boolean equivalences} because they are actually statements about our choice of \hyperref[def:standard_boolean_operators]{standard Boolean operators}. They are formulated here because the framework of propositional logic is more convenient for stating the equivalences. Note that most of these equivalences fail in \hyperref[def:intuitionistic_propositional_deductive_systems]{intuitionistic logic}.
For arbitrary propositional formulas \( \varphi \) and \( \psi \), the following semantic equivalences hold:
\begin{thmenum}
\thmitem{thm:boolean_equivalences/negation_bottom} \hyperref[def:propositional_language/negation]{Negation} can be expressed via the \hyperref[def:propositional_language/constants/falsum]{falsum}:
\begin{equation}\label{eq:thm:boolean_equivalences/negation_bottom}
\begin{split}
\mathllap{\neg \varphi} &\gleichstark \mathrlap{\varphi \rightarrow \bot}.
\end{split}
\end{equation}
\thmitem{thm:boolean_equivalences/double_negation} \hyperref[def:propositional_language/negation]{Negation} is an \hyperref[def:set_with_involution]{involution}:
\begin{equation}\label{eq:thm:boolean_equivalences/double_negation}
\begin{split}
\mathllap{\neg \neg \varphi} &\gleichstark \mathrlap{\varphi}.
\end{split}
\end{equation}
\thmitem{thm:boolean_equivalences/contrapositive} A \hyperref[def:material_implication]{material implication} is equivalent to its \hyperref[def:material_implication/contrapositive]{contrapositive}:
\begin{equation}\label{eq:thm:boolean_equivalences/contrapositive}
\begin{split}
\mathllap{\varphi \rightarrow \psi} &\gleichstark \mathrlap{\neg \psi \rightarrow \neg \varphi.}
\end{split}
\end{equation}
\thmitem{thm:boolean_equivalences/conditional_as_disjunction} A \hyperref[def:propositional_language/connectives/conditional]{conditional} is a \hyperref[def:propositional_language/connectives/disjunction]{disjunction} with the \hyperref[def:material_implication/antecedent]{antecedent} negated:
\begin{equation}\label{eq:thm:boolean_equivalences/conditional_as_disjunction}
\begin{split}
\mathllap{\varphi \rightarrow \psi} &\gleichstark \mathrlap{ \neg \varphi \vee \psi. }
\end{split}
\end{equation}
\thmitem{thm:boolean_equivalences/biconditional_via_conditionals} A \hyperref[def:propositional_language/connectives/biconditional]{biconditional} is a \hyperref[def:propositional_language/connectives/conjunction]{conjunction} of \hyperref[def:propositional_language/connectives]{conditionals}:
\begin{equation}\label{eq:thm:boolean_equivalences/biconditional_via_conditionals}
\begin{split}
\mathllap{\varphi \leftrightarrow \psi} &\gleichstark \mathrlap{(\varphi \rightarrow \psi) \wedge (\psi \rightarrow \varphi).}
\end{split}
\end{equation}
    \thmitem{thm:boolean_equivalences/biconditional_as_conjunction} The \hyperref[def:propositional_language/connectives/biconditional]{biconditional} is a \hyperref[def:propositional_language/connectives/conjunction]{conjunction} of \hyperref[def:propositional_language/connectives/disjunction]{disjunctions}:
\begin{equation}\label{eq:thm:boolean_equivalences/biconditional_as_conjunction}
\begin{split}
        \mathllap{\varphi \leftrightarrow \psi} &\gleichstark \mathrlap{(\neg \varphi \vee \psi) \wedge (\neg \psi \vee \varphi).}
\end{split}
\end{equation}
\thmitem{thm:boolean_equivalences/biconditional_as_disjunction} The \hyperref[def:propositional_language/connectives/biconditional]{biconditional} is a \hyperref[def:propositional_language/connectives/disjunction]{disjunction} of \hyperref[def:propositional_language/connectives/conjunction]{conjunctions}:
\begin{equation}\label{eq:thm:boolean_equivalences/biconditional_as_disjunction}
\begin{split}
\mathllap{\varphi \leftrightarrow \psi} &\gleichstark \mathrlap{(\varphi \wedge \psi) \vee (\neg \varphi \wedge \neg \psi).}
\end{split}
\end{equation}
    \thmitem{thm:boolean_equivalences/biconditional_member_negation} A \hyperref[def:propositional_language/connectives/biconditional]{biconditional} is equivalent to its termwise negation:
\begin{equation}\label{eq:thm:boolean_equivalences/biconditional_member_negation}
\begin{split}
\mathllap{\neg \varphi \leftrightarrow \neg \psi} &\gleichstark \mathrlap{\varphi \leftrightarrow \psi.}
\end{split}
\end{equation}
\thmitem{thm:boolean_equivalences/biconditional_negation} A negation of a \hyperref[def:propositional_language/connectives/biconditional]{biconditional} is again a biconditional with one of the terms negated:
\begin{equation}\label{eq:thm:boolean_equivalences/biconditional_negation}
\begin{split}
\mathllap{\neg \parens{\varphi \leftrightarrow \psi}}
&\gleichstark
\mathrlap{\neg \varphi \leftrightarrow \psi \gleichstark}
\\ &\gleichstark
\mathrlap{\varphi \leftrightarrow \neg \psi.}
\end{split}
\end{equation}
\end{thmenum}
\end{proposition}
\begin{proof}
The proofs follow directly from the table in \fullref{def:standard_boolean_operators}.
\end{proof}
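With the illustrative checker sketched after \fullref{def:propositional_semantics}, individual equivalences can also be verified mechanically; for example \fullref{thm:boolean_equivalences/contrapositive} and \fullref{thm:boolean_equivalences/conditional_as_disjunction}:
\begin{verbatim}
P, Q = ('var', 'P'), ('var', 'Q')

# Contrapositive:  (P -> Q)  is equivalent to  (not Q -> not P)
assert equivalent(('imp', P, Q), ('imp', ('not', Q), ('not', P)))

# Conditional as disjunction:  (P -> Q)  is equivalent to  (not P or Q)
assert equivalent(('imp', P, Q), ('or', ('not', P), Q))
\end{verbatim}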
\begin{definition}\label{def:propositional_substitution}
  We sometimes want to substitute a propositional variable with another variable or even with a formula. This is akin to applying a \hyperref[def:boolean_function]{Boolean function} like \( x \vee y \) to different variables (e.g. to obtain \( x \vee x \)) or even concrete values (e.g. \( F \vee T \)), except that it is done on a purely syntactic level, without any semantics involved.
  It does not pose any technical difficulty to extend this definition beyond replacing a single variable, as is usually done (e.g. \cite[def. 7.8]{OpenLogicFull}). Moreover, we can then use this mechanism to define complicated rewriting rules as in \fullref{alg:conjunctive_normal_form_reduction} and have semantic equivalence follow automatically from \fullref{thm:propositional_substitution_equivalence}.
\begin{thmenum}
\thmitem{def:propositional_substitution/single} We define the \term{substitution} of the propositional formula \( \theta \) with \( \chi \) in \( \varphi \) as
\begin{equation}\label{eq:def:propositional_substitution/single}
\varphi[\theta \mapsto \chi] \coloneqq \begin{cases}
\chi, &\varphi = \theta \\
\varphi, &\varphi \neq \theta \T{and} \varphi \in \set{ \top, \bot } \cup \boldop{Prop} \\
\neg \psi[\theta \mapsto \chi], &\varphi \neq \theta \T{and} \varphi = \neg \psi \\
        \psi_1[\theta \mapsto \chi] \bincirc \psi_2[\theta \mapsto \chi], &\varphi \neq \theta \T{and} \varphi = \psi_1 \bincirc \psi_2, \bincirc \in \Sigma.
\end{cases}
\end{equation}
Note that it is not strictly necessary for \( \theta \) to be a subformula of \( \varphi \).
In the case where \( \theta \) is a single variable, if \( P \in \boldop{Var}(\varphi) \), then \( \varphi[P \mapsto \chi] \) is said to be an \term{instance} of \( \varphi \).
\thmitem{def:propositional_substitution/simultaneous} We will now define \term{simultaneous substitution} of \( \theta_1, \ldots, \theta_n \) with \( \chi_1, \ldots, \chi_n \). We wish to avoid the case where \( \theta_k \) is a subformula of \( \chi_{k-1} \) and it accidentally gets replaced during \( \varphi[\theta_{k-1} \mapsto \chi_{k-1}][\theta_k \mapsto \chi_k] \).
Define
\begin{equation*}
      \boldop{Bound} \coloneqq \boldop{Var}(\chi_1) \cup \ldots \cup \boldop{Var}(\chi_n)
\end{equation*}
    and, for each variable \( P_k \) in \( \boldop{Bound} \), pick a variable \( Q_k \) from \( \boldop{Prop} \setminus \boldop{Bound} \) (we implicitly assume the existence of enough variables in \( \boldop{Prop} \)). Let \( m \) be the \hyperref[def:cardinal]{cardinality} of \( \boldop{Bound} \). The simultaneous substitution can now be defined as
\begin{align*}
\varphi[\theta_1 \mapsto \chi_1, \ldots, \theta_n \mapsto \chi_n] \coloneqq \varphi
[\theta_1 \mapsto \chi_1[P_1 \mapsto Q_1, \ldots, P_m \mapsto Q_m]] \\
\vdots \hspace{3cm} \\
[\theta_n \mapsto \chi_n[P_1 \mapsto Q_1, \ldots, P_m \mapsto Q_m]] \\
[Q_1 \mapsto P_1, \ldots, Q_m \mapsto P_m].
\end{align*}
\end{thmenum}
\end{definition}
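In the tuple encoding used in the earlier sketches, \eqref{eq:def:propositional_substitution/single} becomes a short recursive function (simultaneous substitution, with its auxiliary renaming, is omitted; the function name is ours):
\begin{verbatim}
def substitute(phi, theta, chi):
    """phi[theta -> chi]: replace every occurrence of the subformula
    theta in phi by chi, following the inductive definition."""
    if phi == theta:
        return chi
    if phi in ('top', 'bot') or phi[0] == 'var':
        return phi
    if phi[0] == 'not':
        return ('not', substitute(phi[1], theta, chi))
    return (phi[0],
            substitute(phi[1], theta, chi),
            substitute(phi[2], theta, chi))
\end{verbatim}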
\begin{proposition}\label{thm:propositional_substitution_equivalence}
If \( \theta \) is a subformula of \( \varphi \) and if \( \theta \gleichstark \chi \), then
\begin{equation}\label{eq:thm:propositional_substitution_equivalence}
\varphi[\theta \mapsto \chi] \gleichstark \varphi.
\end{equation}
By induction, this also holds for \hyperref[def:propositional_substitution/simultaneous]{simultaneous substitution}.
\end{proposition}
\begin{proof}
We use structural induction on \( \varphi \):
\begin{itemize}
\item If \( \varphi = \theta \), then \( \varphi[\theta \mapsto \chi] = \chi \) and, by definition,
\begin{equation*}
\varphi = \theta \gleichstark \chi = \varphi[\theta \mapsto \chi].
\end{equation*}
\item If \( \varphi \neq \theta \) and \( \varphi \in \set{ \top, \bot } \cup \boldop{Prop} \), then \( \varphi[\theta \mapsto \chi] = \varphi \) and \eqref{eq:thm:propositional_substitution_equivalence} again holds trivially.
    \item If \( \varphi \neq \theta \) and \( \varphi = \neg \psi \) and if the inductive hypothesis holds for \( \psi \), then \( \varphi[\theta \mapsto \chi] = \neg \psi[\theta \mapsto \chi] \). For any interpretation \( I \),
\begin{equation*}
\parens[\Big]{ \varphi[\theta \mapsto \chi] }\Bracks{I}
=
\overline{\parens[\Big]{ \psi[\theta \mapsto \chi] }\Bracks{I}}
\reloset {\T{ind.}} =
\overline{\psi\Bracks{I}}
=
\varphi\Bracks{I}.
\end{equation*}
Therefore, \eqref{eq:thm:propositional_substitution_equivalence} holds in this case.
\item If \( \varphi \neq \theta \) and \( \varphi = \psi_1 \bincirc \psi_2, \bincirc \in \Sigma \) and if the inductive hypothesis holds for both \( \psi_1 \) and \( \psi_2 \), then for any interpretation \( I \),
\begin{equation*}
\parens[\Big]{ \varphi[\theta \mapsto \chi] }\Bracks{I}
=
\parens[\Big]{ \psi_1[\theta \mapsto \chi] }\Bracks{I} \bincirc \parens[\Big]{ \psi_2[\theta \mapsto \chi] }\Bracks{I}
\reloset {\T{ind.}} =
\psi_1\Bracks{I} \bincirc \psi_2\Bracks{I}
=
\varphi\Bracks{I}.
\end{equation*}
Therefore, \eqref{eq:thm:propositional_substitution_equivalence} holds in this case also.
\end{itemize}
We have verified that \eqref{eq:thm:propositional_substitution_equivalence} holds in all cases.
\end{proof}
\begin{remark}\label{rem:smaller_propositional_language}
For \hyperref[def:propositional_semantics]{semantical} concepts, it is immaterial which element of an equivalence class we consider. \hyperref[def:boolean_closure]{Complete sets of Boolean operations} allow us to represent each formula using a strict subset of the \hyperref[def:propositional_language/constants]{propositional constants}, \hyperref[def:propositional_language/negation]{negation} and \hyperref[def:propositional_language/connectives]{connectives}. \Fullref{ex:posts_completeness_theorem} shows some concrete commonly used complete sets of Boolean operations. This is also the motivation for studying \hyperref[def:lindenbaum_tarski_algebra]{Lindenbaum-Tarski algebras}.
This is useful in
\begin{itemize}
\item Reduction to normal forms such as the \hyperref[def:conjunctive_disjunctive_normal_form]{conjunctive normal form} in \fullref{alg:conjunctive_normal_form_reduction}.
\item \hyperref[def:propositional_semantics/satisfiability]{Satisfiability} proofs that rely on \hyperref[rem:structural_recursion_and_induction]{structural induction} because it allows us to consider less cases in the induction.
\item Having fewer rules in \hyperref[alg:conjunctive_normal_form_reduction]{deductive systems}. For example, we may choose to add \eqref{eq:thm:minimal_propositional_negation_laws/pierce} to the axioms of the \hyperref[def:positive_implicational_deductive_system]{positive implicational derivation system} and due to \fullref{thm:minimal_propositional_negation_laws} this derivation system would be able to emulate the \hyperref[def:classical_propositional_deductive_systems]{classical derivation system}.
\end{itemize}
\end{remark}
\begin{definition}\label{def:conjunctive_disjunctive_normal_form}
We will now introduce the conjunctive and disjunctive normal forms.
\begin{thmenum}
\thmitem{def:conjunctive_disjunctive_normal_form/literal} A \term{literal} is either a propositional variable \( L = P \) or a negation \( L = \neg P \) of a propositional variable. These are called \term{positive} and \term{negative} literals, correspondingly.
\thmitem{def:conjunctive_disjunctive_normal_form/normal_form} A propositional formula \( \varphi \) is in \term{conjunctive normal form} (resp. \term{disjunctive normal form}) if \( \varphi \) is a finite conjunction of disjunctions (resp. finite disjunction of conjunctions) of literals. That is, if \( \varphi \) is in conjunctive normal form, it has the form
\begin{equation*}
(L_{1,1} \vee \ldots \vee L_{1,n_1}) \wedge \cdots \wedge (L_{k,1} \vee \ldots \vee L_{k,n_k}).
\end{equation*}
\thmitem{def:conjunctive_disjunctive_normal_form/conjunct_disjunct} A \term{disjunct} (resp. a \term{conjunct}) is a set of literals, the difference between the two being the context in which they are used. To each formula in conjunctive normal form there corresponds a set of disjuncts and to each formula in disjunctive normal form there corresponds a set of conjuncts.
\end{thmenum}
\end{definition}
\begin{algorithm}\label{alg:conjunctive_normal_form_reduction}
Let \( \varphi \) be any formula. We explicitly derive a formula in conjunctive normal form that is \hyperref[def:propositional_semantics/equivalence]{semantically equivalent} to \( \varphi \). In a software implementation, we can easily construct an efficient recursive procedure based on the following steps.
\begin{thmenum}
\thmitem{alg:conjunctive_normal_form_reduction/constants} Pick any variable \( P \) and substitute
\begin{align*}
\top \T{with} P \vee \neg P, && \bot \T{with} P \wedge \neg P
\end{align*}
to get rid of the \hyperref[def:propositional_language/constants]{propositional constants}.
\thmitem{alg:conjunctive_normal_form_reduction/iff} For any subformulas \( \psi \) and \( \theta \) of \( \varphi \), perform the substitution
\begin{equation*}
\psi \leftrightarrow \theta \T{with} (\psi \rightarrow \theta) \wedge (\theta \rightarrow \psi)
\end{equation*}
to get rid of \hyperref[def:propositional_language/connectives/biconditional]{biconditional connectives}. Semantic equivalence with \( \varphi \) is then justified by \fullref{thm:boolean_equivalences/biconditional_via_conditionals}.
\thmitem{alg:conjunctive_normal_form_reduction/implies} For any subformulas \( \psi \) and \( \theta \) of \( \varphi \), perform the substitution
\begin{equation*}
\psi \rightarrow \theta \T{with} \neg \psi \vee \theta.
\end{equation*}
to get rid of \hyperref[def:propositional_language/connectives/conditional]{conditional connectives}. Equivalence with \( \varphi \) is justified by \fullref{thm:boolean_equivalences/conditional_as_disjunction}.
\thmitem{alg:conjunctive_normal_form_reduction/de_morgan} For any subformulas \( \psi \) and \( \theta \) of \( \varphi \), use \fullref{thm:de_morgans_laws} to justify the substitution
\begin{align*}
\neg(\psi \vee \theta) \T{with} \neg \psi \wedge \neg \theta
&&
\neg(\psi \wedge \theta) \T{with} \neg \psi \vee \neg \theta.
\end{align*}
In order to ensure that \hyperref[def:propositional_language/negation]{negation} is only present before propositional variables, repeat \ref{alg:conjunctive_normal_form_reduction/de_morgan} until we reach a fixed point, i.e. until nothing is substituted anymore.
\thmitem{alg:conjunctive_normal_form_reduction/double_negation} For any variable \( P \) of \( \varphi \), use \fullref{eq:thm:boolean_equivalences/double_negation} to justify the substitution
\begin{equation*}
\neg \neg P \T{with} P.
\end{equation*}
\thmitem{alg:conjunctive_normal_form_reduction/distributivity} Finally, for any subformulas \( \psi \), \( \theta \) and \( \chi \) of \( \varphi \), use \hyperref[eq:def:semilattice/distributive_lattice/finite/join_over_meet]{distributivity} of \( \vee \) over \( \wedge \) to justify the substitution
\begin{equation*}
\psi \vee (\theta \wedge \chi) \T{with} (\psi \vee \theta) \wedge (\psi \vee \chi).
\end{equation*}
In order to ensure that conjunction is always one level higher than disjunction, repeat \ref{alg:conjunctive_normal_form_reduction/distributivity} until we reach a fixed point.
\end{thmenum}
The resulting formula is in conjunctive normal form. As a consequence of \fullref{thm:propositional_substitution_equivalence}, it is equivalent to \( \varphi \).
\end{algorithm}
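\begin{remark}
  To illustrate \fullref{alg:conjunctive_normal_form_reduction} on a small example, consider the formula \( (P \rightarrow Q) \rightarrow R \). It contains no propositional constants and no biconditionals, so we start with \ref{alg:conjunctive_normal_form_reduction/implies}, which, applied to both conditionals, gives
  \begin{equation*}
    \neg(\neg P \vee Q) \vee R.
  \end{equation*}
  Applying \ref{alg:conjunctive_normal_form_reduction/de_morgan} and then \ref{alg:conjunctive_normal_form_reduction/double_negation} gives
  \begin{equation*}
    (P \wedge \neg Q) \vee R,
  \end{equation*}
  and finally \ref{alg:conjunctive_normal_form_reduction/distributivity} gives
  \begin{equation*}
    (P \vee R) \wedge (\neg Q \vee R),
  \end{equation*}
  which is in conjunctive normal form, with disjuncts \( \{ P, R \} \) and \( \{ \neg Q, R \} \).
\end{remark}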
%% SECTION HEADER /////////////////////////////////////////////////////////////////////////////////////
\section{Subsection}
\label{sec53}
%% SECTION CONTENT ////////////////////////////////////////////////////////////////////////////////////
\lipsum[1]
%% SUBSECTION HEADER //////////////////////////////////////////////////////////////////////////////////
\subsection{Subsubsection}
\label{sec531}
\lipsum[1]
%% SUBSECTION HEADER //////////////////////////////////////////////////////////////////////////////////
\subsection{Subsubsection}
\label{sec532}
\lipsum[1]
%% SUBSECTION HEADER //////////////////////////////////////////////////////////////////////////////////
\subsection{Subsubsection}
\label{sec533}
\lipsum[1]
\section{Applications}\label{sec:app}
\subsection{Voter Registration}\label{sec:voter}
Improving the privacy and integrity of the United States voter registration system was a primary motivation for the developed protocols. In the United States' electoral system, each state is responsible for maintaining its own list of registered voters. A shortcoming of this distributed process is that, without coordination between states, it is possible for a voter to register in more than one state. If such a person then went on to cast more than one vote, the integrity of the system would be compromised. Double registration is often the result of a person moving to a new state and failing to unregister from the old state. Alternatively, when a voter moves to a new state it may take them some time to register there, and as such their vote may go uncast. The Pew Charitable Trusts\cite{pew} reported that 1 in 8 voter registration records in the United States contains a serious error, while 1 in 4 eligible citizens remains unregistered. The goal in this application of our framework is to improve the accuracy of the voter registration data and to help register eligible voters.
A naive solution to this problem is to construct a centralized database of all registered voters and citizen records. It is then relatively straightforward to identify persons whose records are inaccurate, who attempt to register more than once, or who are simply not registered at all. However, the construction of such a centralized repository of information has long faced strong opposition in the United States due to concerns about data privacy and excessive government overreach. As a compromise, many states have volunteered to join the Electronic Registration Information Center (ERIC)\cite{eric}, a non-profit organization with the mission of assisting states to improve the accuracy of America's voter rolls and to increase access to voter registration for all eligible citizens. This organization acts as a semi-trusted third party which maintains a centralized database containing hashes of the relevant information, e.g. names, addresses, driver's license numbers and social security numbers.
\iffullversion
\input{appendix}
%In particular, instead of storing this sensitive information in plaintext, all records are randomized using two cryptographically strong salted hash functions. Roughly speaking, before this sensitive information is sent to ERIC, each state is provided with the first salt value $salt_1$ and updates each value $v$ as $v := H(salt_1 || v)$. This hashed data is then sent to ERIC where the data is hashed a second time by ERIC which possesses the other salt value. The desired comparisons can then be applied to the hashed data inside ERIC's secure data center. When compared with existing alternative, this approach provides a moderate degree of protection. In particular, so long as the salt values remain inaccessible by the adversary, deanatomized any given record is likely non-trivial. However, a long series of works, e.g. \cite{deanon0,deanon1,deanon2,deanon3,deanon4}, have shown that a significant amount of information can be extracted with sophisticated statistical techniques. Moreover, should the adversary possess the salt values a straightforward dictionary attack can be applied.
%
%We propose adding another layer of security with the deployment of our secure database join framework. In particular, two or more of the states and ERIC will participate in the MPC protocol. From here we consider two possible solutions. The first option is to maintain the existing repository but now have it secret shared between the computational parties. Alternatively, each state could be the long-term holder of their own data and the states perform all pairwise comparison amongst themselves. For reason of preferring the more distributed setting we further explore the pairwise comparison approach.
%
%The situation is further complicated by how this data is distributed within and between states. In the typical setting no single state organization has sufficient information to identify individuals which are incorrectly or double registered. For example, typical voter registration forms requires a name, home address and state ID/driver's license number. If two states compared this information there would be no reliable attribute for joining the two records. The name of the voter could be used but names are far from a unique identifier. The solution taken by ERIC is to first perform a join between a state's registered voters and their Department of Motor Vehicles (DMV) records, using the state ID/driver's license number as the join-key. Since the DMV typically possesses an individual's Social Security Number (SSN), this can now be used as a unique identifier across all states. However, due to regulations within some states this join is only allowed to be performed on the hashed data or, presumably, on secret shared data.
%
%In addition to identifying individuals that are double registered, the mission of ERIC is to generally improve the accuracy of all voter records. This includes identifying individuals that have moved and not yet registered in their new state or that have simply moved within a state and not updated their current address. In this case the joins between/within states should also include an indicator denoting that an individual has updated their address at a DMV which is different than the voter registration record. There are likely other scenarios which ERIC also identifies but we leave the exploration of them to future work.
%
%Given the building blocks of \sectionref{sec:construction} it is a relatively straightforward task to perform the required joins. First a state performs a left join between their DMV data and the voter registration data. Within this join the addresses in the inner join are compared. In the event of a discrepancy, the date of when these addresses were obtained can be compared to identify the most up to date address. Moreover, the agency with the older address can be notified and initial a procedure to determine which, if any, of the addresses should be updated.
%
%
%Once this join is performed, each state holds a secret shared table of all their citizens that possess a state ID and their current registration status. Each pair of states can then run an inner join protocol using the social security number as the key. There are several cases that a result record can be in. First it is possible for a person to have a DMV record in two states and be registered in neither. The identity of these persons should not be revealed as this does not effect the voting process. The next case is that a person is registered in both states. We wish to reveal this group to both states so that the appropriate action can be taken. The final case that we are interested in is when a person is registered in state \emph{A} and has a newer DMV address in state \emph{B}. In this case we want to reveal the identity of the person to the state that they are registered to. This state can then contact the person to inquire whether they wish to switch their registration to the new state.
%
%
%
%
%This approach has additional advantages over the hashing technique of ERIC. First, all of the highly sensitive information such as a persons address, state ID number and SSN can still be hashed before being added to the database\footnote{The hashing originally performed by ERIC can be replaced with the randomized encoding protocol.}. However, now that the data is secret shared less sensitive information such as dates need not be hashed. This allows for the more expressive query described above which uses a numerical comparison. To achieve the the same functionality using the current ERIC approach these dates would have to be stored in plaintext which leaks significant information. In addition, when the ERIC approach performs these comparison the truth value for each party of the predicate is revealed. Our approach reveals no information about any intermediate value.
\else
We propose adding another layer of security with the deployment of our secure database join framework. Within a single state, different agencies will first secret share their data to construct a join table containing the registration status of everyone within that state. This joined table can then be joined with the respective table from all of the other states. In total, there would be 50 intra-state joins and then $50\times49$ inter-state joins.
We envision that the intra-state join will be performed with ERIC and the state agencies as the participating parties. The inter-state joins can then be performed by ERIC and one of the agencies from each state. This ensures that the data remains secret shared at all times. The data that each state requires can then be revealed at the end of the computation. For more details see \appendixref{sec:voterDetails}.
\fi
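To make the logical computation concrete, the following cleartext sketch is purely illustrative: the record layout, field names and helper functions are simplified placeholders, and the actual protocol evaluates the equivalent computation on secret shared data rather than in the clear. It shows an intra-state join keyed on a state identifier followed by an inter-state comparison on a shared identifier.
\begin{verbatim}
# Cleartext sketch (illustrative only) of the logical join.
def intra_state_join(dmv, voters):
    # dmv: license number -> SSN; voters: license number -> registered?
    return {ssn: voters.get(lic, False) for lic, ssn in dmv.items()}

def double_registered(state_a, state_b):
    # SSNs that appear as registered in both states
    return {ssn for ssn, reg in state_a.items() if reg and state_b.get(ssn)}

state_a = intra_state_join({"A1": "111-11-1111"}, {"A1": True})
state_b = intra_state_join({"B7": "111-11-1111"}, {"B7": True})
print(double_registered(state_a, state_b))
\end{verbatim}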
The average US state has a population of approximately 5 million, of which about 4 million are of voting age. For this set size, our protocol is capable of performing the specified query in 30 seconds, using 6GB of total communication. If we consider running the same query where one of the states is California, with a voting population of 30 million, our protocol can identify the relevant records in five minutes. For more details, see \sectionref{sec:eval}.
\documentclass[12pt,letterpaper]{article}
\usepackage{fullpage}
%\usepackage[top=2cm, bottom=4.5cm, left=2.5cm, right=2.5cm]{geometry}
\usepackage{amsmath,amsthm,amsfonts,amssymb,amscd}
\usepackage{lastpage}
\usepackage{enumerate}
\usepackage{fancyhdr}
\usepackage{mathrsfs}
\usepackage{xcolor}
\usepackage{graphicx}
\usepackage{listings}
\usepackage[hidelinks]{hyperref}
\usepackage{verbatim}
\usepackage{float}
\begin{document}\sloppy
\vspace*{\fill}
\begin{center}\Large%\bfseries\itshape
Perspicasso\\
\end{center}
\vspace*{\fill}
\newpage
Technique for visualizing neural network activations\cite{samek2015evaluating}
\section*{Aug. 17}
As noted in the weekly progress document, there were four material tasks to complete.
\subsection*{Image frame size function}
The \texttt{largest\_frame\_size()} function in \texttt{src/functions.py} iterates through the image files and determines the largest height/width dimensions. As Suleman noted in the chat, the largest aggregate frame was (1600, 1600) pixels.
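A minimal sketch of such a directory scan is given below; it is illustrative only, and the actual implementation in \texttt{src/functions.py} may differ (e.g. in the file extensions handled).
\begin{lstlisting}[language=Python]
# Sketch: scan a directory of images and report the largest height/width.
from pathlib import Path
from PIL import Image

def largest_frame_size(image_dir):
    max_h = max_w = 0
    for path in Path(image_dir).glob("*.png"):   # assumed extension
        with Image.open(path) as img:
            w, h = img.size                      # PIL reports (width, height)
            max_h, max_w = max(max_h, h), max(max_w, w)
    return max_h, max_w
\end{lstlisting}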
\subsection*{Greyscale $\rightarrow$ RGB transform comparison}
My implementation of Greyscale to RGB is a simple copy of the [0-255] pixel values across all three RGB channels. This is recommended by Ates Goral on Stack Overflow (\url{https://stackoverflow.com/questions/835753/convert-grayscale-value-to-rgb-representation}).
As it turns out, this method is also (implicitly) recommended by the PyTorch developers (\url{https://discuss.pytorch.org/t/grayscale-to-rgb-transform/18315}).
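A minimal sketch (illustrative only) of this channel-copy approach, using the torchvision transform and an equivalent tensor operation:
\begin{lstlisting}[language=Python]
import torch
from torchvision import transforms

# torchvision's built-in transform replicates the single channel three times
to_rgb = transforms.Grayscale(num_output_channels=3)

# Equivalent tensor-level version for a (1, H, W) greyscale tensor
def grey_to_rgb(x: torch.Tensor) -> torch.Tensor:
    return x.repeat(3, 1, 1)   # copy the [0-255] values into all 3 channels
\end{lstlisting}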
\subsection*{Performance vs. Num\_epochs vs. Frame\_size}
\newpage
\bibliography{perspicasso}
\bibliographystyle{IEEEtran}
\end{document}
\documentclass[9pt]{beamer}
% \documentclass[9pt,handout]{beamer}
\usepackage{ucs}
\usepackage[utf8x]{inputenc}
% \usepackage{beamerthemeplain}
\usepackage{amsmath}
\usepackage{amsfonts}
\usepackage{amssymb}
\usepackage[english]{babel}
\usepackage{fontenc}
% \usepackage{verbatim}
\usepackage{graphics}
\usepackage{textcomp}
\usepackage[absolute,overlay]{textpos}
\usepackage{wasysym}
\usepackage{slashed}
\usepackage{array}
\usetheme{CNRScolors}
\input{../newcommands.tex}
\input{../custom-definitions.tex}
\graphicspath{ {../figures/}{./} }
\setbeamertemplate{navigation symbols}{}
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% \setbeameroption{show notes} % un-comment to see the notes
% \setbeamertemplate{note page}[plain]
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
\newif\ifmynote
% \mynotetrue
\mynotefalse
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
\newif\ifmyhide
% \myhidetrue
\myhidefalse
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
\newcommand\mynote[1]{%
\ifmynote \textbf{#1} \else \fi
}
\newcommand\myhide[1]{%
\ifmyhide \vspace{15pt} \begin{center} \myexample{(blackboard)}\end{center} \vspace{15pt} \else #1 \fi
}
\newcolumntype{x}[1]{%
>{\centering\hspace{0pt}}p{#1}}%
\newcommand{\tn}{\tabularnewline}
\date[Stat2]{Sept. 27, 2018}
\title{Methods of statistical analysis and simulation}
\subtitle{2. Parameter estimation}
\author[E. Chapon]{Émilien Chapon}
% \institute[(CERN)]{CERN}
% \logo{\includegraphics[height=0.6cm]{../../CMS-Color-Label.pdf}\hspace{1.05\textwidth}\includegraphics[height=0.6cm]
% {../../LogoBadge.pdf} }
\begin{document}
{
\setbeamertemplate{footline}{}
\setbeamertemplate{headline}{}
% \logo{\includegraphics[height=1.2cm]{../../CMS-Color-Label.pdf}
% \hspace{0.94\textwidth}\includegraphics[height=1.2cm]{../../LogoBadge.pdf}}
\begin{frame}
\maketitle
% \setcounter{framenumber}{0}
\end{frame}
}
% CONTENT
% point estimation... cf. ED course, to be completed with F. James
\begin{frame}
\frametitle{Outline}
\tableofcontents
\end{frame}
\section{Information}
\begin{frame}
\frametitle{The likelihood function}
We discuss a random variable $X$, with pdf $f(X|\theta)$, where $\theta$ is a real parameter (or a set of $k$ real parameters).
The set of allowed values of $X$ is denoted $\Omega_\theta$.
\myhide{
We consider $N$ independent observations of $X$: $X_1,\dots,X_N$. The joint pdf is, by independence,
$$P(\vec{X}|\theta) = P(X_1,\dots,X_N|\theta) = \prod_{i=1}^N f(X_i|\theta)$$
\begin{block}{Likelihood function}
The likelihood function is a function of $\theta$, given the observed data $X^0$:
$$\mathcal{L}(\theta) = P(X^0|\theta)$$
\end{block}
}
\end{frame}
\begin{frame}
\frametitle{Notes on the likelihood function}
\begin{itemize}
\item It is tempting to consider the area under $\mathcal{L}$, but $\mathcal{L}(\theta)$ is not a probability distribution function in $\theta$: \alert{the area under $\mathcal{L}$ is meaningless}.
\item We will see that likelihood ratios are often used.
\end{itemize}
\end{frame}
\begin{frame}
\frametitle{Likelihood function: example}
\begin{itemize}
\item Poisson probability $P(n|\mu) = \mu^n \frac{e^{-\mu}}{n!}$
\item Suppose $n=3$ is observed. We get the likelihood function:
$$\mathcal{L}(\mu) = \mu^3 \frac{e^{-\mu}}{3!}$$
\end{itemize}
\begin{center}
\includegraphics[width=0.6\textwidth]{likelihood_Poisson.png}
\end{center}
\end{frame}
\begin{frame}
\frametitle{Statistic}
\begin{block}{Definition}
Suppose a new random variable: $T = T(X_1,\dots,X_N)$. Any such function $T$ is called a \textbf{statistic}.
\end{block}
Example: sample mean $\bar{X}$.
\vspace{10pt}
NB: careful not to confuse this \textbf{statistic} with \textbf{statistics} (the field of mathematics we are discussing) or \textbf{statistics} (physicist's jargon as a substitute for ``data'' or ``amount of data''). Better to avoid the latter usage when writing papers!
\end{frame}
\begin{frame}
\frametitle{Information of R.A. Fisher}
\begin{block}{Definition}
\myhide{If $\Omega_\theta$ is independent of $\theta$, and if $\mathcal{L}(X|\theta)$ is regular enough to allow the operators $\partial^2/\partial\theta^2$ and $\int \dd X$ to commute, then
\begin{eqnarray}
I_X(\theta) & = & E \left[ \left( \frac{\partial \ln \mathcal{L}(X|\theta)}{\partial \theta} \right)^2 \right] \nonumber \\
& = & \int_{\Omega_\theta} \left( \frac{\partial \ln \mathcal{L}(X|\theta)}{\partial \theta} \right)^2 \mathcal{L}(X|\theta) \dd X \nonumber \\
& = & - E \left[ \frac{\partial^2 \ln \mathcal{L}(X|\theta)}{\partial \theta^2} \right] \nonumber
\end{eqnarray}
}
\end{block}
NB: this is an additive property (the information of $N$ observations is $N$ times the information of 1 observation).
\end{frame}
\begin{frame}
\frametitle{Sufficient statistics}
\begin{block}{Definition}
A statistic is said to be \textbf{sufficient} if $f(\vec{X}|T)$ is independent of $\theta$.
\end{block}
Properties:
\begin{itemize}
\item<2-> If $T$ is a sufficient statistic for $\theta$, then any strictly monotonic function of $T$ is also a sufficient statistic for $\theta$.
\item<3> $T(\vec{X})$ is a sufficient statistic for $\theta$ iff the likelihood factorises as
$$\mathcal{L}(\vec{X}|\theta) = g(T,\theta) h(\vec{X}),$$
where:
\begin{enumerate}
\item $h(\vec{X})$ does not depend on $\theta$
\item $g(T,\theta) \propto A(T|\theta)$, the conditional pdf for $T$ given $\theta$.
\end{enumerate}
\end{itemize}
\end{frame}
\begin{frame}
\frametitle{Darmois theorem}
This theorem proves that only a very restricted class of probability density functions admits a number of sufficient statistics independent of the number of observations.
\begin{itemize}
\item<2-> Whatever $\Omega_\theta$, if there exists a number $N>1$ such that the set $X_1,\dots,X_N$ admits a sufficient statistic for $\theta$, then the pdf is of the ``exponential form''
$$f(X|\theta) = \exp [ \alpha(X)a(\theta) + \beta(X) + c(\theta)]$$
\item<3> Inversely, $(X_1,\dots,X_N)$ admits a sufficient statistic for all $N>1$ (but only if $\Omega_\theta$ does not depend on $\theta$), if $f(X|\theta)$ has the exponential form,
and if the mapping $(X_1,\dots,X_N) \Rightarrow (R,X_2,\dots,X_N)$, with
$$R = \sum_{i=1}^N \alpha(X_i),$$
is one-to-one and continuously differentiable for all $X$. $R$ is sufficient for $\theta$, as well as any monotonic function of $R$.
\end{itemize}
\end{frame}
\section{Parameter estimation on unbinned data}
\begin{frame}
\frametitle{Outline}
\tableofcontents[current]
\end{frame}
\begin{frame}
\frametitle{Parameter estimation}
Let $X$ be a random variable of pdf $f(x;\theta_0)$, with $\theta_0$ unknown. We draw $N$ independent trials of $X$, $\{x_1,\dots,x_N\}$.
An \textbf{estimator} is a statistic $t_N(x_1,\dots,x_N)$ that can be used to estimate $\theta_0$. It can have the following properties:
\mynote{Write the names of the different properties on the blackboard, then display them one by one}
\begin{description}
\item<2->[unbiased]: if $\langle t_N \rangle = \theta_0$ (otherwise the bias is $\langle t_N \rangle - \theta_0 = b_N$)
\item<3->[convergent] or consistent: e.g. consistency in probability, $\forall \epsilon>0, \forall \eta>0, \exists N_0 / \forall N>N_0, P(|t_N - \theta_0|>\epsilon)<\eta$
\begin{itemize}
\item NB: The law of large numbers is equivalent to the statement that the sample mean is a consistent estimator of the parent mean.
\end{itemize}
\item<4->[efficient]: if the variance of the estimator $V(t_N) \xrightarrow[N\to\infty]{} \text{minimum variance bound}$ (this property can be only asymptotic)
\item<5->[optimal]: if $t_N$ minimises the Mean Square Error (MSE): $\text{MSE}(t_N) = V(t_N) + b_N^2$
\begin{itemize}
\item NB: unbiasedness and efficiency imply optimality
\end{itemize}
\item<6>[robust]: if it does not depend on a hypothesis on the pdf
\end{description}
\end{frame}
\begin{frame}
\frametitle{Illustration}
pdfs for $t_N$ in different cases
\includegraphics[width=\textwidth]{estimators.jpg}
\end{frame}
\begin{frame}
\frametitle{Minimum variance: Cramér-Rao inequality}
Let $\vec{X}$ be observations from a distribution with pdf $f(\vec{X}|\theta)$, the likelihood is $\mathcal{L}_{\vec{X}} = L(\vec{X}|\theta)$.
\myhide{
\begin{block}{Cramér-Rao inequality}
If the range of $\vec{X}$ does not depend on $\theta$, and if $\mathcal{L}_{\vec{X}}$ is sufficiently regular that differentiation with respect to $\theta$ and integration over
$\vec{X}$ commute, then:
$$V(\hat{\theta}) \geq \frac{[1+(\dd b / \dd \theta)]^2}{I_{\hat{\theta}}}
\geq \frac{[1+(\dd b / \dd \theta)]^2}{I_{\vec{X}}}
= \frac{\left( \frac{\dd \tau(\theta)}{\dd \theta} \right)^2}{E\left[ \left( \frac{\partial \ln \mathcal{L}_{\vec{X}}}{\partial \theta} \right)^2 \right]}$$
where $I_{\vec{X}}$ is the Fisher information, $I_{\vec{X}}(\theta) = E\left[ \left( \frac{\partial \ln \mathcal{L}(\theta|\vec{X})}{\partial \theta} \right)^2 \right]$, and
$\tau(\theta) \equiv E(\hat{\theta}) = \theta + b(\theta)$
\end{block}}
\end{frame}
\begin{frame}
\frametitle{Efficiency and minimum variance}
\begin{block}{First part of the inequality: minimum variance}
$V(\hat{\theta}) = \frac{[1+(\dd b / \dd \theta)]^2}{I_{\hat{\theta}}} $ iff the sampling distribution of $\hat{\theta}$ is of the exponential form:
$$\mathcal{L}_{\hat{\theta}} = \exp [a(\theta)\hat{\theta} + \beta(\hat{\theta}) + c(\theta)]$$
\end{block}
\uncover<2>{
\begin{block}{Second part of the inequality: minimum bound variance (efficient estimator)}
$V(\hat{\theta}) = \frac{[1+(\dd b / \dd \theta)]^2}{I_{\vec{X}}} $ iff $ I_{\hat{\theta}} = I_{\vec{X}} $,
ie iff $\hat{\theta}$ is a sufficient statistic for $\theta$,
ie iff $f(\vec{X}|\theta)$ is of the exponential form (Darmois' theorem).
\end{block}}
\end{frame}
\begin{frame}
\frametitle{Usual methods of constructing consistent estimators}
We will see:
\begin{itemize}
\item the moments method
\item the maximum likelihood method
\item the linear least squares method
\end{itemize}
\uncover<2>{NB: the last two are \textbf{implicitly defined estimators}, defined through an equation of the type $\xi(\hat{\theta})=0$}
\end{frame}
\subsection{Moments method}
\begin{frame}
\frametitle{The moments method}
We can use the law of large numbers:
$$\frac{1}{N} \sum_{i=1}^N a(X_i) \xrightarrow[N\to\infty]{} E[a(X)] = \int a(X)f(X,\theta_0)\dd X$$
Let $a(X)$ such that $E[a(X)] = \int a(X) f(X;\theta) \dd X = h(\theta)$ where $h$ is known.
If $h$ is invertible, we can find the true value of $\theta$:
$\theta_0 = h^{-1}(E[a]) = h^{-1}\left(\int a(X) f(X;\theta) \dd X\right)$
\begin{block}{}
The estimator is then:
$$\hat{\theta} = h^{-1} \left( \frac{1}{N} \sum_{i=1}^N a(x_i) \right)$$
\end{block}
NB: $\hat{\theta}$ does not directly depend on $f$, only on the $x_i$: this is a \textbf{robust estimator}.
\end{frame}
\begin{frame}
\frametitle{The moments method: application}
\myhide{
\begin{block}{1D case ($\theta \in \mathbb{R}$)}
We simply take $a(X) = X$. Then $h(\theta_0) = E[X] = \mu$, and the estimator is the sample mean $\bar{X}$.
\end{block}
\begin{block}{ND case: $\vec{\theta} = (\theta_1,\dots,\theta_K)$}
We take $a_j(X) = X^j$. Then $h_j(\vec{\theta}) = \mu_j(\vec{\theta})$: the $j$-th moment of $f(X;\vec{\theta})$
\end{block}
}
\end{frame}
\subsection{Maximum likelihood method}
\begin{frame}
\frametitle{The maximum likelihood method (ML)}
\myhide{
In general the logarithm of $\mathcal{L}$ is used: $\ln \mathcal{L}\left(\vec{X};\theta\right) = \sum_{i=1}^N \ln f(X_i;\theta)$
\begin{block}{Maximum likelihood estimator}
$$\left. \frac{\partial \ln \mathcal{L} \left(\vec{X};\theta\right)}{\partial \theta}\right|_{\hat{\theta}_\text{ML}} = \left.\frac{\partial}{\partial \theta} \left( \sum_{i=1}^N \ln f(X_i;\theta) \right)\right|_{\hat{\theta}_\text{ML}} = 0$$
$\hat{\theta}_\text{ML}$ is the maximum likelihood estimator of $\theta$.
\end{block}}
\uncover<2>{Note: numerical methods are often designed to look for a minimum rather than a maximum. $-2 \ln \mathcal{L}$ is more commonly used.}
\end{frame}
\begin{frame}
\frametitle{The maximum likelihood estimator (MLE)}
This estimator is
\begin{itemize}
\item<1-> \textbf{asymptotically efficient}
\item<2-> \textbf{biased} (except when the likelihood is of the exponential form)
\item<3-> \textbf{non optimal} (except when the likelihood is of the exponential form)
\item<4-> \textbf{convergent}
\item<5-> \textbf{invariant}: the ML estimate $\hat{\tau}$ of a function $\tau(\theta)$ is $\hat{\tau} = \tau(\hat{\theta})$
\begin{itemize}
\item However other properties of the MLE (e.g. the bias) are not invariant under change of parameter.
\end{itemize}
\item<6-> \textbf{not robust}: it requires to know the form of the PDF!
\end{itemize}
\uncover<7->{NB: invariance is an important and convenient property!}
\end{frame}
\begin{frame}
\frametitle{Variance of the MLE}
Because of asymptotic minimum variance bound:
$$V(\hat{\theta}_{ML}) \xrightarrow[N\to\infty]{} \frac{1}{N} \left[ \left.-E\left(\frac{\partial^2\ln f(x;\theta)}{\partial \theta^2}\right)\right|_{\theta=\theta_0} \right]^{-1} \approx \frac{1}{D_2(\theta=\hat{\theta}_{ML})}$$
\vspace{20pt}
For finite samples, this can result in a misestimation of the variances. In the large sample limit (or in a linear model with Gaussian data), $\mathcal{L}$ is Gaussian and $\ln\mathcal{L}$ is (hyper)parabolic. Then contours at $s$ times the standard deviations $\sigma_i$ can be found from the (hyper)surface defined by $\theta$ such that:
$$\ln\mathcal{L}(\theta) = \ln\mathcal{L}(\hat{\theta}_\text{ML}) - s^2/2$$
\uncover<2>{NB: $\ln(\mathcal{L}(\theta))$ can always be made parabolic through a change of variable, without changing the MLE, thanks to invariance.}
\end{frame}
\begin{frame}
\frametitle{MLE: illustration for Gaussian data (unknown $\mu$, known $\sigma$)}
% Plot $-\ln\mathcal{L}$, analytically, in 2 cases: a) Gaus, b) Poisson
$$\mathcal{L}(\mu) = \frac{1}{\sqrt{2\pi}\sigma} e^{-\frac{(x-\mu)^2}{2\sigma^2}}$$
$$-2\ln(\mathcal{L}(\mu)) = \frac{(x-\mu)^2}{\sigma^2} + \text{constant}$$
\mynote{Jupyter here}
\end{frame}
\begin{frame}
\frametitle{MLE: illustration for Poisson data}
\mynote{Exercise}
$$\mathcal{L}(\mu) = \mu^N \frac{e^{-\mu}}{N!}$$
$$-2\ln(\mathcal{L}(\mu)) = -2 \times \left(N \times \ln \mu - \mu \right) + \text{constant} $$
\begin{exampleblock}{}
\begin{itemize}
\item What is the MLE?
\item What is the variance of the MLE? How does it compare to the minimum variance bound?
% \item Compare to the value of $\mu$ such that $$-2 \times \ln\mathcal{L}(\theta) = -2\times\ln\mathcal{L}(\hat{\theta}_\text{ML}) +1$$
\end{itemize}
\end{exampleblock}
\mynote{Jupyter here, + solving in the blackboard. $I_N(\mu) = E(-\frac{\partial^2}{\partial\mu^2} \ln \mathcal{L}(\mu|N)) = E(N/\mu^2) = 1/\mu$}
\end{frame}
\begin{frame}
\frametitle{MLE: likelihood scans}
From the CMS Higgs boson mass measurement (\href{http://cms-results.web.cern.ch/cms-results/public-results/publications/HIG-14-009/index.html}{EPJC 75 (2015) 212}):
\begin{center}
\includegraphics[width=0.7\textwidth]{CMS-HIG-14-009_Figure_002-a}
\end{center}
\end{frame}
\begin{frame}
\frametitle{(Academic) example of a poor MLE}
\mynote{Exercise}
\begin{exampleblock}{}
A random variable $X$ is uniformly distributed on the interval $[0,\theta]$. $N$ independent trials $\{x_i\}$ are drawn. What is the MLE? Can you think of a better estimate?
\end{exampleblock}
\uncover<2->{
\begin{itemize}
\item The likelihood function is $\mathcal{L} = \prod_{i=1}^N \theta^{-1} = \theta^{-N}$ (for $\theta \geq \max\{X_i\}$, and $0$ otherwise), and the MLE is $\hat{\theta} = \max\{X_i\}$.
\item<3-> The MLE is biased (always too small by definition)... intuitively $\hat{\theta}_{CS} = \max\{X_i\} + \max\{X_i\}/N$ is a better estimate.
\end{itemize}
}
\end{frame}
\begin{frame}
\frametitle{(Academic) example of a poor MLE}
\begin{center}
\includegraphics[width=0.7\textwidth]{academic_MLE.jpg}
\end{center}
\end{frame}
\subsection{Least squares method}
\begin{frame}
\frametitle{Outline}
\tableofcontents[current]
\end{frame}
\begin{frame}
\frametitle{Least squares method (aka $\chi^2$ estimator)}
We consider $N$ observations $\vec{x}$.
The expected values $E(x_i;\vec{\theta})$ and the covariances $V_{ij}$ ($i,j = 1\dots N$) are \textbf{known functions} of $\vec{\theta}$.
\myhide{
\begin{block}{Least squares estimator}
The estimator is the value $\vec{\theta}$ such that $Q$ is minimum:
$$Q = \left[ \vec{X} - E(\vec{X};\vec{\theta})\right]^\intercal V^{-1} (\vec{\theta}) \left[ \vec{X} - E(\vec{X};\vec{\theta}) \right]$$
$$Q = \sum_{i=1}^N \sum_{j=1}^N \left[ X_i - E(X_i;\vec{\theta})\right] V^{-1}_{ij} \left[ X_j - E(X_j;\vec{\theta})\right]$$
\end{block}
}
\uncover<2>{
This estimator is:
\begin{itemize}
\item \textbf{consistent}
\item (generally) \textbf{biased}
\item \textbf{non-optimal}
\end{itemize}
}
\end{frame}
\begin{frame}
\frametitle{Gaussian case}
In the Gaussian case, the least squares and maximum likelihood methods coincide.
Assuming $N$ independent measurements $y_i$ at known points $x_i$, Gaussian distributed with mean
$\mu(x_i;\theta)$ and known variance $\sigma_i^2$:
$$\chi^2(\theta) = -2\ln\mathcal{L}(\theta) + \text{constant} = \sum_{i=1}^N \frac{\left(y_i - \mu(x_i;\theta)\right)^2}{\sigma_i^2}$$
\end{frame}
\begin{frame}
\frametitle{$\chi^2$ estimator: uncorrelated case}
\myhide{
Uncorrelated case: $V_{ij} = 0 $ for $i \neq j$
$$Q = \sum_{i=1}^N \frac{\left(x_i - E(x_i; \vec{\theta})\right)^2}{\sigma_i^2(\vec{\theta})}$$}
\end{frame}
\begin{frame}
\frametitle{Variance of the $\chi^2$ estimator}
\myhide{
If $\theta \in \mathbb{R}$:
$$V(\hat{\theta}_{LS}) \xrightarrow[N\to\infty]{} 2\left(\left.\frac{\partial^2 Q}{\partial\theta^2}\right|_{\theta=\theta_0}\right)^{-1} \approx \frac{2}{D_2(\theta=\hat{\theta}_{LS})}$$
}
\end{frame}
\begin{frame}
\frametitle{Specific cases of the $\chi^2$ estimator}
\begin{block}{Linear case}
If \structure{$\sigma_i$ are independent of $\theta$, and $E(x_k;\theta)$ linear function of $\theta$}: $Q$ is \textbf{optimal} and \textbf{convergent}.
\end{block}
\uncover<2->{
\begin{block}{Gaussian case}
\begin{itemize}
\item If \structure{the $x_i$ follow a normal law $G(X_i;\mu_i,\sigma_i)$}: $Q$ follows a $\chi^2$ law, $\chi^2(Q;N)$:
$$\chi^2(\vec{\theta}) = \sum_{i=1}^N \frac{\left( X_i - \mu_i (\vec{\theta})\right)^2}{\sigma_i^2 (\vec{\theta})}$$
\item<3> If in addition \structure{the model is linear ($\sigma_i$ independent of $\theta$)}: $\chi^2_\text{min}$ follows a $\chi^2$ law $\chi^2(\chi^2_\text{min};N-r)$ (with $r$ the dimension of $\vec{\theta}$), and $\vec{\theta}_{LS}$ follows a normal law of dimension $r$ with $\langle \hat{\theta}_{LS}\rangle = \vec{\theta}_0$, $V = 2 D_2^{-1}$
$N-r$ is \textbf{the number of degrees of freedom}.
\end{itemize}
\end{block}}
\end{frame}
\section{Parameter estimation with histograms}
\begin{frame}
\frametitle{Outline}
\tableofcontents[current]
\end{frame}
\begin{frame}
\frametitle{Histograms}
Let's assume a histogram with $N$ uncorrelated bins (the total number of events is not fixed): $d_i$ events in bin $i$, with $i = 1 \dots N$. The $d_i$ follow Poisson laws: $E(d_i;\theta) = f_i(\theta)$,
$\sigma^2(d_i;\theta) = f_i(\theta)$.
\vspace{10pt}
\begin{center}
\includegraphics[width=0.8\textwidth]{histo}
\end{center}
\end{frame}
\begin{frame}
\frametitle{Usual methods for fitting histograms}
\myhide{
\begin{block}{Minimum $\chi^2$ method (expected uncertainties)}
\vspace{-7pt}
$$Q_P = \sum_{i=1}^N \frac{\left(d_i - f_i\right)^2}{f_i}$$
\vspace{-7pt}
\end{block}
\begin{block}{Modified minimum $\chi^2$ method (observed uncertainties)}
\vspace{-7pt}
$$Q_N = \sum_{i=1}^N \frac{\left(d_i - f_i\right)^2}{d_i}$$
\vspace{-7pt}
\end{block}
\begin{block}{Binned likelihood (multinomial data)}
\vspace{-7pt}
$$\ln \lambda = - \sum_{i=1}^N d_i \ln (d_i/f_i) = \sum_{i=1}^N d_i \ln (f_i) + \text{constant}$$
\vspace{-7pt}
\end{block}
\begin{block}{Binned likelihood (Poisson data)}
\vspace{-7pt}
$$\ln \lambda = - \sum_{i=1}^N \left[ f_i - d_i + d_i \ln (d_i/f_i) \right] = - \sum_{i=1}^N \left[ f_i - d_i \ln (f_i) \right] + \text{constant}$$
\vspace{-7pt}
\end{block}}
\end{frame}
\begin{frame}
\frametitle{}
All methods are asymptotically equivalent. The binned likelihood method converges faster (and the modified $\chi^2$ is the slowest), and is less sensitive to empty bins.
\end{frame}
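\begin{frame}[fragile]
  \frametitle{Binned likelihood fit: a minimal sketch}
  A minimal Python sketch (added for illustration, under the simplifying assumption of a constant expectation $f_i(\theta) = \theta$ in every bin) of minimising the Poisson binned likelihood of the previous slides:
\begin{verbatim}
import numpy as np
from scipy.optimize import minimize

d = np.array([3, 7, 4, 6, 5])             # observed bin contents d_i

def neg_log_lambda(params):
    theta = params[0]
    f = np.full(d.shape, theta)           # model prediction f_i(theta)
    return np.sum(f - d * np.log(f))      # -ln(lambda) up to a constant

fit = minimize(neg_log_lambda, x0=[d.mean()], bounds=[(1e-9, None)])
print(fit.x)  # MLE of theta; here it coincides with the mean of the counts
\end{verbatim}
\end{frame}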
\section{Some basic estimators}
\begin{frame}
\frametitle{Outline}
\tableofcontents[current]
\end{frame}
\begin{frame}
\frametitle{Sample mean}
\mynote{Write on the blackboard first}
\uncover<2>{
$$\hat{\mu} = \bar{X} = \frac{1}{N}\sum_{i=1}^N x_i$$
This estimator is \textbf{unbiased}: $\langle \bar{X} \rangle = \mu$ by linearity of the expectation.
Its variance is:
$$V(\hat{\mu}) = \frac{\sigma^2}{N},\qquad\text{i.e.}\quad \sigma(\hat{\mu}) = \frac{\sigma}{\sqrt{N}}$$
The sample mean is an \textbf{efficient estimator} of the mean of a Gaussian, but not in the general case.
}
\end{frame}
\begin{frame}
\frametitle{Variance estimator: known mean}
\mynote{Write on the blackboard first}
\uncover<2>{
$$\hat{V}_\mu = \frac{1}{N} \sum_{i=1}^N (x_i-\mu)^2$$
This estimator is \textbf{consistent and unbiased}: $\langle \hat{V}_\mu \rangle = \frac{N\langle (x-\mu)^2 \rangle}{N} = V$
}
\end{frame}
\begin{frame}
\frametitle{Variance estimator: unknown mean}
\myhide{
Using $\hat{\mu} = \bar{X}$
$$\hat{V}_b = \frac{1}{N} \sum_{i=1}^N (x_i-\bar{X})^2 = \frac{1}{N} \sum_{i=1}^N (x_i^2 - \bar{X}^2)$$
\begin{eqnarray}
\langle \hat{V}_b \rangle & = & \frac{N \langle X^2 - \bar{X}^2 \rangle}{N} = \langle X^2 \rangle - \langle \bar{X}^2 \rangle \nonumber \\
& = & \langle X^2 \rangle - \langle X \rangle^2 - \left( \langle \bar{X}^2 \rangle - \langle \bar{X} \rangle ^2 \right) \nonumber \\
& = & V(X) - V(\bar{X}) \nonumber \\
& = & V(X) - \frac{V(X)}{N} \nonumber \\
& = & \left( 1 - \frac{1}{N} \right) V(X)\quad \neq V(X) \nonumber
\end{eqnarray}
This estimator is biased! $\rightarrow$ \textbf{Bessel correction}
}
\end{frame}
\begin{frame}
\frametitle{Variance estimator: unknown mean}
\mynote{Write on the blackboard first}
\begin{block}{}
$$\hat{V} = \frac{1}{N-1} \sum_{i=1}^N (x_i - \bar{X})^2$$
\end{block}
$$V(\hat{V}) \approx \frac{2V^2}{N}$$
\end{frame}
\section{Point estimation in practice}
\begin{frame}
\frametitle{Outline}
\tableofcontents[current]
\end{frame}
\begin{frame}
\frametitle{Choice of estimator}
\structure{How to choose an estimator?} It should have the following properties:
\begin{itemize}
\item<2-> Consistency and unbiasedness
\item<3-> Minimum loss of information
\item<4-> Minimum variance (efficient estimator)
\item<5-> Robustness
\item<6-> Simplicity (e.g. if possible normally distributed, etc.)
\item<7-> Minimum computer time
\item<8-> Minimum loss of physicist's time
\end{itemize}
\end{frame}
\begin{frame}
\frametitle{Monte-Carlo (``toy'') studies}
\begin{block}{}
Monte-Carlo studies: directly related to the frequentist paradigm (simulate experiment many times to estimate pdf)
\end{block}
It is possible to write simple, short, fast Monte Carlo programs that generate data for fitting. One can then look at fit values, uncertainties, and pulls. These are often called ``toy'' Monte Carlos to differentiate them from complicated event and detector simulation programs.
\begin{itemize}
\item Tests likelihood function.
\item Tests for bias.
\item Tests that uncertainty from fit is correct.
\end{itemize}
\uncover<2>{
This does NOT test the correctness of the model of the data. For
example, if you think that some data is
Gaussian
distributed, but
it is really
Lorentzian, then the simple Monte Carlo test will not reveal this.}
\end{frame}
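\begin{frame}[fragile]
  \frametitle{Toy Monte Carlo: a minimal sketch}
  A minimal Python sketch (added for illustration) of a toy study of the exponential lifetime MLE $\hat{\tau} = \bar{t}$, looking at the spread of the estimates and at the pulls:
\begin{verbatim}
import numpy as np

rng = np.random.default_rng(seed=1)
tau0, n_events, n_toys = 0.5, 1000, 1000

estimates = []
for _ in range(n_toys):
    t = rng.exponential(tau0, size=n_events)  # one pseudo-experiment
    estimates.append(t.mean())                # MLE of the exponential lifetime

estimates = np.array(estimates)
sigma_hat = estimates / np.sqrt(n_events)     # estimated uncertainty per toy
pulls = (estimates - tau0) / sigma_hat
# average estimate, spread, pull width (close to tau0, tau0/sqrt(N), 1)
print(estimates.mean(), estimates.std(), pulls.std())
\end{verbatim}
\end{frame}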
% \begin{frame}
% \frametitle{Simple Monte Carlo}
%
% \begin{exampleblock}{Exercise}
% \begin{itemize}
% \item Generate exponential ($\tau_0=0.5$ and $N=1000$)
% \item Find MLE
% \item Repeat many times (e.g. 1000 times)
% \item Plot $\hat{\tau}$%, $\sigma(\hat{\tau})$, pulls ($= (\hat{\tau}-\tau_0)/\sigma(\hat{\tau})$) in histograms
% \item Repeat for other estimators, especially binned
% \end{itemize}
% \end{exampleblock}
%
% \end{frame}
% \subsection{More realistic cases}
% cf Fred James + PDG stat review
\begin{frame}
\frametitle{Extended likelihood}
In many cases the number of events $n$ is not fixed: this dependence should be included in the likelihood: this is called the extended likelihood.
\myhide{$$\mathcal{L}(\vec{\theta}) = \structure{\frac{\mu^n}{n!}e^{-\mu}} \prod_{i=1}^{n} f(x_i | \vec{\theta})$$}
$\mu$ sometimes depends on $\vec{\theta}$ itself, providing additional information.
\end{frame}
\begin{frame}
\frametitle{Extended likelihood: illustration}
\FrameText{\href{http://lhcbproject.web.cern.ch/lhcbproject/Publications/LHCbProjectPublic/LHCb-PAPER-2016-020.html}{JHEP 1609 (2016) 153}}
\begin{center}
\includegraphics[width=0.7\textwidth]{fig1a}
\end{center}
\end{frame}
\begin{frame}
\frametitle{Nuisance parameters}
The parameters of the likelihood are usually split in two categories:
\begin{itemize}
\item \textbf{parameters of interest} $\vec{\theta}$: the parameters we are interested in measuring;
\item \textbf{nuisance parameters} $\vec{\nu}$: other parameters.
\end{itemize}
Example: the likelihood could depend on $\sigma_S$, the signal cross section (a parameter of interest), and $\mu_B$, a background cross section (a nuisance parameter).
\end{frame}
\begin{frame}
\frametitle{Nuisance parameters}
\centering
\includegraphics[width=0.9\textwidth]{ATLAS_WW_ex1.png}
\end{frame}
\begin{frame}
\frametitle{Nuisance parameters}
\centering
\includegraphics[width=0.9\textwidth]{ATLAS_WW_ex2.png}
\end{frame}
\begin{frame}
\frametitle{Nuisance parameters in practice}
It is often useful to ``constrain'' the nuisance parameters if possible:
\uncover<2->{
\begin{block}{Using prior knowledge}
Assume that we know the pdf for $\vec{\nu}$: $P_{\vec{\nu}}$. Then the likelihood becomes:
\myhide{$$\mathcal{L}(\vec{\theta},\vec{\nu}) = P_x(\vec{x}|\vec{\theta},\vec{\nu}) P_{\vec{\nu}}(\vec{\nu})$$ }
\end{block}
}
\uncover<3->{
\begin{block}{Using auxiliary data}
Assume that we have $\vec{y}$ measurements, statistically independent from $\vec{x}$, and described by a model $P_y(\vec{y}|\vec{\nu})$. Then:
\myhide{$$\mathcal{L}(\vec{\theta},\vec{\nu}) = P_x(\vec{x}|\vec{\theta},\vec{\nu}) P_y(\vec{y}|\vec{\nu})$$}
\end{block}
Technical note: if one wants to simulate the experiment with Monte Carlo, $\vec{x}$ and $\vec{y}$ must be generated under assumption of \textbf{fixed} values for $\vec{\theta}$ and $\vec{\nu}$. (Do not vary $\vec{\theta}$ and $\vec{\nu}$ for each toy!)}
\end{frame}
\begin{frame}
\frametitle{Frequentist treatment of nuisance parameters: profile likelihood}
It is useful to remove the dependence of the likelihood $\mathcal{L}(\vec{\theta},\vec{\nu})$ on the nuisance parameters $\vec{\nu}$, by defining the profile likelihood:
\myhide{$$\mathcal{L}_\text{p}(\vec{\theta}) = \mathcal{L}(\vec{\theta},\hat{\hat{\vec{\nu}}}(\vec{\theta})),$$}
where $\hat{\hat{\vec{\nu}}}(\vec{\theta})$ is the value of $\vec{\nu}$
that maximises the likelihood for a given value of $\vec{\theta}$. We'll come back to profile likelihood later.
\end{frame}
\begin{frame}
\frametitle{Profile likelihood scan}
\FrameText{\href{http://cms-results.web.cern.ch/cms-results/public-results/publications/HIG-13-001/index.html}{EPJC 74 (2014) 3076}}
\begin{center}
\includegraphics[width=0.44\textwidth]{CMS-HIG-13-001_Figure_019}\hfill
\includegraphics[width=0.55\textwidth]{CMS-HIG-13-001_Figure_022-a}
\end{center}
``stat only'' means nuisance parameters $\vec{\nu}$ fixed to their best fit value $\hat{\hat{\vec{\nu}}}(\hat{\vec{\theta}})$: $\mathcal{L}(\vec{\theta},\hat{\hat{\vec{\nu}}}(\hat{\vec{\theta}}))$ (conditional PDF),\\
the other curve is the \textbf{profile likelihood scan} $\mathcal{L}_\text{p}(\vec{\theta}) = \mathcal{L}(\vec{\theta},\hat{\hat{\vec{\nu}}}(\vec{\theta}))$.
\end{frame}
\section{Bayesian inference}
\begin{frame}
\frametitle{Outline}
\tableofcontents[current]
\end{frame}
\begin{frame}
\frametitle{Bayesian inference}
\mynote{Write Bayes' theorem on the black board}
Reminder: Bayes' theorem:
\uncover<2->{
$$\alert{p(\theta | x_0)} = \frac{\mathcal{L}(x_0|\theta) \structure{\pi(\theta)}}{\int \mathcal{L}(x_0|\theta^\prime) \structure{\pi(\theta^\prime)} \dd \theta^\prime}$$
Bayesian inference is about determining the posterior probability, $\alert{p(\theta | x_0)}$. We see that it depends on the choice of prior,
$\structure{\pi(\theta)}$.
\begin{alertblock}{}
How to represent total lack of knowledge on the parameter(s) of interest?
\end{alertblock}
}
\uncover<3>{
\begin{block}{}
An intuitive (and popular) choice: \textbf{uniform prior}, by Laplace's \emph{Principle of insufficient reason} (in the absence of any other reason, all hypotheses are equally probable).
\end{block}
}
\end{frame}
\begin{frame}
\frametitle{The uniform prior}
Several issues:
\begin{itemize}
\item<1-> If one chooses a uniform prior for $\theta$, then the prior for some function $g(\theta) = \tau$ is not uniform. But what is the most ``natural'' variable for which the prior should be uniform? An angle or its cosine, mass or mass squared, lifetime or decay rate...?
\item<2-> If the parameter range is infinite or semi-infinite, the prior over any finite range is 0...
\item<3-> Issues even in the discrete case: consider we wish to determine if an object is red or white: $P=1/2$ for each. However consider that we have 100 possible shades of red.
Is $P(\text{white})$ now 1/101 or 1/2?
\end{itemize}
\end{frame}
\begin{frame}
\frametitle{Jeffreys' prior}
Physicist Harold Jeffreys introduced the \textbf{objective priors} known today as \textbf{Jeffreys' priors}, sometimes (somewhat wrongly) also called \emph{uninformative} or \emph{objective} priors.
They are expressed applying the principle of minimum Fisher information:
\myhide{1D:
$$\pi(\theta) = \sqrt{I(\theta)} = \sqrt{- E\left(\frac{\partial^2 \ln f(x|\theta)}{\partial \theta^2}\right)}$$
ND:
$$\pi(\vec{\theta}) = \sqrt{\det (I(\vec{\theta}))}$$
}
\uncover<2->{Note: Jeffreys' priors are designed so that \textbf{inference does not depend on the chosen metric}. In other words, if $\tau = f(\theta)$, $p(\theta|x_0)$ and $p^\prime(\tau|x_0)$ are correctly related
by the Jacobian of the transformation.}
\end{frame}
\begin{frame}
\frametitle{Examples}
\mynote{Write them on the blackboard too}
Jeffreys' priors for some common cases:
\begin{itemize}
\item Poisson signal mean $\mu$, no background: \structure{$\pi(\mu) = 1/\sqrt{\mu}$}
\item<2-> Poisson signal mean $\mu$, mean background $b$: \structure{$\pi(\mu) = 1/\sqrt{\mu+b}$}
\item<3-> Unbounded mean of Gaussian: \structure{$\pi(\mu)=1$} (uniform)
\item<4-> Std. dev. of a Gaussian with fixed $\mu$: \structure{$\pi(\sigma) = 1/\sigma$}
\end{itemize}
\uncover<5>{Note: these priors are \textbf{improper} (not normalisable). This can be allowed because priors are multiplied by $\mathcal{L}$ and one may still get a normalisable posterior density.}
\end{frame}
\begin{frame}
\frametitle{How to choose the prior?}
Pros and cons of Jeffreys' priors:
\begin{itemize}
\item<2-> Pro: inference does not depend on the chosen metric
\item<3-> Con (for lazy physicists): harder to compute than an uniform prior...
\item<4-> Con: violates the likelihood principle
\item<5-> Con: depends on the likelihood, i.e. on the experimental apparatus (ex: Poisson with background)
\item<6-> Con: difficult to apply for more than one parameter (solution: \textit{reference prior}, Bernardo and Berger)
\end{itemize}
\uncover<7>{
\textbf{What is the correct prior?}
There is not unique choice. Recall that Bayesian statistics is subjective by essence... However, to get meaningful results, one should check that results are insensitive
to the choice of prior (within some sensible range).
}
\end{frame}
\begin{frame}
\frametitle{Likelihood principle}
\begin{block}{Likelihood principle}
Given a measurement $x$, all the relevant information regarding $\theta$ is encoded in the likelihood $\mathcal{L}(x_0|\theta)$.
\end{block}
This is encoded in Bayesian inference (see Bayes' theorem), but generally violated in frequentist inference (which needs to also account for the probability to observe
other results than the one measured...), and also by some of Jeffreys' priors.
\end{frame}
\begin{frame}
\frametitle{Bayesian inference about the Poisson parameter}
$$P(n|\mu) = \frac{e^{-\mu} \mu^n}{n!}$$
\begin{itemize}
\item A flat prior would give $E(\mu) = n+1$... not good since $E(n)=\mu$
\item If there is background, Jeffreys' prior is $\pi(\mu) = 1/\sqrt{\mu+b}$. Depends on $b$?! Another proof that Jeffreys' prior does not reflect one's prior knowledge...
\end{itemize}
\end{frame}
\begin{frame}
\frametitle{Bayesian treatment of nuisance parameters}
Back to the case of parameters of interest $\vec{\theta}$ and nuisance parameters $\vec{\nu}$.
One may have data $\vec{y}$ constraining $\vec{\nu}$, with likelihood $\mathcal{L}(\vec{y}|\vec{\nu})$.
Then one may use Bayes' theorem to obtain the posterior probability on $\vec{\nu}$:
$$p(\vec{\nu}|\vec{y}) \propto \mathcal{L}(\vec{y}|\vec{\nu}) \pi(\vec{\nu})$$
\uncover<2>{One often lacks knowledge on $\mathcal{L}(\vec{y}|\vec{\nu})$... possibilities:
\begin{itemize}
\item Gaussian pdf (centered on nominal value, with certain standard deviation)
\item Log-normal or gamma pdf (better than truncated pdf if $\nu>0$ -- e.g. for multiplicative factors: efficiency, ...)
\end{itemize}
}
\end{frame}
\begin{frame}
\frametitle{Bayesian treatment of nuisance parameters}
The likelihood function, prior and posterior all depend on the nuisance parameters:
\myhide{$$p(\vec{\theta},\vec{\nu} | \vec{x}_0) \propto \mathcal{L}(\vec{x}_0|\vec{\theta},\vec{\nu}) \pi(\vec{\theta},\vec{\nu})$$}
One can obtain the posterior pdf for $\vec{\theta}$ alone by marginalising the likelihood (integrating over the nuisance parameters):
\myhide{\begin{eqnarray}
p(\vec{\theta} | \vec{x}_0) & \propto & \left(\int \mathcal{L}(\vec{x}_0|\vec{\theta},\vec{\nu}) \pi(\vec{\nu} | \vec{\theta}) \dd \vec{\nu}\right) \pi(\vec{\theta}) \nonumber \\
& \propto & \mathcal{L}(\vec{x}_0|\vec{\theta}) \pi(\vec{\theta}) \nonumber
\end{eqnarray}}
\end{frame}
\begin{frame}
\frametitle{Marginalisation vs profiling}
Nuisance parameters in Bayesian inference are removed by \textbf{marginalisation}:
\begin{eqnarray}
p(\theta|x_0) & = & \int p(\theta,\nu|x_0) \dd\nu \nonumber \\
& \propto & \int \mathcal{L}(x_0|\theta,\nu)\pi(\theta)\pi(\nu)\dd\nu \nonumber
\end{eqnarray}
\uncover<2>{in contrast to \textbf{profiling}, which can be viewed as marginalization
with the data-dependent prior $\pi(\theta,\nu) = \delta(\nu-\hat{\nu}(\theta,x_0))$
\begin{eqnarray}
p(\theta|x_0) & \propto & \int \mathcal{L}(x_0|\theta,\nu)\pi(\theta,\nu)\dd\nu \nonumber\\
& \propto & \int \mathcal{L}(x_0|\theta,\nu)\pi(\theta)\delta(\nu-\hat{\nu}(\theta,x_0))\dd\nu \nonumber\\
& \propto & \mathcal{L}(x_0 | \theta,\hat{\nu}(\theta))\nonumber
\end{eqnarray}
}
\end{frame}
\begin{frame}
\frametitle{Marginalisation and numerical integration}
The integrals involved in marginalisation are often very complex and over a high-dimensional space.
Integration may be difficult with standard Monte-Carlo methods:
\begin{itemize}
\item transformation method not possible (requires inversion of the cumulative function)
\item hit-and-miss method too inefficient
\end{itemize}
\uncover<2>{$\rightarrow$ \textbf{Markov Chain Monte-Carlo (MCMC)}\footnote{\uncover<2>{Note: MCMC is frequently used as a tool in Bayesian statistics but is not Bayesian by itself.}}: Markov chain following a pdf proportional to a desired function $\pi(\vec{\theta})$. $\pi(\vec{\theta})$ does not have to be normalised!
One samples $(\vec{\theta},\vec{\nu})$ from $\pi(\vec{\theta},\vec{\nu})$ and records the marginal distribution for $\vec{\theta}$.}
\end{frame}
\begin{frame}
\frametitle{MCMC: Metropolis-Hastings algorithm}
One starts with a proposal pdf $q(\vec{\theta},\vec{\theta}_0)$ (example: Gaussian pdf centered around $\vec{\theta}_0$). Starting at $\vec{\theta}_0$:
\begin{enumerate}
\item<2-> Generate a value $\vec{\theta}$ using the proposal density $q(\vec{\theta},\vec{\theta}_0)$
\item<3-> Form the Hastings test ratio, $\alpha = \min \left[ 1, \frac{\pi(\vec{\theta})q(\vec{\theta}_0,\vec{\theta})}{\pi(\vec{\theta}_0)q(\vec{\theta},\vec{\theta}_0)}\right]$
\item<4-> Generate $u$ uniformly in $[0,1]$
\item<5-> If $u \leq \alpha$, take $\vec{\theta}_1 = \vec{\theta}$. Otherwise, repeat the old point: $\vec{\theta}_1 = \vec{\theta}_0$
\item<6-> Set $\vec{\theta}_0 = \vec{\theta}_1$ and go back to step 1
\end{enumerate}
\uncover<7>{If $q$ is symmetric in $\vec{\theta}$ and $\vec{\theta}_0$, this reduces to the original \textit{Metropolis} algorithm, with $\alpha = \min [ 1, \pi(\vec{\theta})/\pi(\vec{\theta}_0) ]$}
\end{frame}
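\begin{frame}[fragile]
  \frametitle{Metropolis algorithm: a minimal sketch}
  A minimal Python sketch (added for illustration) of the algorithm of the previous slide, with a symmetric Gaussian proposal density $q$, so that only the ratio $\pi(\vec{\theta})/\pi(\vec{\theta}_0)$ enters:
\begin{verbatim}
import numpy as np

def metropolis(log_pi, theta0, n_steps, step=1.0, seed=0):
    """Markov chain with stationary density proportional to exp(log_pi)."""
    rng = np.random.default_rng(seed)
    chain = [theta0]
    for _ in range(n_steps):
        proposal = chain[-1] + rng.normal(scale=step)      # step 1: propose
        log_alpha = log_pi(proposal) - log_pi(chain[-1])   # step 2: log ratio
        if np.log(rng.uniform()) <= log_alpha:             # steps 3-4: accept...
            chain.append(proposal)
        else:
            chain.append(chain[-1])                        # ...or repeat old point
    return np.array(chain)

# Example: sample a density proportional to a standard normal
samples = metropolis(lambda t: -0.5 * t**2, theta0=0.0, n_steps=10_000)
print(samples.mean(), samples.std())   # approximately 0 and 1
\end{verbatim}
\end{frame}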
\begin{frame}
\frametitle{MCMC: visualisation}
\centering
\includegraphics[width=\textwidth]{MCMC.pdf}
\end{frame}
\begin{frame}
\frametitle{Example: cosmological parameters from Planck}
\FrameText{\href{http://arxiv.org/abs/arXiv:1303.5076}{AA 571 (2014) A16}}
\begin{figure}
\centering
\includegraphics[width=0.67\textwidth]{triangle_planckonly_vs_WMAP_6pars}
\caption{\tiny Comparison of the base $\Lambda$CDM model parameters for Planck+lensing only (colour-coded samples), and the 68\% and 95\%
constraint contours adding WMAP low-$\ell$ polarization (WP; red contours), compared to WMAP-9 (Bennett et al. 2012; grey contours).
}
\end{figure}
\end{frame}
\section{Data combination}
\begin{frame}
\frametitle{Outline}
\tableofcontents[current]
\end{frame}
\begin{frame}
\frametitle{Combination of data}
\FrameText{\href{http://cms-results.web.cern.ch/cms-results/public-results/publications/HIG-14-009/index.html}{EPJC 75 (2015) 212},
\href{https://arxiv.org/abs/1403.4427}{arXiv:1403.4427}}
Combining data:
\begin{itemize}
\item Best is to go back to the original data (i.e. combine the full likelihoods)
\begin{itemize}
\item Includes full information, including correlations, non-Gaussianities, etc.
\item May be a lot of work / not possible (e.g. combining across experiments)
\end{itemize}
\item<2-> Discussed here: combining results
\end{itemize}
\begin{center}
\includegraphics[width=0.44\textwidth]{CMS-HIG-14-009_Figure_002-a}\hfill
\uncover<2>{\includegraphics[width=0.55\textwidth]{mtop_combo_Fig5}}
\end{center}
\end{frame}
\begin{frame}
\frametitle{One parameter, no correlations: weighted average}
Assume $N$ determinations $x_i \pm \sigma_i$ of a single physical quantity $x$. We use the least squares method:
\myhide{
$$S(x) = \sum_i \left( \frac{x-x_i}{\sigma_i} \right)^2$$
Minimising $S(x)$ yields
$$\hat{x}_\text{comb} = \frac{\sum_i w_i x_i}{\sum_i w_i}$$
where
$$w_i = \frac{1}{\sigma_i^2}$$
$\hat{x}_\text{comb}$ is the weighted average of the $x_i$. The variance of $\hat{x}_\text{comb}$ is:
$$\frac{1}{\sigma^2_\text{comb}} = \sum_i \frac{1}{\sigma_i^2}$$
}
\end{frame}
\begin{frame}
\frametitle{Notes on the weighted average}
\begin{itemize}
\item $\frac{1}{\sigma^2_\text{comb}} = \sum_i \frac{1}{\sigma_i^2}$: $\sigma_\text{comb}$ is at least as small as the smallest of the individual uncertainties.
\item $\frac{1}{\sigma^2_\text{comb}}$ does \textbf{not} depend on the degree of consistency of the individual measurements. Combining $0 \pm 3$ and $z \pm 3$ gives a combined
uncertainty of $\approx 2$, independent of whether $z$ is 1 or 7.
\item The method assumes a \textbf{linear} problem: it assumes that the $\sigma_i$ are independent of the $x_i$.
\end{itemize}
\end{frame}
\begin{frame}
\frametitle{An apparent counter-example}
Assume we want to combine $100 \pm 10$ and $1 \pm 1$\footnote{In real life one should not try to combine such discrepant measurements... There must be a mistake in either of them.},
two independent measurements of the same Poisson parameter.
The weighted average gives $2 \pm 1$, which seems ridiculous. \textbf{Where is the issue?}
\uncover<2->{
Here we have used the \textbf{estimated} uncertainties, instead of the true ones. These observed uncertainties depend on the measured rate, and a downward fluctuation
artificially increases the weight in the combination.
Both measurements should have been given the same weight, then yielding $50.5 \pm 5$.
}
\uncover<3>{A way to overcome this is to make the procedure \textbf{iterative}, trying to approach the true uncertainty (variance) for each measurement.}
\end{frame}
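% illustrative sketch of the iterative reweighting mentioned on the previous frame
\begin{frame}[fragile]
\frametitle{Iterative weighted average: a minimal sketch}
A possible sketch of the iterative procedure for the Poisson example above:
the variances are re-estimated from the current combined value, which here
converges to equal weights and $50.5 \pm 5$.
\begin{verbatim}
import numpy as np

def iterative_average(counts, n_iter=10):
    x = np.asarray(counts, dtype=float)
    var = x.copy()                      # first guess: observed counts
    for _ in range(n_iter):
        w = 1.0 / var
        mu = np.sum(w * x) / np.sum(w)  # weighted average, current weights
        var = np.full_like(x, mu)       # Poisson: true variance = true mean
    return mu, 1.0 / np.sqrt(np.sum(1.0 / var))

print(iterative_average([100.0, 1.0]))  # (50.5, ~5.0)
\end{verbatim}
\end{frame}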
\begin{frame}
\frametitle{General case with correlations: Best Linear Unbiased Estimate (BLUE)}
$N$ (unbiased) measurements $x_i$ with covariance matrix $V$
\begin{itemize}
\item \structure{Best}: smallest variance
\item \structure{Linear}: linear dependence on the inputs, $\hat{x}_\text{BLUE} = \sum_i w_i x_i$
\item \structure{Unbiased}: implies $\sum_i w_i = 1$.
\end{itemize}
The weights are obtained by minimising the variance of the combination, $\sum_{i,j} w_i w_j V_{ij}$, subject to $\sum_i w_i = 1$:
$$w_i = \frac{\sum_{j} \left(V^{-1}\right)_{ij}}{\sum_{i,j} \left(V^{-1}\right)_{ij}}$$
This is equivalent to the least squares method, but explicitly gives the weights for each measurement in the combination.
\end{frame}
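% illustrative NumPy sketch of the BLUE weight formula above
\begin{frame}[fragile]
\frametitle{BLUE: a minimal sketch}
A possible NumPy sketch of the weight formula above; the toy example uses
$\sigma_1=1$, $\sigma_2=2$ and $\rho=0.8 > \sigma_1/\sigma_2$, so one weight
is negative (see the next slide on extrapolation).
\begin{verbatim}
import numpy as np

def blue(x, V):
    Vinv = np.linalg.inv(V)
    w = Vinv.sum(axis=1) / Vinv.sum()   # weights from (V^-1) row sums
    x_hat = w @ np.asarray(x)
    return x_hat, np.sqrt(w @ V @ w), w

V = np.array([[1.0, 1.6],
              [1.6, 4.0]])              # sigma1=1, sigma2=2, rho=0.8
print(blue([10.0, 11.0], V))            # x_hat ~ 9.67: outside [10, 11]
\end{verbatim}
\end{frame}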
\begin{frame}
\frametitle{Correlations and extrapolation}
Two measurements $x_1 \pm \sigma_1$, $x_2 \pm \sigma_2$, with $\sigma_1 \leq \sigma_2$ and correlation coefficient $\rho > \sigma_1 / \sigma_2$.
In this case, \structure{$\hat{x}_\text{BLUE} \notin [x_1,x_2]$}: BLUE involves \textbf{extrapolation}!
Counterintuitive, but sensible: in the case of strongly positive correlation, it is likely that $x_1$ and $x_2$ are both on the same side of the true value.
\structure{Corollary}: correlations between measurements may be poorly known, yet BLUE is highly sensitive to the assumed correlations.
\end{frame}
\begin{frame}
\frametitle{Correlated measurements of several physical quantities}
$N$ observables $X_\alpha = (X_1,\dots,X_N)$, $n$ experimental results $y_i = (y_1,\dots,y_n)$. Each $y_i$ is a measurement of a given $X_\alpha$, and all observables are measured at least
once: $n \geq N$. The covariance matrix of the $n$ measurements is $V$.
Let us define the $(n \times N)$ matrix $\mathcal{U}$:
\myhide{
$$\mathcal{U}_{i\alpha} = \begin{cases}
1 & \text{if } y_i \text{ is a measurement of } X_\alpha \\
0 & \text{otherwise}
\end{cases}$$
Then the best linear estimate of each observable $X_\alpha$ is:
$$\hat{x}_\alpha = \sum_{i=1}^n \lambda_{\alpha i} y_i$$
where the weights are:
$$\lambda_{\alpha i} = \sum_{\beta=1}^N \left( \mathcal{U}^t V^{-1} \mathcal{U} \right)^{-1}_{\alpha \beta} \left(\mathcal{U}^t V^{-1} \right)_{\beta i} $$
The covariance for the estimates is
$$\text{cov}(\hat{x}_\alpha, \hat{x}_\beta) =\left( \mathcal{U}^t V^{-1} \mathcal{U} \right)^{-1}_{\alpha \beta} $$
}
\end{frame}
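% illustrative NumPy sketch of the generalised BLUE formulas above
\begin{frame}[fragile]
\frametitle{BLUE for several observables: a minimal sketch}
A possible NumPy sketch of the generalised formulas above, for a toy case with
two uncorrelated measurements of $X_1$ and one of $X_2$.
\begin{verbatim}
import numpy as np

def blue_multi(y, V, U):
    Vinv = np.linalg.inv(V)
    C = np.linalg.inv(U.T @ Vinv @ U)   # cov(x_hat_alpha, x_hat_beta)
    lam = C @ U.T @ Vinv                # weights lambda_{alpha i}
    return lam @ y, C

U = np.array([[1.0, 0.0],               # y_1 measures X_1
              [1.0, 0.0],               # y_2 measures X_1
              [0.0, 1.0]])              # y_3 measures X_2
V = np.diag([1.0, 4.0, 9.0])
print(blue_multi(np.array([10.0, 12.0, 5.0]), V, U))
\end{verbatim}
\end{frame}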
\begin{frame}
\frametitle{Example: straight line fitting}
Two tracking planes, each with 3 layers providing uncorrelated measurements $y_i \pm \sigma_i$. No magnetic field: one fits straight lines $y = a + bx$.
The first sub-detector gives $(a_1,b_1)$, the second $(a_2,b_2)$, combined into $(a_\text{comb},b_\text{comb})$
\includegraphics[width=\textwidth]{BLUE/fig1}
\end{frame}
\begin{frame}
\frametitle{Example: straight line fitting}
Now the two tracking planes are on either side of the origin.
\includegraphics[width=\textwidth]{BLUE/fig2_ab}
\end{frame}
\begin{frame}
\frametitle{Real life example 1: determination of the fraction of dark energy in the universe, $\Omega_\Lambda$}
\centering
\includegraphics[width=0.45\textwidth]{BLUE/fig3.pdf}
\end{frame}
\begin{frame}
\frametitle{Real life example 2: preliminary world top quark mass combination}
\FrameText{\href{https://arxiv.org/abs/1403.4427}{arXiv:1403.4427}}
\begin{columns}
\begin{column}{1.19\textwidth}
\includegraphics[width=0.4\textwidth]{mtop_combo_correl.png}\hfill
\includegraphics[width=0.19\textwidth]{mtop_combo_BLUE_coef.png}\hfill
\includegraphics[width=0.4\textwidth]{mtop_combo_Fig5}
\end{column}
\end{columns}
\end{frame}
\begin{frame}
\frametitle{Real life example 3: Standard Model fits}
\FrameText{\href{http://project-gfitter.web.cern.ch/project-gfitter/}{arXiv:1803.01853}}
\begin{columns}
\begin{column}{1.15\textwidth}
\includegraphics[width=0.49\textwidth]{2018_03_20_Scan2D_MWvsmt_logo}\hfill
\includegraphics[width=0.49\textwidth]{2018_03_20_WMassScan_logo}
\end{column}
\end{columns}
\end{frame}
\begin{frame}
\frametitle{Back to profile likelihood}
\hfill\includegraphics[width=0.8\textwidth]{BLUE/fig2_ab}
\begin{columns}
\begin{column}{0.55\textwidth}
The correct combination is obtained by going back to the full likelihoods.
Combining profile likelihoods $\mathcal{L}_{\text{prof},1|2}(a) = \mathcal{L}_{1|2}(a,\hat{\hat{b}}(a))$ does not give correct results.
\end{column}
\begin{column}{0.37\textwidth}
\includegraphics[width=\textwidth]{BLUE/fig2_cd}
\end{column}
\end{columns}
\end{frame}
% \begin{frame}
% \frametitle{Combining $p$-values}
% \end{frame}
% Leave this for when I introduce p-values...
\end{document}
| {
"alphanum_fraction": 0.6800051772,
"avg_line_length": 33.9114850037,
"ext": "tex",
"hexsha": "edde23a3913d8dcde12bbd6f7dd1bc1a41599fc9",
"lang": "TeX",
"max_forks_count": null,
"max_forks_repo_forks_event_max_datetime": null,
"max_forks_repo_forks_event_min_datetime": null,
"max_forks_repo_head_hexsha": "2db74273a8a5e543fe034b7c9054c1976c0c9aa7",
"max_forks_repo_licenses": [
"CC-BY-4.0"
],
"max_forks_repo_name": "echapon/CoursStatX",
"max_forks_repo_path": "Cours2/cours2.tex",
"max_issues_count": null,
"max_issues_repo_head_hexsha": "2db74273a8a5e543fe034b7c9054c1976c0c9aa7",
"max_issues_repo_issues_event_max_datetime": null,
"max_issues_repo_issues_event_min_datetime": null,
"max_issues_repo_licenses": [
"CC-BY-4.0"
],
"max_issues_repo_name": "echapon/CoursStatX",
"max_issues_repo_path": "Cours2/cours2.tex",
"max_line_length": 343,
"max_stars_count": null,
"max_stars_repo_head_hexsha": "2db74273a8a5e543fe034b7c9054c1976c0c9aa7",
"max_stars_repo_licenses": [
"CC-BY-4.0"
],
"max_stars_repo_name": "echapon/CoursStatX",
"max_stars_repo_path": "Cours2/cours2.tex",
"max_stars_repo_stars_event_max_datetime": null,
"max_stars_repo_stars_event_min_datetime": null,
"num_tokens": 15102,
"size": 46357
} |
\documentclass[journal,onecolumn]{IEEEtran}
\usepackage{cite}
\usepackage[pdftex]{graphicx}
\usepackage[cmex10]{amsmath}
\interdisplaylinepenalty=2500
\usepackage{array}
\usepackage{url}
\usepackage{fancyvrb}
\usepackage{color}
\usepackage[ascii]{inputenc}
\usepackage{listings}
\lstset{language=Python,
numbers=left,
keywordstyle=\color[rgb]{0,0,1},
commentstyle=\color[rgb]{0.133,0.545,0.133},
stringstyle=\color[rgb]{0.627,0.126,0.941},
columns=fixed,}
\markboth{EE643}{Jonathan Klein}
\begin{document}
\title{Parallelized Viterbi Algorithm in Python using PyCUDA and PP}
\author{\IEEEauthorblockN{Jonathan Klein}\\
\IEEEauthorblockA{Department of Electrical and\\Computer Engineering\\
University of Alaska, Fairbanks\\
Fairbanks, Alaska 99775, USA\\
Email: [email protected]}}
\maketitle
\begin{abstract}
%\boldmath
Pyterbi is an open source Viterbi decoder parallelized either with CUDA or SMP. It is written in Python and uses the PyCUDA and PP modules. The PyCUDA parallelized Viterbi decoder is approximately 100 times faster than the single core unparallelized reference implementation. Each probable path at each state is computed in parallel, and the backtraces of multiple trellises are computed in parallel. The PP parallelized path can process multiple Viterbi trellises simultaneously and can dispatch them across multiple cores or computers.
\hfill \today
\end{abstract}
\section{Introduction}
Pyterbi is an open source parallelized Viterbi decoder. It is capable of finding the most likely sequence of states through an arbitrary number of probabilistic finite state machines given noisy output observations. The PyCUDA parallelized path extracts parallelism by computing the most likely previous state for each possible state every time step in parallel. The PP parallelized path dispatches trellises to multiple CPU cores, or servers over a network.
Viterbi decoders have applications to pattern recognition and communication systems, including speech recognition, convolutional code decoding, and continuous phase modulation decoding [1] [2]. A parallelized Viterbi decoder could be useful for wireless base stations communicating with many devices using convolutional coding or continuous phase modulation, or a call center performing voice recognition on several incoming calls. More realistically, pyterbi provides an open source example of PyCUDA, PP, and generating graphs using matplotlib.
The Viterbi algorithm maintains the most likely sequence of states leading to each state; this is repeated for each observation. See Figure~\ref{fig:forcode} for the reference Python implementation of the forward pass of the Viterbi algorithm.
\begin{figure}[h!]
\begin{lstlisting}
# set initial probabilities
path_p[0,:] = init_p + emit_p[obs[0]]
for n in range(1, nobs):
for m in states:
# calculate probabilities of previous states transitioning to state m
p = emit_p[obs[n]][m] + trans_p[:,m] + path_p[n-1]
# select most likely previous state for state m
back[n][m] = numpy.argmax(p)
# set that as probability of state m at time n
path_p[n][m] = numpy.amax(p)
\end{lstlisting}
\caption{Python code for the Viterbi forward pass.}
\label{fig:forcode}
\end{figure}
Once the most likely prior state for each state and observation in the trellis has been tabulated, the most likely path through a trellis can be computed. Starting with the most likely final state, the most likely sequence of states can be traced back through the trellis. See Figure~\ref{fig:backcode} for Python code implementing the backward pass of the Viterbi algorithm.
\begin{figure}[h!]
\begin{lstlisting}
route = numpy.zeros(nobs,dtype=numpy.int16)
# find the most likely final state
route[-1] = numpy.argmax(path_p[-1,:])
for n in range(2,nobs+1):
# backtrace through trellis selecting the most likely previous trace
route[-n] = back[nobs-n+1,route[nobs-n+1]]
\end{lstlisting}
\caption{Python code for the Viterbi backtrace.}
\label{fig:backcode}
\end{figure}
\section{Hardware Platform}
Pyterbi was tested on a Lenovo T420-41786VU laptop. This platform has a few limitations which add uncertainty to the benchmarks.
\subsection{Host Processor}
The Intel i5-2520M processor supports Intel Turbo Boost, which dynamically scales core frequencies to maintain temperature limits [4]. This means that an application running on one core could see higher clock frequencies than an application which keeps two cores active. Additionally, the T420 laptop can't sustain full processor usage on both cores for an extended period of time. The laptop provided warning messages to a terminal stating that processor speed was being scaled back to avoid overheating while running an extended PP benchmark. These factors will increase the relative performance of shorter single core tasks.
\begin{figure}[!h]
\begin{tabular}{ | l | l |}
\hline
Processor & Intel i5-2520M, 2.50GHz \\ \hline
Cache & 3MB Cache \\ \hline
Cores & 2 Hyper-Threaded cores \\ \hline
Memory & 4096MB RAM \\ \hline
OS & Linux 2.6.38-10 \\ \hline
\end{tabular}
\end{figure}
\subsection{Graphics Card}
PyCUDA was written and tested on a NVIDIA NVS4200M graphics card. This is a low-end laptop graphics card with only a 64-bit memory bus and 48 cores. Performance of pyterbi would probably increase on a higher performance desktop graphics card.
\begin{figure}[!h]
\begin{tabular}{ | l | l |}
\hline
Graphics Card& Nvidia NVS 4200M \\ \hline
Memory & 1024 MB DDR3\\ \hline
CUDA Cores & 48 \\ \hline
Memory Bus Width & 64 bits\\ \hline
Driver & 270.41.19 \\ \hline
CUDA Version & 4.0 \\ \hline
Core Speed & 800 MHz \\ \hline
Device to Host Bandwidth & 2600MB/s \\ \hline
Host to Device Bandwidth & 3000MB/s \\ \hline
Compute Capability & 2.1 \\ \hline
\end{tabular}
\end{figure}
\section{Parallelization}
The PyCUDA path of pyterbi was parallelized by computing the most likely previous state for each state in parallel for an observation. This is equivalent to parallelizing the inner for loop over states in Figure~\ref{fig:forcode}. These can be computed in parallel, because each state only depends on information from previous observations. See Figure~\ref{fig:cpm} for an illustration of this. In this figure, each thread is represented by a color, and lines represent accessing the probability of prior states. The threads are synchronized at every observation after computing the most likely previous state. Each trellis is mapped to a block of threads, which can share a grid with other trellises.
\begin{figure}
\includegraphics[width=.8\textwidth]{figures/cpmfulltrelliscolored.png}
\caption{Viterbi algorithm at time T=2Ts. Figure adapted from Proakis [2].}
\label{fig:cpm}
\end{figure}
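Figure~\ref{fig:cudasketch} gives a simplified, illustrative PyCUDA sketch of this mapping; it is not the pyterbi kernel itself (the names, the memory layout, and the absence of shared-memory caching are simplifications), but it shows one thread per destination state computing its most likely predecessor for a single observation step.
\begin{figure}[h!]
\begin{lstlisting}
import numpy
import pycuda.autoinit                      # creates a CUDA context
import pycuda.driver as cuda
from pycuda.compiler import SourceModule

mod = SourceModule("""
__global__ void viterbi_step(const float *prev_p, const float *trans_p,
                             const float *emit_p, float *path_p,
                             int *back, int nstates)
{
    int m = threadIdx.x;                    // one thread per state m
    if (m >= nstates) return;
    float best = prev_p[0] + trans_p[0 * nstates + m];
    int arg = 0;
    for (int k = 1; k < nstates; k++) {     // scan predecessor states
        float p = prev_p[k] + trans_p[k * nstates + m];
        if (p > best) { best = p; arg = k; }
    }
    path_p[m] = best + emit_p[m];           // log-probabilities are added
    back[m] = arg;
}
""")
step = mod.get_function("viterbi_step")

nstates = 8
prev_p = numpy.random.rand(nstates).astype(numpy.float32)
trans_p = numpy.random.rand(nstates, nstates).astype(numpy.float32)
emit_p = numpy.random.rand(nstates).astype(numpy.float32)
path_p = numpy.empty(nstates, dtype=numpy.float32)
back = numpy.empty(nstates, dtype=numpy.int32)

step(cuda.In(prev_p), cuda.In(trans_p), cuda.In(emit_p),
     cuda.Out(path_p), cuda.Out(back), numpy.int32(nstates),
     block=(nstates, 1, 1), grid=(1, 1))
\end{lstlisting}
\caption{Illustrative PyCUDA sketch of one parallel Viterbi step (not the pyterbi source).}
\label{fig:cudasketch}
\end{figure}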
The PP path is parallelized by dispatching processes to compute each trellis. This is coarser parallelism than the PyCUDA path. Finer grained parallelism was not easily possible using the standard Python interpreter because of the global interpreter lock, which prevents concurrent execution of multiple threads. Processes are more expensive to create than threads and cannot share memory as easily, so instead of creating threads for each state in a trellis, the PP path of pyterbi creates a process for each trellis.
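Figure~\ref{fig:ppsketch} shows an illustrative sketch of this dispatch; the function and variable names are placeholders rather than the pyterbi source.
\begin{figure}[h!]
\begin{lstlisting}
import pp

# `decode` is assumed to be the single-trellis reference implementation
# (forward pass plus backtrace) and `trellises` a list of argument tuples
# (obs, init_p, trans_p, emit_p), one per trellis.
job_server = pp.Server()   # local worker processes; pass ppservers=(...)
                           # to add remote nodes for cluster operation
jobs = [job_server.submit(decode, args, (), ("numpy",))
        for args in trellises]
routes = [job() for job in jobs]   # calling a job blocks until it is done
\end{lstlisting}
\caption{Illustrative sketch of dispatching trellises with PP (placeholder names).}
\label{fig:ppsketch}
\end{figure}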
\section{Software Design and Data Location}
The first version of the PyCUDA path of pyterbi was 15 times faster than the reference path. Many optimizations and tweaks later, this was improved to a 100 times speedup.
The first optimization of pyterbi was moving the emission probability, observation, and transition probability matrices from global to constant memory. This increased execution time by 50\%, possibly due to stuffing too much information into the constant cache and ignoring the L1/L2 cache present for global memory on compute capability 2.1 cards. Moving only the observation sequence to constant memory improved performance by 5\%. Eventually, the observation sequence and all other constant information were cached in shared memory instead of constant memory to support an arbitrary number of trellises.
The next optimization to Pyterbi was caching the emission probability, observation, and transition matrices in shared memory. These matrices are filled from global memory at the start of the kernel in parallel, with a coalesced memory access pattern. Caching the constant matrices in shared memory at the start of each kernel reduced execution time by another 5\%. In addition to caching constant information in shared memory, the slices of the probability matrix representing the current and previous state probabilities are stored in shared memory.
The largest speed up for the PyCUDA path of pyterbi came from reducing communication between the host and graphics card. The initial version of pyterbi called a kernel for each observation, implicitly synchronizing. Moving the observation loop to the kernel and explicitly synchronizing threads in a kernel reduced the number of kernel calls from the number of observations to once per trellis, which led to over a 100\% speedup.
The final optimization to pyterbi was moving the backtrace computation from the host to the device. This meant the backtrace of multiple trellises could be computed in parallel, and only the route information needs to be copied back to the host for each trellis, instead of copying back the final path probabilities and the entire backtrace matrix, which is much larger. Optimizing the backtrace led to a 40\% speedup.
\section{Results}
The SMP PP path of pyterbi approaches a two times speedup on a dual core hardware platform as the complexity of a trellis increases. See Figure \ref{fig_rhost} for a plot of the speedup of the PP path across observation sequence lengths and number of states. The speedup may be approaching two for more complicated inputs because the constant time associated with creating and dispatching jobs becomes dwarfed by time spent processing the trellises. The speedup will probably approach four on a four core processor given a sufficiently large number of complex trellises.
\begin{figure*}[!t]
\centering
\includegraphics[width=.5 \linewidth]{figures/speedupgraphhost.png}
\caption{Speedup of multiple process parallelized pyterbi over a single process decoder.}
\label{fig_rhost}
\end{figure*}
In addition to parallelizing across multiple processor cores, PP also allows parallelization on clusters of computers. As a proof of concept, I tested pyterbi in a cluster consisting of a T420 laptop connected to a low-end VPS sharing a single AMD Opteron core with an unknown number of other virtual machines through a tethered internet connection from a cell phone. Clustering a single core of the T420 with the server led to a 10\% speedup. The speedup would probably be improved by using a cluster of faster computers connected with a more suitable network connection.
The PyCUDA path of pyterbi provides over 100 times speedup compared to the reference implementation for sufficiently complex trellises. See Figure \ref{fig_rcuda} for a graph of PyCUDA speedup as the observation length and number of states increases. The speedup generally increases as the observation length increases, and as the number of states in the trellis increases. This is probably because the increased complexity with an increase in states is $O(N)$ for the PyCUDA path, and $O(N^2)$ for the reference path. The speedup also increases as the observation length increases. This is probably because the relatively constant setup time of creating and initializing the kernel is overtaken by the time spent traversing the trellis.
\begin{figure*}[!t]
\centering
\includegraphics[width=.5 \linewidth]{figures/speedupgraphcuda.png}
\caption{Speedup from CUDA parallelized viterbi decoder.}
\label{fig_rcuda}
\end{figure*}
\section{Conclusion}
Pyterbi achieves a noticeable speedup over a non-parallelized reference implementation. It provided an opportunity to learn about CUDA and the PyCUDA and PP libraries.
Several improvements to pyterbi are possible:
\begin{itemize}
\item Rewriting the host code in a faster language (C)
\item Rewriting host code for finer-grained parallelism with threads
\item Testing kernel concurrency on more powerful graphics cards
\item Testing realistic cases of cluster computing
\item Getting PyCUDA working with PP for CUDA accelerated cluster computing
\end{itemize}
PyCUDA made accessing the CUDA API more pleasant than using CUDA from C. Abstracting away the cleanup of objects, error checking, and some of the syntax for memory transfers was helpful. While it doesn't save the programmer from understanding what is happening (and the kernel must still be written in C), it makes programming and debugging faster and easier. Learning CUDA through PyCUDA is a little more gentle. Likewise, the PP module was impressive. Easy to use parallelism libraries for higher level languages make parallel programming very accessible, and reduce the cost of parallelizing suitable applications.
All the source code from this project, including this paper, is available under an MIT license at \url{http://github.com/loxodes/pyterbi}
\section*{References}
[1] Lou, H.-L.; , ``Implementing the Viterbi algorithm,'' Signal Processing Magazine, IEEE , vol.12, no.5, pp.42-52, Sep 1995 \\
[2] John Proakis. Digital Communications. McGraw-Hill Science/Engineering/Math, 5 edition, 2007. \\
[3] C. Liu, ``CuHMM: a CUDA Implementation of Hidden Markov Model Training and Classification,'' 2009 \\
[4] Intel, ``Intel Turbo Boost Technology 2.0,'' 2011, URL: \url{http://www.intel.com/content/www/us/en/architecture-and-technology/turbo-boost/turbo-boost-technology.html}\\
[5] Vanovschi V., ``Parallel Python Software,'' 2011, \url{http://www.parallelpython.com}\\
[6] Andreas Kloeckner, ``PyCUDA,'' 2011, \url{http://mathema.tician.de/software/pycuda}\\
\end{document}
| {
"alphanum_fraction": 0.7612406328,
"avg_line_length": 75.8526315789,
"ext": "tex",
"hexsha": "d96045b23516206a41a1fd811d720703005aaa09",
"lang": "TeX",
"max_forks_count": 2,
"max_forks_repo_forks_event_max_datetime": "2020-08-31T16:44:45.000Z",
"max_forks_repo_forks_event_min_datetime": "2018-06-06T13:08:40.000Z",
"max_forks_repo_head_hexsha": "1c6255d610877d786a95ea9a026e6a8499affe14",
"max_forks_repo_licenses": [
"MIT"
],
"max_forks_repo_name": "loxodes/pyterbi",
"max_forks_repo_path": "doc/pycuda_paper.tex",
"max_issues_count": null,
"max_issues_repo_head_hexsha": "1c6255d610877d786a95ea9a026e6a8499affe14",
"max_issues_repo_issues_event_max_datetime": null,
"max_issues_repo_issues_event_min_datetime": null,
"max_issues_repo_licenses": [
"MIT"
],
"max_issues_repo_name": "loxodes/pyterbi",
"max_issues_repo_path": "doc/pycuda_paper.tex",
"max_line_length": 734,
"max_stars_count": 5,
"max_stars_repo_head_hexsha": "1c6255d610877d786a95ea9a026e6a8499affe14",
"max_stars_repo_licenses": [
"MIT"
],
"max_stars_repo_name": "loxodes/pyterbi",
"max_stars_repo_path": "doc/pycuda_paper.tex",
"max_stars_repo_stars_event_max_datetime": "2021-02-03T18:07:45.000Z",
"max_stars_repo_stars_event_min_datetime": "2015-03-12T18:02:06.000Z",
"num_tokens": 3310,
"size": 14412
} |
\chapter{WURC PCB Designs}
\label{sec_wurc_pcd_designs}
Multiple PCB designs were implemented as part of this thesis. In this appendix, we provide mechanical and electrical design material related to their design.
\section{RF-Protoboard}
\section{AGC-Test-Board}
\section{WURC}
\section{WAB-1x1}
\section{WAB-1x4}
%\section{WARP MicroBlaze Console API}
%
%\begin{singlespace}
%\small
%\begin{verbatim}
%
%TODO
%
%\end{verbatim}
%\end{singlespace} | {
"alphanum_fraction": 0.7621145374,
"avg_line_length": 17.4615384615,
"ext": "tex",
"hexsha": "bcede7dded63abbf96c47de274b973747f7df05c",
"lang": "TeX",
"max_forks_count": null,
"max_forks_repo_forks_event_max_datetime": null,
"max_forks_repo_forks_event_min_datetime": null,
"max_forks_repo_head_hexsha": "acf1ebafee00a8e4375008e60e35da8affc97d9b",
"max_forks_repo_licenses": [
"Apache-2.0"
],
"max_forks_repo_name": "RyanEGuerra/ryan_guerra_phd_thesis",
"max_forks_repo_path": "sec/appendix_pcb_designs.tex",
"max_issues_count": null,
"max_issues_repo_head_hexsha": "acf1ebafee00a8e4375008e60e35da8affc97d9b",
"max_issues_repo_issues_event_max_datetime": null,
"max_issues_repo_issues_event_min_datetime": null,
"max_issues_repo_licenses": [
"Apache-2.0"
],
"max_issues_repo_name": "RyanEGuerra/ryan_guerra_phd_thesis",
"max_issues_repo_path": "sec/appendix_pcb_designs.tex",
"max_line_length": 157,
"max_stars_count": null,
"max_stars_repo_head_hexsha": "acf1ebafee00a8e4375008e60e35da8affc97d9b",
"max_stars_repo_licenses": [
"Apache-2.0"
],
"max_stars_repo_name": "RyanEGuerra/ryan_guerra_phd_thesis",
"max_stars_repo_path": "sec/appendix_pcb_designs.tex",
"max_stars_repo_stars_event_max_datetime": null,
"max_stars_repo_stars_event_min_datetime": null,
"num_tokens": 136,
"size": 454
} |
\documentclass[10pt,twocolumn,letterpaper]{article}
\usepackage{algorithmicx}
\usepackage{algorithm}
\usepackage{booktabs}
\usepackage{cvpr}
\usepackage{graphicx}
\usepackage{color, colortbl}
\usepackage{algpseudocode}
\usepackage{booktabs}
\usepackage{hyperref}
\renewcommand{\algorithmicrequire}{\textbf{Input:}}
\renewcommand{\algorithmicensure}{\textbf{Output:}}
\renewcommand{\thefootnote}{$\star$}
\cvprfinalcopy % *** Uncomment this line for the final submission
\usepackage{footmisc} % Daggers for author notes
\DefineFNsymbols{mySymbols}{{\ensuremath\dagger}{\ensuremath\ddagger}\S\P
*{**}{\ensuremath{\dagger\dagger}}{\ensuremath{\ddagger\ddagger}}}
\setfnsymbol{mySymbols}
% Colors for highlighting tables
\definecolor{Gray}{gray}{0.9}
% Pages are numbered in submission mode, and unnumbered in camera-ready
%\ifcvprfinal\pagestyle{empty}\fi
\setcounter{page}{1}
\begin{document}
%%%%%%%%% TITLE
\title{Methods for handling missing and categorical data for classification with neural
networks$^\star$}
\author{
Jason Poulos\thanks{\href{mailto:[email protected]}{\nolinkurl{[email protected]}}. SID: 24993379.}
\hspace{10mm}
Rafael Valle\thanks{\href{mailto:[email protected]}{\nolinkurl{[email protected]}}. SID: 24090989.}
\vspace{15mm}
}
\maketitle
%\thispagestyle{empty}
%%%%%%%%% ABSTRACT
\begin{abstract}
Researchers analyzing survey data typically choose decision trees or random forests for prediction tasks, largely because missing data and categorical variables are not easy to handle with neural networks. This paper investigates techniques for handling missing data and encoding categorical data such that it is appropriate to neural network classifiers. We experiment on the Adult dataset ($N=48,842$), comparing six different imputation strategies, a simple and a complex neural network classifier, and a 3x3 parameter grid. We select three cross--validated models to predict on the test data and find the simple neural network trained on data with missing values imputed using random forests yields the lowest generalization error.
\end{abstract}
\footnotetext[1]{The video presentation can be viewed at \url{https://youtu.be/2bSPE6gzbN8}. The code used for this project is available at \url{https://github.com/jvpoulos/cs289-project}.}
%\linenumbers
%% main text
\section{Introduction} \label{section:Intro}
Missing data is a common problem in survey data in various domains. Several
techniques for data imputation (i.e., replacing missing values with plausible ones) and
direct estimation (i.e., all missing data is analyzed using a maximum likelihood
approach) have been proposed ~\cite{de2003prevention}. \\
Random Forests and other ensembles of decision trees are the method of choice for survey data, largely because missing data and categorical variables are not easy to handle with neural networks. We investigate techniques for handling missing data and
encoding categorical data such that it is appropriate to neural network classifiers. We compare six different imputation strategies: case substitution; mean or median imputation; one--hot; hot deck and cold deck; prediction model; and factor analysis. These strategies are defined in Section \ref{section:techniques}. \\
After briefly reviewing related works in Section~\ref{section:rw}, we experiment using neural networks on benchmark data and compare our results with the state-of-the-art in Section~\ref{section:experiments}. Finally, we draw conclusions in Section~\ref{section:Con}.
\section{Related work} \label{section:rw}
\subsection{Techniques for handling missing data} \label{section:techniques}
We categorize proposed imputation methods into six groups listed
below ~\cite{batista2003analysis}:
\begin{description}
\item[Case substitution] One observation with missing data is replaced with
another non-sampled observation.
\item[Summary statistic] Replace the missing data with the mean, median, or mode of
the feature vector. Using a numerical approach directly is not appropriate for nonordinal categorical data.
\item[One-hot] Create a binary variable to indicate whether or not a specific
feature is missing. %This technique was suggested by Isabelle Guyon.
\item[Hot deck and cold deck] Compute the K-Nearest Neighbors of the
observation with missing data and assign the mode of the K-neighbors
to the missing data. %A similar technique is used in Airbnb's fraud detection
% algorithm.
\item[Prediction Model] Train a prediction model (e.g., random forests) to predict the missing value. %This requires correlation amongst features to exist.
\item[Factor analysis] Perform factor analysis (e.g., principal component analysis (PCA)) on the design
matrix, project the design matrix onto the first two eigenvectors and
replace the missing values by the values that might be given by the
projected design matrix.
\end{description}
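As a hedged illustration (not the code used in our experiments; the column names and library calls below are stand-ins), two of these strategies, the one-hot missingness indicator and summary-statistic imputation, can be sketched with pandas and scikit-learn:
\begin{footnotesize}
\begin{verbatim}
import numpy as np
import pandas as pd
from sklearn.impute import SimpleImputer

X = pd.DataFrame({
    "workclass": ["Private", np.nan,
                  "State-gov", np.nan],
    "age": [39.0, 50.0, np.nan, 23.0]})

# one-hot indicator of missingness
X["workclass_na"] = X["workclass"].isna().astype(int)

# summary statistics: mode (categorical), median (numeric)
mode = SimpleImputer(strategy="most_frequent")
med = SimpleImputer(strategy="median")
X[["workclass"]] = mode.fit_transform(X[["workclass"]])
X[["age"]] = med.fit_transform(X[["age"]])
\end{verbatim}
\end{footnotesize}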
\subsection{Neural networks for classification with categorical and
continuous features} Common techniques for handling categorical data in
neural networks include encoding the categorical values into numeric values
or using binary encoding. These techniques, however, have some drawbacks
including unnecessarily increasing model complexity or feature dimensionality
and not preserving the similarity information embedded between categorical
values ~\cite{hsu2006generalizing}.\\
More elaborate techniques include information theoretic measures
~\cite{wang2008categorical}, training separate output units for
each of the allowed combination of values of the categorical independent
variables ~\cite{brouwer2002feed}, and using distance
hierarchies ~\cite{hsu2006generalizing}. \\
In the case of categorical variables, which by definition have no direct
representation or computation scheme of the distance between its values,
decision trees can be useful because they do not require distance metrics.
However, their training process is slow given a large enough dataset and they
might not be suitable for problems where the decision boundary between classes
is described by a second-order polynomial,\footnote{We note, however, that a
property test can be as complex as the data at hand.} for
example ~\cite{fayyad1996data}.
\section{Experiments} \label{section:experiments}
\subsection{Benchmark data set}
We experiment on the Adult dataset from the UCI Machine Learning Repository ~\cite{Lichman2013}. The dataset has $N=48,842$ instances, $2/3$ for training and $1/3$ reserved as a final test set (i.e., $\mathrm{train}=32,561$ and $\mathrm{test}=16,281$). The dataset contains 14 features: 6 continuous and 8 categorical. The prediction task is to determine whether a person makes over \$50,000 a year. 24\% of individuals in the training data make more than this amount. \\
Table \ref{tab:benchmarks} shows the test error rates obtained by the data set donor ~\cite{kohavi1996}. All error rates were obtained after removing samples with missing values. The state--of--the--art is a Naive Bayes classifier that achieves a 14.05\% error rate. \\
\begin{table}[htb]
\centering
\begin{tabular}{@{}ll@{}}
\toprule
\textbf{Algorithm} & \textbf{Error (\%)} \\ \midrule
1 C4.5 & 15.54 \\
2 C4.5-auto & 14.46 \\
3 C4.5 rules & 14.94 \\
4 Voted ID3 (0.6) & 15.64 \\
5 Voted ID3 (0.8) & 16.47 \\
6 T2 & 16.84 \\
7 1R & 19.54 \\
8 NBTree & 14.10 \\
9 CN2 & 16.00 \\
10 HOODG & 14.82 \\
\rowcolor{Gray}
11 FSS Naive Bayes & 14.05 \\
12 IDTM (Decision table) & 14.46 \\
13 Naive-Bayes & 16.12 \\
14 Nearest-neighbor (1) & 21.42 \\ \bottomrule
\end{tabular}
\caption{Test set error rates on Adult dataset for various algorithms, obtained after removal of samples with missing values and using the original train/test split. Source: \cite{Lichman2013}.}
\label{tab:benchmarks}
\end{table}
\subsection{Patterns of missing values in Adult dataset}
The Adult dataset has 3,620 (7.4\%) samples containing missing values. Missing values occur in three of the categorical features: \textit{Work class}, \textit{Occupation}, and \textit{Native country}. It is unlikely that these values are missing completely at random (MCAR); it is more likely, and much less desirable, that the values are missing not at random (MNAR). Since these data originate from a survey, the missing values may be due to respondents being unwilling or unable to provide an answer. \\
Uncovering patterns of missing values in the dataset will help select strategies for imputing missing values. The histogram (left) in Figure \ref{fig:proportion-missing} shows \textit{Work class} and \textit{Occupation} each have about 5.6\% of missing values, and \textit{Native country} has about 1.7\% missing values. The aggregation plot (right) shows 5.5\% of samples are missing values for both \textit{Work class} and \textit{Occupation}. Less than 2\% of samples are missing just \textit{Native country} and less than 1\% are missing all three features.\\
Figure \ref{fig:barplot-missing} shows the frequency of observed categories and missing values for \textit{Work class} and \textit{Occupation}. Each stacked column shows the proportion of missing values in the other feature and \textit{Native country} for each category. The plot shows the missing values are not MCAR: individuals working in the private sector, for instance, are more likely to have missing values than those individuals in other work classes. However, missing values tend to be evenly distributed across occupational categories.
\begin{figure*}[htbp]
\centering
\includegraphics[width=0.8\textwidth]{./figure/proportion-missing.pdf}
\caption{Histogram of proportion of missing values in each feature (Left) of Adult training set and aggregation plot of all existing combinations of missing and non-missing values in the samples (Right).}
\label{fig:proportion-missing}
\end{figure*}
\begin{figure*}[htbp]
\centering
\includegraphics[width=0.8\textwidth]{./figure/barplot-missing.pdf}
\caption{Barplot of proportion of observed and missing values of \textit{Work class} and \textit{Occupation} in Adult dataset.}
\label{fig:barplot-missing}
\end{figure*}
\subsection{Preprocessing}
We preprocess the training and test data as follows:
\begin{enumerate}
\item Drop the string feature \texttt{education} because it contains the same information as its numeric counterpart \texttt{education.num}.
\item Binarize the categorical variables.
\item Implement imputation technique. When replacing the missing data with the mean, median, or mode of
the feature vector, the summary statistic is computed from the training data, not from the test data.
\item Standardize each feature to midrange 0 and range 2 (i.e., minimum -1 and maximum 1). We use the training set values for range and midrange to standardize test set features.
\end{enumerate}
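As a hedged illustration of steps 1, 2 and 4 (toy data and column names, not our actual preprocessing script), the binarization and scaling can be written with pandas as follows:
\begin{footnotesize}
\begin{verbatim}
import pandas as pd

train_raw = pd.DataFrame({
    "education": ["HS-grad", "Bachelors"],
    "education.num": [9, 13],
    "workclass": ["Private", "State-gov"]})
test_raw = pd.DataFrame({
    "education": ["Bachelors"],
    "education.num": [13],
    "workclass": ["Private"]})

# steps 1-2: drop the duplicate, binarize categoricals
train = pd.get_dummies(train_raw.drop(columns=["education"]))
test = pd.get_dummies(test_raw.drop(columns=["education"]))
test = test.reindex(columns=train.columns, fill_value=0)

# step 4: midrange 0, range 2, from the training range only
mid = (train.max() + train.min()) / 2
half = ((train.max() - train.min()) / 2).replace(0, 1)
train = (train - mid) / half
test = (test - mid) / half
\end{verbatim}
\end{footnotesize}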
\subsection{Model selection}
We implement two neural network classifiers: a simple model and a complex model. The complex model uses two hidden layers, both with the same number of hidden nodes, which is computed using the formula $N_h = \frac{N_s}{\alpha (N_i + N_o)}$, where the number of hidden nodes $N_h$ is a function of the number of training examples $N_s$, a scaling parameter $\alpha$, the number of input neurons $N_i$, and the number of output neurons $N_o$. The complex model uses root-mean-square gradient scaling for the update rule, while the simple model uses one hidden layer and stochastic gradient descent for the update rule. Both models use a cross--entropy cost function and weights initialized randomly using a Gaussian distribution and scaled by a factor of $10^{-2}$.
We use two layers of cross--validation for selecting model parameters: grid search and $K$--fold cross validation. We perform an exhaustive search on a grid of parameter values composed of $\alpha = \{1,4,9\}$, learning rate $\gamma = \{10^{-1}, 10^{-2}, 10^{-3}\}$, and $\mathrm{mini-batch\, size} = \{32, 512, 4096\}$. For each parameter combination in the grid, we perform $K$--fold cross--validation on $k=3$ folds. We evaluate the models based on the average error rate across folds and also record the average cost and average computing time across folds.
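As a hedged illustration of this two-layer scheme (a scikit-learn stand-in, not our own network implementation), the hidden-layer-size rule and the grid loop can be sketched as:
\begin{footnotesize}
\begin{verbatim}
import itertools
from sklearn.datasets import make_classification
from sklearn.model_selection import cross_val_score
from sklearn.neural_network import MLPClassifier

X, y = make_classification(n_samples=2000, n_features=20,
                           random_state=0)
n_s, n_i, n_o = X.shape[0], X.shape[1], 1

results = []
grid = itertools.product([1, 4, 9],
                         [1e-1, 1e-2, 1e-3],
                         [32, 512, 4096])
for alpha, gamma, batch in grid:
    n_h = max(1, int(n_s / (alpha * (n_i + n_o))))
    clf = MLPClassifier(hidden_layer_sizes=(n_h,),
                        solver="sgd",
                        learning_rate_init=gamma,
                        batch_size=batch, max_iter=50)
    err = 1 - cross_val_score(clf, X, y, cv=3).mean()
    results.append(((alpha, gamma, batch), err))

best = min(results, key=lambda r: r[1])
\end{verbatim}
\end{footnotesize}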
We train both simple and complex classifiers on data with missing values imputed using most of the techniques described in Section \ref{section:techniques} and on data with instances with missing values removed. Figure \ref{fig:predicted-simple-2d} provides a visual example of the results of the simple neural network on training data with missing values imputed using random forests. Table \ref{tab:err-rates} reports the models with the lowest cross--validated error rate. The simple neural network outperforms the complex classifier in most cases. We find that omitting instances with missing values yields a lower error rate on the training data than all of the imputation methods. The difference between imputation methods in terms of accuracy is minimal.
\begin{table*}[htbp]
\begin{center}
\begin{tabular}{llllllll}
\textbf{Imputation method} & \textbf{Model type} & \textbf{$\alpha$} & \textbf{$\gamma$} & \textbf{Batch size} & \textbf{Error rate} & \textbf{Cost} & \textbf{Time (min.)} \\
\hline
\rowcolor{Gray}
Remove instances with missing values & Simple & 9 & 0.1 & 32 & 0.1406 & 0.1525 & 3.7 \\
Factor analysis (PCA) & Simple & 4 & 0.1 & 32 & 0.1411 & 0.2487 & 8.3667 \\
\rowcolor{Gray}
Prediction model (random forests) & Simple & 4 & 0.1 & 32 & 0.1413 & 0.2056 & 6.7 \\
\rowcolor{Gray}
Summary statistic (mode) & Simple & 9 & 0.1 & 32 & 0.1415 & 0.1829 & 4.2667 \\
Case substitution & Simple & 1 & 0.1 & 32 & 0.1417 & 0.2189 & 6.8 \\
Prediction model (random forests) & Complex & 1 & 0.1 & 32 & 0.1426 & 0.2919 & 9.9 \\
Factor analysis (PCA) & Complex & 1 & 0.01 & 32 & 0.143 & 0.3175 & 11 \\
Remove instances with missing values & Complex & 1 & 0.1 & 32 & 0.1439 & 0.1778 & 8.5 \\
Summary statistic (median) & Simple & 4 & 0.1 & 32 & 0.1446 & 0.1924 & - \\
Summary statistic (mean) & Simple & 4 & 0.1 & 32 & 0.1453 & 0.1915 & - \\
Summary statistic (mean) & Complex & 1 & 0.1 & 32 & 0.1593 & 0.3118 & - \\
Summary statistic (median) & Complex & 1 & 0.01 & 32 & 0.1595 & 0.3564 & - \\
\hline
\end{tabular}
\end{center}
\caption{Performance of models selected on the basis of cross--validated error rate on the training data. \texttt{Imputation method} is how missing values in the training data are imputed; \texttt{Model type} is the type of neural network classifier used; \texttt{$\alpha$} is the scaling factor used to determine the number of hidden neurons in the neural network; \texttt{$\gamma$} is the learning rate; \texttt{Batch size} is the size of the batch; \texttt{Error rate} is the mean 3-fold cross--validated error rate on the training data; \texttt{Cost} is the mean cross--entropy cost across folds; \texttt{Time} is the mean training time across folds. }
\label{tab:err-rates}
\end{table*}
\begin{figure*}[htbp]
\centering
\includegraphics[width=0.8\textwidth]{./figure/predicted-simple-2d.png}
\caption{Performance of simple neural network on training data with missing values imputed using random forests: 3-fold cross--validated error rate versus $\alpha$ (x--axis) and $\gamma$ (colors). See Table \ref{tab:err-rates} for definitions.}
\label{fig:predicted-simple-2d}
\end{figure*}
\subsection{Model assessment}
We select the three highlighted models in Table \ref{tab:err-rates} to train on the entire training set and then fit each model on the test features. We handle missing values in the test features in the same manner as the training features. The simple neural network trained on data with missing values imputed using random forests yields the lowest test error rate (16.63\%), followed by mode imputation (16.84\%) and removal of instances with missing values (18.59\%).
\section{Conclusion} \label{section:Con}
Neural networks have become a popular machine learning algorithm in many domains, in part due to the ability of neural networks to ``learn'' how to engineer features. However, researchers analyzing survey data typically choose decision trees or random forests for prediction tasks because missing data and categorical variables are not easy to handle with neural networks. This paper investigates techniques for handling missing data and encoding categorical data such that it is appropriate to neural network classifiers. We compare six different imputation strategies, a simple and complex neural network classifier, and a 3x3 parameter grid.
We use the Adult dataset for benchmarking because it has a mixture of continuous and categorical variables and has under 10\% of the instances containing missing values. Removing instances with missing values instead of imputing the missing values actually yields the highest cross--validated accuracy on the training set. This finding suggests that the instances with missing values or the features that contain missing values (i.e., \textit{occupation}, \textit{work class}, and \textit{native country}) do not contribute meaningful information for the prediction task. Another interpretation is that there are not enough missing values in the training data for imputation to make a difference.
However, we find that two of the imputation methods --- prediction using random forests and replacing the missing values with column modes --- both yield lower generalization error than no imputation. Our lowest test error is within 3\% of the state--of--the--art, which uses Naive Bayes and removes data with missing values.
For future work, we will explore the relevance of the features with missing data and the amount of missing data for this particular prediction task.
%\section*{Acknowledgments}
{\small
\bibliographystyle{ieee}
\bibliography{refs}
}
\end{document}
| {
"alphanum_fraction": 0.7577063308,
"avg_line_length": 73.5853658537,
"ext": "tex",
"hexsha": "0b4230ce5248070927eb7c58965169a1f826ab42",
"lang": "TeX",
"max_forks_count": null,
"max_forks_repo_forks_event_max_datetime": null,
"max_forks_repo_forks_event_min_datetime": null,
"max_forks_repo_head_hexsha": "a3686de769c9709587a68731275a0b7065ba88f1",
"max_forks_repo_licenses": [
"MIT"
],
"max_forks_repo_name": "jvpoulos/cs289-project",
"max_forks_repo_path": "report/cs289-report.tex",
"max_issues_count": null,
"max_issues_repo_head_hexsha": "a3686de769c9709587a68731275a0b7065ba88f1",
"max_issues_repo_issues_event_max_datetime": null,
"max_issues_repo_issues_event_min_datetime": null,
"max_issues_repo_licenses": [
"MIT"
],
"max_issues_repo_name": "jvpoulos/cs289-project",
"max_issues_repo_path": "report/cs289-report.tex",
"max_line_length": 764,
"max_stars_count": null,
"max_stars_repo_head_hexsha": "a3686de769c9709587a68731275a0b7065ba88f1",
"max_stars_repo_licenses": [
"MIT"
],
"max_stars_repo_name": "jvpoulos/cs289-project",
"max_stars_repo_path": "report/cs289-report.tex",
"max_stars_repo_stars_event_max_datetime": null,
"max_stars_repo_stars_event_min_datetime": null,
"num_tokens": 4540,
"size": 18102
} |
\documentclass[11pt]{article}
\input{packages.tex}
\input{tikz.tex}
\input{thmstyle.tex}
\input{macros.tex}
\title{Lattices of compatibly embedded finite fields}
\author{}
\begin{document}
\maketitle
\begin{center}
\begin{tikzpicture}
\node (E) at (0, 0) {$E$};
\node (F) at (1.5, 1) {$F$};
\node (G) at (0.5, 2) {$G$};
\draw[arrow] (E) -- (F);
\draw[arrow] (E) -- (G);
\draw[arrow] (F) -- (G);
\node (f12) at (1.25, 0.25) {$\embed{E}{F}$};
\node (f13) at (-0.35, 1) {$\embed{E}{G}$};
\node (f23) at (1.6, 1.65) {$\embed{F}{G}$};
\end{tikzpicture}
\end{center}
\section{Introduction}
Given two finite fields $E$ and $F$ with cardinalities $|E|=p^{m}$ and
$|F|=p^{n}$, we know that $E$ can be embedded in $F$ if and only if $m\,|\,n$.
In other words, $E$ is in that case isomorphic to a subfield $E'\subset F$ of $F$ with
cardinality $|E'|=p^{m}$. There are
$m=[E:\mathbb{F}_p]=|\Gal(E/\mathbb{F}_p)|$ distinct embeddings from $E$ to
$F$ (the degree of $E$ over $\mathbb{F}_p$ will also be denoted by
$\partial(E)$). Indeed, the Galois group of the extension $E$ over $\mathbb{F}_p$ acts
on the embeddings. Given two different embeddings $\embed{E}{F}$ and
$\embed{E}{F}'$ and an element $x\in E$, the images $\embed{E}{F}(x)$ and
$\embed{E}{F}'(x)$ must be conjugates. As a result, there is no canonical
embedding from $E$ to $F$. Furthermore, the proof of the fact that $E$ can be
embedded in $F$ if and only if $\dE\,|\,\dF$ is not constructive, so computing the
embedding is itself a challenging problem, and there exists a variety of
solutions.
In this document, we do not recall the embedding algorithms; we often
consider them as black boxes that we use to construct embeddings between
finite fields, and we study the compatibility between these embeddings. Given
three finite fields $E$, $F$, and $G$, such that $\dE\,|\,\dF$ and $\dF\,|\,\dG$, and three embeddings
$\embed{E}{F}$, $\embed{F}{G}$, and $\embed{E}{G}$, we say that the
embeddings are \emph{compatible} if
\[
\embed{E}{G}=\embed{F}{G}\circ\embed{E}{F}.
\]
In other words, we want the diagram of Figure~\ref{fig:compatibility} to
commute. We also note $E\emb F$ if $E$ is explicitly embedded in $F$, \ie if
we have computed an embedding $\embed{E}{F}$.
\begin{figure}
\centering
\begin{tikzpicture}
\node (E) at (0, 0) {$E$};
\node (F) at (1.5, 1) {$F$};
\node (G) at (0.5, 2) {$G$};
\draw[arrow] (E) -- (F);
\draw[arrow] (E) -- (G);
\draw[arrow] (F) -- (G);
\node (f12) at (1.25, 0.25) {$\embed{E}{F}$};
\node (f13) at (-0.35, 1) {$\embed{E}{G}$};
\node (f23) at (1.6, 1.65) {$\embed{F}{G}$};
\end{tikzpicture}
\caption{Embeddings between finite fields.}
\label{fig:compatibility}
\end{figure}
The background of this work is the development of a computer algebra
system, where we want the user to be able to define arbitrary finite
fields and to work with them without having to care about compatibility
between the different embeddings he or she has to use. These goals are
achieved in the computer algebra systems MAGMA~\cite{Magma} and
Sagemath~\cite{Sagemath}. We introduce the
framework of Bosma, Cannon and Steel~\cite{BCS97} in
Section~\ref{sec:bcs-framework}. Next, we discuss the current implementation in
Nemo of
this framework in Section~\ref{sec:implem}.
\section{Bosma, Cannon, and Steel framework}
\label{sec:bcs-framework}
\begin{figure}
\centering
\begin{tikzpicture}
\node (E) at (0, 0) {$E$};
\node (F) at (1.5, 1) {$F$};
\node (G) at (0.5, 2) {$G$};
\draw[arrow] (E) -- (F);
\draw[arrow] (E) -- (G);
\draw[dashed-arrow] (F) -- (G);
\node (f12) at (1.25, 0.25) {$\embed{E}{F}$};
\node (f13) at (-0.35, 1) {$\embed{E}{G}$};
\end{tikzpicture}
\caption{An uncomplete diagram.}
\label{fig:uncomplete}
\end{figure}
Let $E$, $F$, and $G$ be finite fields with
$\partial(E)\,|\,\partial(F)$ and
$\partial(F)\,|\,\partial(G)$. We also assume that $E\emb F$ and $E\emb G$,
hence we are in the situation described by Figure~\ref{fig:uncomplete}
where we miss one embedding. In order to complete the diagram of this
figure, Bosma,
Cannon and Steel suggest taking an arbitrary embedding $\embed{F}{G}'$ and
``correcting'' it by composing $\embed{F}{G}'$ with an element
$\sigma\in\Gal(G/\mathbb{F}_p)$ such that
\[
\embed{E}{G}=\sigma\circ\embed{F}{G}'\circ\embed{E}{F}.
\]
We can then set $\embed{F}{G}=\sigma\circ\embed{F}{G}'$ and the
obtained embedding is compatible by construction. We also see that once we have
one compatible embedding, we can derive other compatible embeddings from it by
precomposing by an element $\xi$ of $\Gal(F/E)$. Indeed, such an element
$\xi$ fixes the elements in $E$, hence the compatibility conditions are
still verified after precomposition. One may wonder what happens
if there are several subfields $E_1, \dots, E_r$, or if the configuration is not
the one presented in Figure~\ref{fig:uncomplete}.
There are three configurations with triangles, namely the ones shown in
Figure~\ref{fig:triangles}. We already discussed the configuration on the
left, which is the one in Figure~\ref{fig:uncomplete}. The configuration in the
middle is easier to handle because we can set
\[
\embed{E}{G}=\embed{F}{G}\circ\embed{E}{F}
\]
since we have $E\emb F\emb G$. Finally, we will see later that the
configuration on the right cannot happen in the framework we use because of the
conditions we impose on the finite fields and on the embeddings between them.
\begin{figure}
\centering
\begin{tikzpicture}
\node (E) at (0, 0) {$E$};
\node (F) at (1.5, 1) {$F$};
\node (G) at (0.5, 2) {$G$};
\draw[arrow] (E) -- (F);
\draw[arrow] (E) -- (G);
\draw[dashed-arrow] (F) -- (G);
\end{tikzpicture}
\phantom{and}
\begin{tikzpicture}
\node (E) at (0, 0) {$E$};
\node (F) at (1.5, 1) {$F$};
\node (G) at (0.5, 2) {$G$};
\draw[arrow] (E) -- (F);
\draw[dashed-arrow] (E) -- (G);
\draw[arrow] (F) -- (G);
\end{tikzpicture}
\phantom{and}
\begin{tikzpicture}
\node (E) at (0, 0) {$E$};
\node (F) at (1.5, 1) {$F$};
\node (G) at (0.5, 2) {$G$};
\draw[dashed-arrow] (E) -- (F);
\draw[arrow] (E) -- (G);
\draw[arrow] (F) -- (G);
\end{tikzpicture}
\caption{The different configurations with triangles.}
\label{fig:triangles}
\end{figure}
If we have a pair $\mathfrak L=(L, \Phi)$, where
$L$ is a set of finite fields and $\Phi$ is a set of embeddings between
elements of $L$, we say that $\mathfrak L$ is a \emph{lattice of compatibly
embedded finite fields} if
\begin{enumerate}
\item[CE1] (unicity) for each pair $(E, F)$ of elements in $L$, there exists
at most one corresponding embedding $\embed{E}{F}\in\Phi$.
\item[CE2] (reflexivity) For each $E\in L$, the identity map
$\Id_E=\embed{E}{E}$ is in $\Phi$.
\item[CE3] (prime subfield) There is exactly one $P\in L$ such that $\partial
(P) = 1$, and for all $F\in L$, there exists $\embed{P}{F}\in\Phi$.
\item[CE4] (invertibility) If $E\emb F$ and $\dE=\dF$, then $F\emb E$ and
$\embed{F}{E}=\embed{E}{F}^{-1}$.
\item[CE5] (transitivity) For any triple $(E, F, G)$ of elements in $L$, if $E\emb
F\emb G$ then $E\emb G$ and
$\embed{E}{G}=\embed{F}{G}\circ\embed{E}{F}$.
\item[CE6] (intersections) For each $E, F, G\in L$ such that $F\emb G$ and
$E\emb G$, there exists $S\in L$ such that $\partial(S)=\gcd(\dE, \dF)$
and $S\emb E$, $S\emb F$.
\end{enumerate}
Most of these conditions are very natural. Condition CE3 is
technical and does not imply any work in our implementation because
finite field elements in Nemo/Flint~\cite{Nemo, Flint} are represented by
polynomials over $\mathbb{F}_p$, so the embedding of $\mathbb{F}_p$ into an
extension is trivial. Finally, condition
CE6 ensures that the implicit isomorphisms between subfields are made
explicit.
Under those conditions, we can prove~\cite{BCS97} that we are able to add a finite field in
$L$ or an embedding that is not yet in $\Phi$ without altering the compatibility
of the lattice $\mathfrak L$.
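To make the correction step concrete, here is a small illustrative sketch in SageMath syntax (the names below are only meant to mirror the description above; our implementation, described in the next section, is based on Nemo and proceeds differently): an arbitrary embedding $\embed{F}{G}'$ is corrected by a power of the Frobenius until the triangle of Figure~\ref{fig:compatibility} commutes.
\begin{verbatim}
from sage.all import GF

p = 5
E, F, G = GF(p**2, 'a'), GF(p**4, 'b'), GF(p**8, 'c')
f_EF = E.hom([E.modulus().roots(F, multiplicities=False)[0]])
f_EG = E.hom([E.modulus().roots(G, multiplicities=False)[0]])
f_FG = F.hom([F.modulus().roots(G, multiplicities=False)[0]])  # arbitrary

for k in range(G.degree()):
    frob = G.frobenius_endomorphism(k)         # x |-> x^(p^k)
    if frob(f_FG(f_EF(E.gen()))) == f_EG(E.gen()):
        phi_FG = lambda x, s=frob: s(f_FG(x))  # compatible F -> G
        break
\end{verbatim}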
\section{Implementation in Nemo}
\label{sec:implem}
In practice, we do not compute our embeddings by correcting random embeddings as
suggested in Section~\ref{sec:bcs-framework}. Instead, we use the naive
algorithm to compute a compatible embedding that does not need any correction.
Assume we are in the situation of Figure~\ref{fig:uncomplete}, \ie we have $E$,
$F$, $G$ finite fields and $E\emb F$, $E\emb G$, $\dF\,|\,\dG$. Let $\alpha_F$
be a generator of $F$ over $\mathbb{F}_p$, then $\alpha_F$ is also a generator
of $F$ over $E$. Let $\pi_{E}(\alpha_F)$ be the minimal polynomial of $\alpha_F$
over $E$, and let $\rho$ be a root of $\embed{E}{G}(\pi_E(\alpha_F))$, the
minimal polynomial of $\alpha_F$ viewed in $G$. We set $\embed{F}{G}$ to the
embedding mapping $\alpha_F$ to $\rho$. More precisely, we set
\[
\embed{F}{G}(\sum_{i=0}^{[F:E]-1}e_i\alpha_F^i) =
\sum_{i=0}^{[F:E]-1}\embed{E}{G}(e_i)\rho^i,
\]
hence we see that
\[
\embed{E}{G} = \embed{F}{G}\circ\embed{E}{F}
\]
and the new embedding is compatible with the already existing ones. We take
the canonical generator of $F$ over $\mathbb{F}_p$ to be $\alpha_F$, we compute
$\pi_E(\alpha_F)$ using linear algebra and we compute $\embed{F}{G}$ using
linear algebra too. Assume now that we have several finite fields $E_1, \cdots, E_r$ such
that for all $j$, $E_j\emb F$, $E_j\emb G$ and $\dF\,|\,\dG$. This is the
situation of Figure~\ref{fig:uncomplete-sev}.
\begin{figure}
\centering
\begin{tikzpicture}
\node (E1) at (-2, 0) {$E_1$};
\node (E2) at (-1, 0) {$E_2$};
\node (Er) at (0.75, 0) {$E_r$};
\node (F) at (1.5, 1) {$F$};
\node (G) at (0.5, 2) {$G$};
\node (p) at (0, 0) {$\dots$};
\draw[arrow] (E1) -- (F);
\draw[arrow] (E1) -- (G);
\draw[arrow] (E2) -- (F);
\draw[arrow] (E2) -- (G);
\draw[arrow] (Er) -- (F);
\draw[arrow] (Er) -- (G);
\draw[dashed-arrow] (F) -- (G);
\end{tikzpicture}
\caption{An uncomplete diagram with several subfields.}
\label{fig:uncomplete-sev}
\end{figure}
In that case, we consider the polynomial
\[
P = \gcd_i(\embed{E_i}{G}(\pi_{E_i}(\alpha_F))),
\]
and we let $\rho$ be a root of $P$. We see that $\rho$ is a root of each
polynomial $\embed{E_i}{G}(\pi_{E_i}(\alpha_F))$, so the embedding mapping
$\alpha_F$ to $\rho$ is compatible with all the previously existing embeddings.
\section{Conclusion}
In practice, because of condition CE6 concerning intersections, we might have to
compute additional embeddings before computing the desired embedding. In
conclusion, in order to embed a finite field $F$ in $G$, we have to:
\begin{enumerate}
\item for each subfield $S$ of $G$, check if the finite field $S\cap F$ is
embedded in $S$ and $F$, and if not, embed it. In practice, if there is not
any finite field of degree $d=\gcd(\partial(S), \dF)$, we compute an
arbitrary finite field $I$ of degree $d$ using Flint
and we embed $I$ in $S$ and $F$.
\item Embed $F$ in $G$ using Section~\ref{sec:implem} procedure.
\item Compute the ``transitive closure'' of the lattice, \ie compute the
embeddings such that condition CE5 holds. In practice we compute all the
embeddings and keep them in memory.
\end{enumerate}
The first step implies a recursive call to our embedding algorithm, so the
complexity of the operation might explode at that step.
There are several things that could be enhanced in our current framework. First,
we could use graph algorithms in order to compute the transitive closure only
when asked by the user. We could also compute minimal polynomials using
the Berlekamp--Massey algorithm instead of linear algebra.
\bibliographystyle{plain}
\bibliography{erou}
\end{document}
| {
"alphanum_fraction": 0.6501375115,
"avg_line_length": 40.8129251701,
"ext": "tex",
"hexsha": "76d06514a67b45ea69439471d8e5fee15c8f8d16",
"lang": "TeX",
"max_forks_count": null,
"max_forks_repo_forks_event_max_datetime": null,
"max_forks_repo_forks_event_min_datetime": null,
"max_forks_repo_head_hexsha": "5f73dc37587c61fec494562d7d23795bdaf7d37f",
"max_forks_repo_licenses": [
"MIT"
],
"max_forks_repo_name": "erou/compatible-embeddings",
"max_forks_repo_path": "compatible-lattices.tex",
"max_issues_count": null,
"max_issues_repo_head_hexsha": "5f73dc37587c61fec494562d7d23795bdaf7d37f",
"max_issues_repo_issues_event_max_datetime": null,
"max_issues_repo_issues_event_min_datetime": null,
"max_issues_repo_licenses": [
"MIT"
],
"max_issues_repo_name": "erou/compatible-embeddings",
"max_issues_repo_path": "compatible-lattices.tex",
"max_line_length": 102,
"max_stars_count": 1,
"max_stars_repo_head_hexsha": "5f73dc37587c61fec494562d7d23795bdaf7d37f",
"max_stars_repo_licenses": [
"MIT"
],
"max_stars_repo_name": "erou/compatible-embeddings",
"max_stars_repo_path": "compatible-lattices.tex",
"max_stars_repo_stars_event_max_datetime": "2019-05-17T16:33:19.000Z",
"max_stars_repo_stars_event_min_datetime": "2019-05-17T16:33:19.000Z",
"num_tokens": 4073,
"size": 11999
} |
\subsection{gammapy.modeling}
\label{ssec:gammapy-modeling}
\todo{Quentin Remy}
gammapy.modeling contains all the functionality related to modeling and fitting
data. This includes spectral, spatial and temporal model classes, as well as
the fit and parameter API.
\subsubsection{Models}
\label{ssec:models}
The models are grouped into the following categories:
\begin{itemize}
\item SpectralModel: models to describe spectral shapes of sources
\item SpatialModel: models to describe spatial shapes (morphologies) of sources
\item TemporalModel: models to describe temporal flux evolution of sources, such as
light and phase curves
\end{itemize}
The models follow a naming scheme which contains the category as a suffix to
the class name.
The Spectral Models include a special class of Normed models, which have a
dimensionless normalisation. These spectral models feature a norm parameter
instead of an amplitude and are named using the NormSpectralModel suffix. They
must be used along with another spectral model, as a multiplicative correction
factor according to their spectral shape. They are typically used for
adjusting template-based models, or for adding an EBL correction to an analytic
model. The analytic Spatial models are all normalized such that they integrate to
unity over the sky, but the template Spatial models may not, so in that special
case they have to be combined with a NormSpectralModel.
The SkyModel is a factorised model that combines the spectral, spatial and
temporal model components (the spatial and temporal components are optional).
SkyModel objects represent additive emission components, usually sources or
diffuse emission, although a single source can also be modeled by multiple
components. To handle lists of multiple SkyModel components, Gammapy
has a Models class.
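As a rough illustration of this factorisation (a minimal sketch using the v1.0
API; the source name and parameter values here are arbitrary), a point source
with a power-law spectrum can be declared and collected into a Models container
as follows:
\begin{minted}{python}
from gammapy.modeling.models import (
    Models,
    PointSpatialModel,
    PowerLawSpectralModel,
    SkyModel,
)

# Spectral and spatial components of a single additive emission component
spectral = PowerLawSpectralModel(
    index=2.3, amplitude="1e-12 cm-2 s-1 TeV-1", reference="1 TeV"
)
spatial = PointSpatialModel(lon_0="0 deg", lat_0="0 deg", frame="galactic")

# Factorised SkyModel; the temporal component is optional and omitted here
source = SkyModel(spectral_model=spectral, spatial_model=spatial, name="my-source")

# Container handling a list of SkyModel components
models = Models([source])
\end{minted}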
The model gallery provides a visual overview of the available models in
Gammapy. Most of the analytic models commonly used in gamma-ray astronomy are
built-in. We also offer a wrapper to radiative models implemented in the Naima
package~\cite{naima}. The modeling framework can be easily extended with
user-defined models. For example, agnpy models that describe leptonic radiative
processes in jetted Active Galactic Nuclei (AGN) can be wrapped into
gammapy~\citep[see Section~3.5 of][]{2021arXiv211214573N}.
\begin{figure}
\import{code-examples/generated/}{gp_models}
\caption{Using gammapy.modeling.models}
\label{fig*:minted:gp_models}
\end{figure}
\subsubsection{Fit}
\label{ssec:fit}
The Fit class provides methods to fit, i.e., optimise parameters and estimate
parameter errors and correlations. It interfaces with a Datasets object, which
in turn is connected to a Models object containing the model parameters in its
Parameters object. Models can be unique for a given dataset, or contribute to
multiple datasets and thus provide links, allowing, e.g., a joint fit to
multiple IACT datasets, or to a combined IACT and \textit{Fermi}-LAT dataset.
Many examples are given in the tutorials.
The Fit class provides a uniform interface to multiple fitting backends:
“minuit”~\citep{iminuit}, “scipy”~\citep{2020SciPy-NMeth}, and
“sherpa”~\citep{sherpa-2005,sherpa-2011}. Note that, for now, the covariance
matrix and errors are computed only when fitting with MINUIT. However,
depending on the problem, other optimizers may perform better, so it can
sometimes be useful to run a pre-fit with alternative optimization methods. In
the future we plan to extend the supported Fit backends, including for example
MCMC solutions.\footnote{A prototype is available in gammapy-recipes,
\url{https://gammapy.github.io/gammapy-recipes/_build/html/notebooks/mcmc-sampling-emcee/mcmc_sampling.html}}
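For orientation, a minimal (illustrative) fit could look as follows, assuming
{\tt datasets} is an existing Datasets object with models already attached:
\begin{minted}{python}
from gammapy.modeling import Fit

# Run the optimisation with the default MINUIT backend and print a summary
fit = Fit(backend="minuit")
result = fit.run(datasets=datasets)
print(result)
\end{minted}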
\section{\module{shelve} ---
Python object persistence}
\declaremodule{standard}{shelve}
\modulesynopsis{Python object persistence.}
A ``shelf'' is a persistent, dictionary-like object. The difference
with ``dbm'' databases is that the values (not the keys!) in a shelf
can be essentially arbitrary Python objects --- anything that the
\refmodule{pickle} module can handle. This includes most class
instances, recursive data types, and objects containing lots of shared
sub-objects. The keys are ordinary strings.
\refstmodindex{pickle}
To summarize the interface (\code{key} is a string, \code{data} is an
arbitrary object):
\begin{verbatim}
import shelve
d = shelve.open(filename) # open, with (g)dbm filename -- no suffix
d[key] = data # store data at key (overwrites old data if
# using an existing key)
data = d[key] # retrieve data at key (raise KeyError if no
# such key)
del d[key] # delete data stored at key (raises KeyError
# if no such key)
flag = d.has_key(key) # true if the key exists
list = d.keys() # a list of all existing keys (slow!)
d.close() # close it
\end{verbatim}
Restrictions:
\begin{itemize}
\item
The choice of which database package will be used
(e.g. \refmodule{dbm} or \refmodule{gdbm}) depends on which interface
is available. Therefore it is not safe to open the database directly
using \refmodule{dbm}. The database is also (unfortunately) subject
to the limitations of \refmodule{dbm}, if it is used --- this means
that (the pickled representation of) the objects stored in the
database should be fairly small, and in rare cases key collisions may
cause the database to refuse updates.
\refbimodindex{dbm}
\refbimodindex{gdbm}
\item
Depending on the implementation, closing a persistent dictionary may
or may not be necessary to flush changes to disk.
\item
The \module{shelve} module does not support \emph{concurrent} read/write
access to shelved objects. (Multiple simultaneous read accesses are
safe.) When a program has a shelf open for writing, no other program
should have it open for reading or writing. \UNIX{} file locking can
be used to solve this, but this differs across \UNIX{} versions and
requires knowledge about the database implementation used.
\end{itemize}
\begin{seealso}
\seemodule{anydbm}{Generic interface to \code{dbm}-style databases.}
\seemodule{dbhash}{BSD \code{db} database interface.}
\seemodule{dbm}{Standard \UNIX{} database interface.}
\seemodule{dumbdbm}{Portable implementation of the \code{dbm} interface.}
\seemodule{gdbm}{GNU database interface, based on the \code{dbm} interface.}
\seemodule{pickle}{Object serialization used by \module{shelve}.}
\seemodule{cPickle}{High-performance version of \refmodule{pickle}.}
\end{seealso}
\chapter*{Declaration by Supervisor}
\addcontentsline{toc}{chapter}{Declaration by Supervisor}
\begin{flushleft}
I/We have supervised and accepted this dissertation for the submission of the degree.
\vspace{15mm}
{\makebox[6.5cm]{\dotfill}} \hfill {\makebox[5cm]{\dotfill}} \\
\supervisorA \hfill Date \\
% \vspace{15mm}
% {\makebox[6.5cm]{\dotfill}} \hfill {\makebox[5cm]{\dotfill}} \\
% \supervisorB \hfill Date \\
\end{flushleft}
\chapter{API description}
\label{chapter:api}
This section describes the API calls used to get data from the DDS. The
data is accessed through a hierarchical REST API where deeper URIs return more
specific data. All call URIs have a common root \url{rest_api/v5}, which
has been omitted below for clarity. All GET calls return JSON objects unless
otherwise noted. Key/value pairs are listed as the name of the key
along with the type of the corresponding value within parentheses, followed
by a brief description of the contents. See the sections on the different
data sources for specifications on the structure of their respective JSON
objects. This description is only for the \smr~version~3 Level2 data. For
details on how to access the collocated measurements from the VDS,
see~\cite{VDS:2016}, for more details on the Level2 data format
see~\cite{iodd, atbdl2data}, and for a more general description of the \smr~API,
see \url{http://odin.rss.chalmers.se/apidocs/index.html}.
\section{API calls}
\subsection{\url{level2/DDS/}}
Method: \emph{GET}
Get project information.
\paragraph{Parameters:} None.
\paragraph{Data structure:}
Returns JSON object with the following structure:
\begin{lstlisting}[basicstyle=\footnotesize]
{
"Data": [
{
"FreqMode": <int>,
"URLS": {
"URL-comment": <string: URL for comments for scans>,
"URL-failed": <string: URL for failed scans>,
"URL-scans": <string: URL for scans>
}
},
...
],
"Count": <int: number of FreqModes in project>,
"Type": "level2_project_freqmode"
}
\end{lstlisting}
\subsection{\url{level2/DDS/area}}
\label{api:area}
Method: \emph{GET}
Get data in provided area.
\paragraph{Parameters:}
\begin{itemize}
\item product \emph{(array of strings)}: Return data only for these
products
\item min\_lat \emph{(float)}: Minimum latitude (-90 to 90)
\item max\_lat \emph{(float)}: Maximum latitude (-90 to 90)
\item min\_lon \emph{(float)}: Minimum longitude (0 to 360)
\item max\_lon \emph{(float)}: Maximum longitude (0 to 360)
\item min\_pressure \emph{(float)}: Minimum pressure (Pa)
\item max\_pressure \emph{(float)}: Maximum pressure (Pa)
\item min\_altitude \emph{(float)}: Minimum altitude (m)
\item max\_altitude \emph{(float)}: Maximum altitude (m)
\item start\_time \emph{(date)}: Return data after this date (inclusive)
\item end\_time \emph{(date)}: Return data before this date (exclusive)
\end{itemize}
Provide latitude and/or longitude limits to get data for a certain area of the
earth. If no latitude or longitude limits are provided, data for the whole
earth is returned. Choose between min/max altitude and min/max pressure.
\paragraph{Data structure:}
Returns JSON object with the following structure:
\begin{lstlisting}[basicstyle=\footnotesize]
{
"Data": <array of Level2 Data Structures as described below>,
"Count": <int: number of matching data>,
"Type": "L2"
}
\end{lstlisting}
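As an illustration, the following minimal Python sketch queries this endpoint
with the \texttt{requests} library (the service root is the one given above;
the product name is a placeholder and must be replaced by a name returned by
the products endpoint):
\begin{lstlisting}[language=Python, basicstyle=\footnotesize]
import requests

URL = "http://odin.rss.chalmers.se/rest_api/v5/level2/DDS/area"
params = {
    "product": "<product name>",    # placeholder, see the products endpoint
    "min_lat": -30, "max_lat": 30,  # latitude band
    "start_time": "2010-01-01",     # inclusive
    "end_time": "2010-02-01",       # exclusive
}
response = requests.get(URL, params=params)
response.raise_for_status()
data = response.json()
print(data["Count"], "matching data entries")
\end{lstlisting}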
\subsection{\url{level2/DDS/locations}}
Method: \emph{GET}
Get data close to provided location.
\paragraph{Parameters:}
\begin{itemize}
\item product \emph{(array of strings)}: Return data only for these
products
\item location \emph{(array of strings; required)}: Return data close to
these locations ('lat,lon').
\item radius \emph{(float; required)}: Return data within this radius from
the provided locations (km).
\item min\_pressure \emph{(float)}: Minimum pressure (Pa)
\item max\_pressure \emph{(float)}: Maximum pressure (Pa)
\item min\_altitude \emph{(float)}: Minimum altitude (m)
\item max\_altitude \emph{(float)}: Maximum altitude (m)
\item start\_time \emph{(date)}: Return data after this date (inclusive)
\item end\_time \emph{(date)}: Return data before this date (exclusive)
\end{itemize}
Provide one or more locations and a radius to get data within the resulting
circles on the earth surface. Choose between min/max altitude and min/max
pressure.
\paragraph{Data structure:}
Returns JSON object with the same structure as endpoint~\ref{api:area}.
\subsection{\url{level2/DDS/products/}}
\label{api:products}
Method: \emph{GET}
Get available products.
\paragraph{Parameters:} None.
\paragraph{Data structure:}
Returns JSON object with the following structure:
\begin{lstlisting}[basicstyle=\footnotesize]
{
"Data": <array of strings: product names>,
"Count": <int: number of available products>,
"Type": "level2_product_name"
}
\end{lstlisting}
\subsection{\url{level2/DDS/<string:date>/}}
Method: \emph{GET}
Get data for the provided date.
\paragraph{Parameters:}
\begin{itemize}
\item product \emph{(array of strings)}: Return data only for these
products
\item min\_pressure \emph{(float)}: Minimum pressure (Pa)
\item max\_pressure \emph{(float)}: Maximum pressure (Pa)
\item min\_altitude \emph{(float)}: Minimum altitude (m)
\item max\_altitude \emph{(float)}: Maximum altitude (m)
\item start\_time \emph{(date)}: Return data after this date (inclusive)
\item end\_time \emph{(date)}: Return data before this date (exclusive)
\end{itemize}
Choose between min/max altitude and min/max pressure.
\paragraph{Data structure:}
Returns JSON object with the same structure as endpoint~\ref{api:area}.
\subsection{\url{level2/DDS/<int:frequency mode>/comments/}}
Method: \emph{GET}
Get list of comments for a frequency mode.
\paragraph{Parameters:}
\begin{itemize}
\item offset \emph{(int)}: Skip scans before returning
\item limit \emph{(int; default: 1000)}: Number of scans to return
\end{itemize}
\paragraph{Data structure:}
Returns JSON object with the following structure:
\begin{lstlisting}[basicstyle=\footnotesize]
{
"Data": [
{
"Comment": <string: comment from processing>,
"URLS": {
"URL-failed": <string: URL for failed scans with comment>,
"URL-scans": <string: URL for successful scans with comment>
}
},
...
],
"Count": <int: number of unique comments>,
"Type": "level2_scan_comment"
}
\end{lstlisting}
\subsection{\url{level2/DDS/<int:frequency mode>/failed/}}
Method: \emph{GET}
Get list of matching scans that failed the level2 processing.
\paragraph{Parameters:}
\begin{itemize}
\item start\_time \emph{(date)}: Return data after this date (inclusive)
\item end\_time \emph{(date)}: Return data before this date (exclusive)
\item comment \emph{(string)}: Return scans with this comment
\item offset \emph{(int)}: Skip scans before returning
\item limit \emph{(int; default: 1000)}: Number of scans to return
\end{itemize}
\paragraph{Data structure:}
Returns JSON object with the following structure:
\begin{lstlisting}[basicstyle=\footnotesize]
{
"Data": [
{
"Date": <string: date>,
"Error": <string: error message>,
"ScanID": <int: scan number>,
"URLS": {
"URL-ancillary": <string: URL for Level2 ancillary data>,
"URL-level2": <string: URL for Level2 data>,
"URL-log": <string: URL for Level1 log data>,
"URL-spectra": <string: URL for Level1 spectra>
}
},
...
],
"Count": <int: number of matching scans>,
"Type": "level2_failed_scan_info"
}
\end{lstlisting}
\subsection{\url{level2/DDS/<int:frequency mode>/products/}}
Method: \emph{GET}
Get available products for a given project and frequency mode.
\paragraph{Parameters:} None.
\paragraph{Data structure:}
Returns JSON object with the same structure as endpoint~\ref{api:products}.
\subsection{\url{level2/DDS/<int:frequency mode>/scans/}}
Method: \emph{GET}
Get list of matching scans.
\paragraph{Parameters:}
\begin{itemize}
\item start\_time \emph{(date)}: Return data after this date (inclusive)
\item end\_time \emph{(date)}: Return data before this date (exclusive)
\item comment \emph{(string)}: Return scans with this comment
\item offset \emph{(int)}: Skip scans before returning
\item limit \emph{(int; default: 1000)}: Number of scans to return
\end{itemize}
\paragraph{Data structure:}
Returns JSON object with the following structure:
\begin{lstlisting}[basicstyle=\footnotesize]
{
"Data": [
{
"Date": <string: date>,
"ScanID": <int: scan number>,
"URLS": {
"URL-ancillary": <string: URL for Level2 ancillary data>,
"URL-level2": <string: URL for Level2 data>,
"URL-log": <string: URL for Level1 log data>,
"URL-spectra": <string: URL for Level1 spectra>
}
},
...
],
"Count": <int: number of matching scans>,
"Type": "level2_scan_info"
}
\end{lstlisting}
\subsection{\url{level2/DDS/<int:frequency mode>/<int:scan number>/}}
Method: \emph{GET}
Get level2 data, info, comments, and ancillary data for one scan and frequency
mode.
\paragraph{Parameters:} None.
\paragraph{Data structure:}
Returns JSON object with the following structure:
\begin{lstlisting}[basicstyle=\footnotesize]
{
"Data": {
"L2": <Level2 Data Structure as described below>,
"L2anc": <Level2 Ancillary Data Structure as described below>,
"L2c": <Level2 Comments Data Structure as described below>,
"L2i": <Level2 Info Data Structure as described below>,
},
"Count": null,
"Type": "mixed"
}
\end{lstlisting}
\subsection{\url{level2/DDS/<frequency mode>/<scan number>/L2/}}
Method: \emph{GET}
Get level2 data for one scan and frequency mode.
\paragraph{Parameters}:
\begin{itemize}
\item product \emph{(array of strings)}: Return data only for these
products
\end{itemize}
\paragraph{Data structure:}
Returns JSON object with the following structure:
\begin{lstlisting}[basicstyle=\footnotesize]
{
"Data": [
{
"AVK": <array of arrays of floats: averaging kernels at altitudes>,
"Altitude": <array of floats: altitudes>,
"Apriori": <array of floats: apriori data at altitudes>,
"ErrorNoise": <array of floats: noise error at altitudes>,
"ErrorTotal": <array of floats: total error at altitudes>,
"FreqMode": <int: frequency mode>,
"InvMode": "string",
"Lat1D": <float: approximate latitude of retrieval>,
"Latitude": <array of floats: latitudes for retrieval at altitudes>,
"Lon1D": <float: approximate latitude of retrieval>,
"Longitude": <array of floats: longitude for retrieval at altitudes>,
"MJD": <float: Modified Julian Date for retrieval>,
"MeasResponse": <array of floats: measurement response at altitudes>,
"Pressure": <array of floats: pressure at altitudes [Pa]>,
"Product": <string: product name>,
"Quality": <int: quality flag>,
"ScanID": <int: scan number>,
"Temperature": <array of float: retrieved temperature at altitudes [K]>,
"VMR": <array of float: retrieved volumetric mixing ratio at altitudes>
}
],
"Count": null,
"Type": "L2"
}
\end{lstlisting}
\subsection{\url{level2/DDS/<int:frequency mode>/<int:scan number>/L2anc/}}
Method: \emph{GET}
Get ancillary data for one scan and frequency mode.
\paragraph{Parameters:} None.
\paragraph{Data structure:}
Returns JSON object with the following structure:
\begin{lstlisting}[basicstyle=\footnotesize]
{
"Data": [
{
"FreqMode": <int: freqcuency mode>,
"InvMode": <string: inversion mode used in retrieval>,
"LST": <float: local solar time>,
"Lat1D": <float: approximate latitude of retrieval>,
"Latitude": <array of floats: latitudes for retrieval at altitudes>,
"Lon1D": <float: approximate latitude of retrieval>,
"Longitude": <array of floats: longitude for retrieval at altitudes>,
"MJD": <float: Modified Julian Date for retrieval>,
"Orbit": <int: orbit number>,
"Pressure": <array of floats: pressure at altitudes [Pa]>,
"SZA": <array of floats: solar zenith angle for retrieval at altitudes>,
"SZA1D": <float: approximate solar zenith angle for retrieval>,
"ScanID": <int: scan number>,
"Theta": <array of floats: potential temperature>
}
],
"Count": 1,
"Type": "string"
}
\end{lstlisting}
\subsection{\url{level2/DDS/<int:frequency mode>/<int:scan number>/L2i/}}
Method: \emph{GET}
Get level2 auxiliary data for one scan and frequency mode.
\paragraph{Parameters:} None.
\paragraph{Data structure:}
Returns JSON object with the following structure:
\begin{lstlisting}[basicstyle=\footnotesize]
{
"Data": {
"BlineOffset": <array of arrays of floats: baseline offsets for spectra>,
"ChannelsID": <array of floats: channel identifier>,
"FitSpectrum": <array of arrays of floats: fitted spectra [K]>
"FreqMode": <int: frequency mode>,
"FreqOffset": <float: retrieved LO frequency offset [Hz]>,
"InvMode": <string: inversion mode used in retrieval>,
"LOFreq": <array of floats: LO frequency for each spectrum in scan [Hz]>,
"MinLmFactor": <float: minimum Levenberg-Marquard factor of OEM>,
"PointOffset": <float: retrieved pointing offset [degree],
"Residual": <float: residual of retrieved and measured spectra>,
"SBpath": <float: sideband path used for retrieving spectra [m]>,
"STW": <array of floats: satellite time words for spectra>,
"ScanID": <int: scan number>,
"Tsat": <float: satellite onboard temperature [K]>,
"URLS": {
"URL-ancillary": <string: URL for Level2 ancillary data>,
"URL-level2": <string: URL for Level2 data>,
"URL-log": <string: URL for Level1 log data>,
"URL-spectra": <string: URL for Level1 spectra>
}
},
"Count": null,
"Type": "L2i"
}
\end{lstlisting}
\subsection{\url{level2/DDS/<int:frequency mode>/<int:scan number>/L2c/}}
Method: \emph{GET}
Get level2 comments for one scan and frequency mode.
\paragraph{Parameters:} None.
\paragraph{Data structure:}
Returns JSON object with the following structure:
\begin{lstlisting}[basicstyle=\footnotesize]
{
"Data": <array of strings: comments>
"Count": <int: number of comments for scan>,
"Type": L2c
}
\end{lstlisting}
\section{Example usage}
\label{sec:api_usage}
This is a brief example of how to use the \smr~API to access the DDS in Python.
The basic procedure for navigating the call hierarchy is the same in all major
programming languages and browsers.
\lstinputlisting[language=Python, basicstyle=\footnotesize]{example.py}
\chapter{Timeline}
\label{sec:timeline}
\TrickHLA\ provides a mechanism for the user to specify the scenario timeline
for the simulation. \TrickHLA\ needs access to the scenario timeline in order
to coordinate freezing (pausing) the federation. The scenario timeline is the
only timeline that can be counted on to be synchronized between all the federates.
% ---------------------------------------------------------------------------
\section{What is the {\em TrickHLATimeline} class?}
\TrickHLA\ provides a {\tt TrickHLATimeline} class with a {\tt get\_time()}
method that is used for getting the current simulation scenario time. This is
a virtual method and must be overridden by a derived class in order to add
application-specific functionality to the simulation. If a scenario timeline
is not specified by the user, then \TrickHLA\ will use the Trick simulation time
as the default scenario timeline, which is only valid if all Federates are using
Trick and start with the same simulation time.
% ---------------------------------------------------------------------------
\subsection{{\tt TrickHLATimeline}}
The header file for the {\tt TrickHLATimeline} class is shown below.
\begin{lstlisting}[caption={{\tt TrickHLATimeline} class header}]
class TrickHLATimeline
{
friend class InputProcessor;
friend void init_attrTrickHLATimeline();
public:
TrickHLATimeline();
virtual ~TrickHLATimeline();
private:
TrickHLATimeline(const TrickHLATimeline & rhs);
TrickHLATimeline & operator=(const TrickHLATimeline & rhs);
public:
virtual double get_time(); // Returns a time in seconds, typically
// Terrestrial Time (TT) for the Scenario Timeline.
};
\end{lstlisting}
% ---------------------------------------------------------------------------
\subsection{{\tt TrickHLASimTimeline}}
In order to illustrate the use of the {\tt TrickHLATimeline} class, we subclass
it, as shown below.
\begin{lstlisting}[caption={{\tt TrickHLASimTimeline} class header}]
#include "TrickHLA/include/TrickHLATimeline.hh"
class TrickHLASimTimeline : public TrickHLATimeline
{
friend class InputProcessor;
friend void init_attrTrickHLASimTimeline();
public:
TrickHLASimTimeline(); // default constructor
virtual ~TrickHLASimTimeline(); // destructor
virtual double get_time(); // RETURN: s Current simulation time in seconds to represent the scenario time.
};
\end{lstlisting}
We give the {\tt get\_time()} method something to do, as shown below.
\begin{lstlisting}[caption={ {\tt TrickHLASimTimeline} code}]
/********************************* TRICK HEADER *******************************
PURPOSE: (TrickHLASimTimeline : This class represents the simulation timeline.)
LIBRARY DEPENDENCY: ((TrickHLASimTimeline.o))
*******************************************************************************/
// Trick include files.
#if TRICK_VER >= 10
# include "sim_services/Executive/include/Executive.hh"
# include "sim_services/Executive/include/exec_proto.h"
#else
// Trick 07
# include "sim_services/include/executive.h"
# include "sim_services/include/exec_proto.h"
#endif
// TrickHLA include files.
#include "TrickHLA/include/TrickHLASimTimeline.hh"
/********************************* TRICK HEADER *******************************
PURPOSE: (TrickHLASimTimeline::TrickHLASimTimeline : Default constructor.)
*******************************************************************************/
TrickHLASimTimeline::TrickHLASimTimeline() // RETURN: -- None.
{ }
/********************************* TRICK HEADER *******************************
PURPOSE: (TrickHLASimTimeline::~TrickHLASimTimeline : Destructor.)
*******************************************************************************/
TrickHLASimTimeline::~TrickHLASimTimeline() // RETURN: -- None.
{ }
/********************************* TRICK HEADER *******************************
PURPOSE: (TrickHLASimTimeline::get_time() : Get the current simulation time.)
LIBRARY DEPENDENCY: ((TrickHLATimeline.o)(TrickHLASimTimeline.o))
*******************************************************************************/
double TrickHLASimTimeline::get_time() // RETURN: -- Current simulation time in seconds to represent the scenario time.
{
#if TRICK_VER >= 10
return exec_get_sim_time();
#else
return exec_get_exec()->out.time;
#endif
}
\end{lstlisting}
In this example, all the {\tt get\_time()} method does is just return the
Trick simulation time.
% ---------------------------------------------------------------------------
\section{{\tt S\_define} file}
The {\tt TrickHLASimTimeline} class is introduced into the simulation via
the {\tt S\_define} file. There, you need to add a
new {\tt TrickHLASimTimeline} object to one of the simulation objects; in this
example we add it to the THLA\_INIT simulation object as follows:
\begin{verbatim}
TrickHLA: TrickHLASimTimeline sim_timeline;
\end{verbatim}
\TrickHLA\ will call the {\tt get\_time()} function when it needs to get the
current scenario time.
% ---------------------------------------------------------------------------
\section{{\tt input} file}
You need to register the {\tt TrickHLATimeline} object with the THLA
federate by adding the following line.
\begin{verbatim}
THLA.federate.scenario_timeline = &THLA_INIT.sim_timeline
\end{verbatim}
The simulation scenario timeline is specified by the THLA\_INIT.sim\_timeline
implementation.
\chapter[The tactic language]{The tactic language\label{TacticLanguage}}
%\geometry{a4paper,body={5in,8in}}
This chapter gives a compact documentation of Ltac, the tactic
language available in {\Coq}. We start by giving the syntax, and next,
we present the informal semantics. If you want to know more regarding
this language and especially about its foundations, you can refer
to~\cite{Del00}. Chapter~\ref{Tactics-examples} is devoted to giving
examples of the use of this language on small but also non-trivial
problems.
\section{Syntax}
\def\tacexpr{\textrm{\textsl{expr}}}
\def\tacexprlow{\textrm{\textsl{tacexpr$_1$}}}
\def\tacexprinf{\textrm{\textsl{tacexpr$_2$}}}
\def\tacexprpref{\textrm{\textsl{tacexpr$_3$}}}
\def\atom{\textrm{\textsl{atom}}}
%%\def\recclause{\textrm{\textsl{rec\_clause}}}
\def\letclause{\textrm{\textsl{let\_clause}}}
\def\matchrule{\textrm{\textsl{match\_rule}}}
\def\contextrule{\textrm{\textsl{context\_rule}}}
\def\contexthyp{\textrm{\textsl{context\_hyp}}}
\def\tacarg{\nterm{tacarg}}
\def\cpattern{\nterm{cpattern}}
The syntax of the tactic language is given in Figures~\ref{ltac}
and~\ref{ltac-aux}. See Chapter~\ref{BNF-syntax} for a description of
the BNF metasyntax used in these grammar rules. Various already
defined entries will be used in this chapter: entries
{\naturalnumber}, {\integer}, {\ident}, {\qualid}, {\term},
{\cpattern} and {\atomictac} represent respectively the natural and
integer numbers, the authorized identifiers and qualified names,
{\Coq}'s terms and patterns and all the atomic tactics described in
Chapter~\ref{Tactics}. The syntax of {\cpattern} is the same as that
of terms, but it is extended with pattern matching metavariables. In
{\cpattern}, a pattern-matching metavariable is represented with the
syntax {\tt ?id} where {\tt id} is an {\ident}. The notation {\tt \_}
can also be used to denote a metavariable whose instance is
irrelevant. In the notation {\tt ?id}, the identifier allows us to
keep instantiations and to make constraints whereas {\tt \_} shows
that we are not interested in what will be matched. On the right hand
side of pattern-matching clauses, the named metavariables are used
without the question mark prefix. There is also a special notation for
second-order pattern-matching problems: in an applicative pattern of
the form {\tt @?id id$_1$ \ldots id$_n$}, the variable {\tt id}
matches any complex expression with (possible) dependencies in the
variables {\tt id$_1$ \ldots id$_n$} and returns a functional term of
the form {\tt fun id$_1$ \ldots id$_n$ => {\term}}.
The main entry of the grammar is {\tacexpr}. This language is used in
proof mode but it can also be used in toplevel definitions as shown in
Figure~\ref{ltactop}.
\begin{Remarks}
\item The infix tacticals ``\dots\ {\tt ||} \dots'', ``\dots\ {\tt +}
\dots'', and ``\dots\ {\tt ;} \dots'' are associative.
\item In {\tacarg}, there is an overlap between {\qualid} as a
direct tactic argument and {\qualid} as a particular case of
{\term}. The resolution is done by first looking for a reference of
the tactic language and if it fails, for a reference to a term. To
force the resolution as a reference of the tactic language, use the
form {\tt ltac :} {\qualid}. To force the resolution as a reference to
a term, use the syntax {\tt ({\qualid})}.
\item As shown by the figure, the tactical {\tt ||} binds more tightly than the
  prefix tacticals {\tt try}, {\tt repeat}, {\tt do} and
  {\tt abstract}, which themselves bind more tightly than the postfix tactical
  ``{\tt \dots\ ;[ \dots\ ]}'', which binds more tightly than ``\dots\ {\tt ;}
  \dots''.
For instance
\begin{quote}
{\tt try repeat \tac$_1$ ||
\tac$_2$;\tac$_3$;[\tac$_{31}$|\dots|\tac$_{3n}$];\tac$_4$.}
\end{quote}
is understood as
\begin{quote}
{\tt (try (repeat (\tac$_1$ || \tac$_2$)));} \\
{\tt ((\tac$_3$;[\tac$_{31}$|\dots|\tac$_{3n}$]);\tac$_4$).}
\end{quote}
\end{Remarks}
\begin{figure}[htbp]
\begin{centerframe}
\begin{tabular}{lcl}
{\tacexpr} & ::= &
{\tacexpr} {\tt ;} {\tacexpr}\\
& | & {\tt [>} \nelist{\tacexpr}{|} {\tt ]}\\
& | & {\tacexpr} {\tt ; [} \nelist{\tacexpr}{|} {\tt ]}\\
& | & {\tacexprpref}\\
\\
{\tacexprpref} & ::= &
{\tt do} {\it (}{\naturalnumber} {\it |} {\ident}{\it )} {\tacexprpref}\\
& | & {\tt progress} {\tacexprpref}\\
& | & {\tt repeat} {\tacexprpref}\\
& | & {\tt try} {\tacexprpref}\\
& | & {\tt once} {\tacexprpref}\\
& | & {\tt exactly\_once} {\tacexprpref}\\
& | & {\tt timeout} {\it (}{\naturalnumber} {\it |} {\ident}{\it )} {\tacexprpref}\\
& | & {\tt time} \zeroone{\qstring} {\tacexprpref}\\
& | & {\tacexprinf} \\
\\
{\tacexprinf} & ::= &
{\tacexprlow} {\tt ||} {\tacexprpref}\\
& | & {\tacexprlow} {\tt +} {\tacexprpref}\\
& | & {\tt tryif} {\tacexprlow} {\tt then} {\tacexprlow} {\tt else} {\tacexprlow}\\
& | & {\tacexprlow}\\
\\
{\tacexprlow} & ::= &
{\tt fun} \nelist{\name}{} {\tt =>} {\atom}\\
& | &
{\tt let} \zeroone{\tt rec} \nelist{\letclause}{\tt with} {\tt in}
{\atom}\\
& | &
{\tt match goal with} \nelist{\contextrule}{\tt |} {\tt end}\\
& | &
{\tt match reverse goal with} \nelist{\contextrule}{\tt |} {\tt end}\\
& | &
{\tt match} {\tacexpr} {\tt with} \nelist{\matchrule}{\tt |} {\tt end}\\
& | &
{\tt lazymatch goal with} \nelist{\contextrule}{\tt |} {\tt end}\\
& | &
{\tt lazymatch reverse goal with} \nelist{\contextrule}{\tt |} {\tt end}\\
& | &
{\tt lazymatch} {\tacexpr} {\tt with} \nelist{\matchrule}{\tt |} {\tt end}\\
& | &
{\tt multimatch goal with} \nelist{\contextrule}{\tt |} {\tt end}\\
& | &
{\tt multimatch reverse goal with} \nelist{\contextrule}{\tt |} {\tt end}\\
& | &
{\tt multimatch} {\tacexpr} {\tt with} \nelist{\matchrule}{\tt |} {\tt end}\\
& | & {\tt abstract} {\atom}\\
& | & {\tt abstract} {\atom} {\tt using} {\ident} \\
& | & {\tt first [} \nelist{\tacexpr}{\tt |} {\tt ]}\\
& | & {\tt solve [} \nelist{\tacexpr}{\tt |} {\tt ]}\\
& | & {\tt idtac} \sequence{\messagetoken}{}\\
& | & {\tt fail} \zeroone{\naturalnumber} \sequence{\messagetoken}{}\\
& | & {\tt gfail} \zeroone{\naturalnumber} \sequence{\messagetoken}{}\\
& | & {\tt fresh} ~|~ {\tt fresh} {\qstring}|~ {\tt fresh} {\qualid}\\
& | & {\tt context} {\ident} {\tt [} {\term} {\tt ]}\\
& | & {\tt eval} {\nterm{redexpr}} {\tt in} {\term}\\
& | & {\tt type of} {\term}\\
& | & {\tt external} {\qstring} {\qstring} \nelist{\tacarg}{}\\
& | & {\tt constr :} {\term}\\
& | & {\tt uconstr :} {\term}\\
& | & {\tt type\_term} {\term}\\
& | & {\tt numgoals} \\
& | & {\tt guard} {\it test}\\
& | & \atomictac\\
& | & {\qualid} \nelist{\tacarg}{}\\
& | & {\atom}
\end{tabular}
\end{centerframe}
\caption{Syntax of the tactic language}
\label{ltac}
\end{figure}
\begin{figure}[htbp]
\begin{centerframe}
\begin{tabular}{lcl}
{\atom} & ::= &
{\qualid} \\
& | & ()\\
& | & {\integer}\\
& | & {\tt (} {\tacexpr} {\tt )}\\
\\
{\messagetoken}\!\!\!\!\!\! & ::= & {\qstring} ~|~ {\ident} ~|~ {\integer} \\
\\
\tacarg & ::= &
{\qualid}\\
& $|$ & {\tt ()} \\
& $|$ & {\tt ltac :} {\atom}\\
& $|$ & {\term}\\
\\
\letclause & ::= & {\ident} \sequence{\name}{} {\tt :=} {\tacexpr}\\
\\
\contextrule & ::= &
\nelist{\contexthyp}{\tt ,} {\tt |-}{\cpattern} {\tt =>} {\tacexpr}\\
& $|$ & {\tt |-} {\cpattern} {\tt =>} {\tacexpr}\\
& $|$ & {\tt \_ =>} {\tacexpr}\\
\\
\contexthyp & ::= & {\name} {\tt :} {\cpattern}\\
& $|$ & {\name} {\tt :=} {\cpattern} \zeroone{{\tt :} {\cpattern}}\\
\\
\matchrule & ::= &
{\cpattern} {\tt =>} {\tacexpr}\\
& $|$ & {\tt context} {\zeroone{\ident}} {\tt [} {\cpattern} {\tt ]}
{\tt =>} {\tacexpr}\\
& $|$ & {\tt appcontext} {\zeroone{\ident}} {\tt [} {\cpattern} {\tt ]}
{\tt =>} {\tacexpr}\\
& $|$ & {\tt \_ =>} {\tacexpr}\\
\\
{\it test} & ::= &
{\integer} {\tt \,=\,} {\integer}\\
& $|$ & {\integer} {\tt \,<\,} {\integer}\\
& $|$ & {\integer} {\tt <=} {\integer}\\
& $|$ & {\integer} {\tt \,>\,} {\integer}\\
& $|$ & {\integer} {\tt >=} {\integer}
\end{tabular}
\end{centerframe}
\caption{Syntax of the tactic language (continued)}
\label{ltac-aux}
\end{figure}
\begin{figure}[ht]
\begin{centerframe}
\begin{tabular}{lcl}
\nterm{top} & ::= & \zeroone{\tt Local} {\tt Ltac} \nelist{\nterm{ltac\_def}} {\tt with} \\
\\
\nterm{ltac\_def} & ::= & {\ident} \sequence{\ident}{} {\tt :=}
{\tacexpr}\\
& $|$ &{\qualid} \sequence{\ident}{} {\tt ::=}{\tacexpr}
\end{tabular}
\end{centerframe}
\caption{Tactic toplevel definitions}
\label{ltactop}
\end{figure}
%%
%% Semantics
%%
\section{Semantics}
%\index[tactic]{Tacticals}
\index{Tacticals}
%\label{Tacticals}
Tactic expressions can only be applied in the context of a proof. The
evaluation yields either a term, an integer or a tactic. Intermediate
results can be terms or integers, but the final result must be a tactic,
which is then applied to the focused goals.
There is a special case for {\tt match goal} expressions whose
clauses evaluate to tactics. Such expressions can only be used as the
end result of a tactic expression (never as the argument of a non-recursive local
definition or of an application).
The rest of this section explains the semantics of every construction
of Ltac.
%% \subsection{Values}
%% Values are given by Figure~\ref{ltacval}. All these values are tactic values,
%% i.e. to be applied to a goal, except {\tt Fun}, {\tt Rec} and $arg$ values.
%% \begin{figure}[ht]
%% \noindent{}\framebox[6in][l]
%% {\parbox{6in}
%% {\begin{center}
%% \begin{tabular}{lp{0.1in}l}
%% $vexpr$ & ::= & $vexpr$ {\tt ;} $vexpr$\\
%% & | & $vexpr$ {\tt ; [} {\it (}$vexpr$ {\tt |}{\it )}$^*$ $vexpr$ {\tt
%% ]}\\
%% & | & $vatom$\\
%% \\
%% $vatom$ & ::= & {\tt Fun} \nelist{\inputfun}{} {\tt ->} {\tacexpr}\\
%% %& | & {\tt Rec} \recclause\\
%% & | &
%% {\tt Rec} \nelist{\recclause}{\tt And} {\tt In}
%% {\tacexpr}\\
%% & | &
%% {\tt Match Context With} {\it (}$context\_rule$ {\tt |}{\it )}$^*$
%% $context\_rule$\\
%% & | & {\tt (} $vexpr$ {\tt )}\\
%% & | & $vatom$ {\tt Orelse} $vatom$\\
%% & | & {\tt Do} {\it (}{\naturalnumber} {\it |} {\ident}{\it )} $vatom$\\
%% & | & {\tt Repeat} $vatom$\\
%% & | & {\tt Try} $vatom$\\
%% & | & {\tt First [} {\it (}$vexpr$ {\tt |}{\it )}$^*$ $vexpr$ {\tt ]}\\
%% & | & {\tt Solve [} {\it (}$vexpr$ {\tt |}{\it )}$^*$ $vexpr$ {\tt ]}\\
%% & | & {\tt Idtac}\\
%% & | & {\tt Fail}\\
%% & | & {\primitivetactic}\\
%% & | & $arg$
%% \end{tabular}
%% \end{center}}}
%% \caption{Values of ${\cal L}_{tac}$}
%% \label{ltacval}
%% \end{figure}
%% \subsection{Evaluation}
\subsubsection[Sequence]{Sequence\tacindex{;}
\index{Tacticals!;@{\tt {\tac$_1$};\tac$_2$}}}
A sequence is an expression of the following form:
\begin{quote}
{\tacexpr}$_1$ {\tt ;} {\tacexpr}$_2$
\end{quote}
The expressions {\tacexpr}$_1$ and {\tacexpr}$_2$ are evaluated
to $v_1$ and $v_2$ which have to be tactic values. The tactic $v_1$ is
then applied and $v_2$ is applied to the goals generated by the
application of $v_1$. Sequence is left-associative.
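For instance, assuming the focused goal is $1 = 1 \wedge 2 = 2$ (a purely
illustrative example), the script
\begin{quote}
{\tt split; reflexivity.}
\end{quote}
first applies {\tt split}, which produces two subgoals, and then applies
{\tt reflexivity} to each of them.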
\subsubsection[Local application of tactics]{Local application of tactics\tacindex{[>\ldots$\mid$\ldots$\mid$\ldots]}\tacindex{;[\ldots$\mid$\ldots$\mid$\ldots]}\index{Tacticals![> \mid ]@{\tt {\tac$_0$};[{\tac$_1$}$\mid$\ldots$\mid$\tac$_n$]}}\index{Tacticals!; [ \mid ]@{\tt {\tac$_0$};[{\tac$_1$}$\mid$\ldots$\mid$\tac$_n$]}}}
%\tacindex{; [ | ]}
%\index{; [ | ]@{\tt ;[\ldots$\mid$\ldots$\mid$\ldots]}}
Different tactics can be applied to the different goals using the following form:
\begin{quote}
{\tt [ >} {\tacexpr}$_1$ {\tt |} $...$ {\tt |} {\tacexpr}$_n$ {\tt ]}
\end{quote}
The expressions {\tacexpr}$_i$ are evaluated to $v_i$, for $i=1,...,n$,
and all have to be tactics. Each $v_i$ is applied to the $i$-th goal,
for $i=1,...,n$. It fails if the number of focused goals is not exactly $n$.
\begin{Variants}
\item If no tactic is given for the $i$-th goal, it behaves as if
the tactic {\tt idtac} were given. For instance, {\tt [~> | auto
]} is a shortcut for {\tt [ > idtac | auto ]}.
\item {\tt [ >} {\tacexpr}$_1$ {\tt |} $...$ {\tt |}
{\tacexpr}$_i$ {\tt |} {\tacexpr} {\tt ..} {\tt |}
{\tacexpr}$_{i+1+j}$ {\tt |} $...$ {\tt |} {\tacexpr}$_n$ {\tt ]}
  In this variant, {\tacexpr} is used for each goal numbered from
  $i+1$ to $i+j$ (assuming $n$ is the number of goals).
  Note that {\tt ..} is part of the syntax, while $...$ is the meta-symbol used
  to describe a list of {\tacexpr} of arbitrary length.
\item {\tt [ >} {\tacexpr}$_1$ {\tt |} $...$ {\tt |}
{\tacexpr}$_i$ {\tt |} {\tt ..} {\tt |} {\tacexpr}$_{i+1+j}$ {\tt |}
$...$ {\tt |} {\tacexpr}$_n$ {\tt ]}
In this variant, {\tt idtac} is used for the goals numbered from
$i+1$ to $i+j$.
\item {\tt [ >} {\tacexpr} {\tt ..} {\tt ]}
In this variant, the tactic {\tacexpr} is applied independently to
each of the goals, rather than globally. In particular, if there
  are no goals, the tactic is not run at all. A tactic which
  expects multiple goals, such as {\tt swap}, would act as if a single
  goal were focused.
\item {\tacexpr} {\tt ; [ } {\tacexpr}$_1$ {\tt |} $...$ {\tt |} {\tacexpr}$_n$ {\tt ]}
This variant of local tactic application is paired with a
sequence. In this variant, $n$ must be the number of goals
generated by the application of {\tacexpr} to each of the
individual goals independently. All the above variants work in
this form too. Formally, {\tacexpr} {\tt ; [} $...$ {\tt ]} is
equivalent to
\begin{quote}
{\tt [ >} {\tacexpr} {\tt ; [ >} $...$ {\tt ]} {\tt ..} {\tt ]}
\end{quote}
\end{Variants}
\subsubsection[For loop]{For loop\tacindex{do}
\index{Tacticals!do@{\tt do}}}
There is a for loop that repeats a tactic {\num} times:
\begin{quote}
{\tt do} {\num} {\tacexpr}
\end{quote}
{\tacexpr} is evaluated to $v$ which must be a tactic value.
This tactic value $v$ is
applied {\num} times. Supposing ${\num}>1$, after the first
application of $v$, $v$ is applied, at least once, to the generated
subgoals and so on. It fails if the application of $v$ fails before
the {\num} applications have been completed.
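As a small illustration, in a goal of the form
$\forall\, a\ b\ c : \mathrm{nat},\ a = a$, the script
\begin{quote}
{\tt do 3 intro.}
\end{quote}
introduces the three universally quantified variables, whereas {\tt do 4 intro}
would fail on the same goal because the fourth application of {\tt intro} fails.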
\subsubsection[Repeat loop]{Repeat loop\tacindex{repeat}
\index{Tacticals!repeat@{\tt repeat}}}
We have a repeat loop with:
\begin{quote}
{\tt repeat} {\tacexpr}
\end{quote}
{\tacexpr} is evaluated to $v$. If $v$ denotes a tactic, this tactic
is applied to each focused goal independently. If the application
succeeds, the tactic is applied recursively to all the generated subgoals
until it eventually fails. The recursion stops in a subgoal when the
tactic has failed \emph{to make progress}. The tactic {\tt repeat
{\tacexpr}} itself never fails.
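As an illustration, if the conclusion of the goal is a nested conjunction such
as $(A \wedge B) \wedge (C \wedge D)$ with atomic $A$, $B$, $C$ and $D$, the
script
\begin{quote}
{\tt repeat split.}
\end{quote}
keeps applying {\tt split} until no conjunction is left, leaving the four
atomic subgoals; the recursion stops in each subgoal once {\tt split} fails,
and {\tt repeat split} itself never fails.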
\subsubsection[Error catching]{Error catching\tacindex{try}
\index{Tacticals!try@{\tt try}}}
We can catch the tactic errors with:
\begin{quote}
{\tt try} {\tacexpr}
\end{quote}
{\tacexpr} is evaluated to $v$ which must be a tactic value.
The tactic value $v$ is
applied to each focused goal independently. If the application of $v$
fails in a goal, it catches the error and leaves the goal
unchanged. If the level of the exception is positive, then the
exception is re-raised with its level decremented.
\subsubsection[Detecting progress]{Detecting progress\tacindex{progress}}
We can check if a tactic made progress with:
\begin{quote}
{\tt progress} {\tacexpr}
\end{quote}
{\tacexpr} is evaluated to $v$ which must be a tactic value.
The tactic value $v$ is
applied to each focused subgoal independently. If the application of
$v$ to one of the focused subgoal produced subgoals equal to the
initial goals (up to syntactical equality), then an error of level 0
is raised.
\ErrMsg \errindex{Failed to progress}
\subsubsection[Backtracking branching]{Backtracking branching\tacindex{$+$}
\index{Tacticals!or@{\tt $+$}}}
We can branch with the following structure:
\begin{quote}
{\tacexpr}$_1$ {\tt +} {\tacexpr}$_2$
\end{quote}
{\tacexpr}$_1$ and {\tacexpr}$_2$ are evaluated to $v_1$ and
$v_2$ which must be tactic values. The tactic value $v_1$ is applied to each
focused goal independently and if it fails or a later tactic fails,
then the proof backtracks to the current goal and $v_2$ is applied.
Tactics can be seen as having several successes. When a tactic fails
it asks for more successes of the prior tactics. {\tacexpr}$_1$ {\tt
+} {\tacexpr}$_2$ has all the successes of $v_1$ followed by all the
successes of $v_2$. Algebraically, ({\tacexpr}$_1$ {\tt +}
{\tacexpr}$_2$);{\tacexpr}$_3$ $=$ ({\tacexpr}$_1$;{\tacexpr}$_3$)
{\tt +} ({\tacexpr}$_2$;{\tacexpr}$_3$).
Branching is left-associative.
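As an illustration, consider the goal $\exists n : \mathrm{nat},\ n = 1$. The
script
\begin{quote}
{\tt (exists 0 + exists 1); reflexivity.}
\end{quote}
first instantiates the witness with {\tt 0}; when {\tt reflexivity} then fails
on $0 = 1$, the proof backtracks into the branching, retries with {\tt 1}, and
{\tt reflexivity} succeeds.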
\subsubsection[First tactic to work]{First tactic to work\tacindex{first}
\index{Tacticals!first@{\tt first}}}
Backtracking branching may be too expensive. In this case we may
restrict to a local, left biased, branching and consider the first
tactic to work (i.e. which does not fail) among a panel of tactics:
\begin{quote}
{\tt first [} {\tacexpr}$_1$ {\tt |} $...$ {\tt |} {\tacexpr}$_n$ {\tt ]}
\end{quote}
{\tacexpr}$_i$ are evaluated to $v_i$ and $v_i$ must be tactic values,
for $i=1,...,n$. Supposing $n>1$, it applies $v_1$ in each focused goal
independently; if it works, it stops, otherwise it tries to
apply $v_2$, and so on. It fails when there is no applicable tactic. In
other words, {\tt first [} {\tacexpr}$_1$ {\tt |} $...$ {\tt |}
{\tacexpr}$_n$ {\tt ]} behaves, in each goal, as the first $v_i$
to have \emph{at least} one success.
\ErrMsg \errindex{No applicable tactic}
\subsubsection[Left-biased branching]{Left-biased branching\tacindex{$\mid\mid$}
\index{Tacticals!orelse@{\tt $\mid\mid$}}}
Yet another way of branching without backtracking is the following structure:
\begin{quote}
{\tacexpr}$_1$ {\tt ||} {\tacexpr}$_2$
\end{quote}
{\tacexpr}$_1$ and {\tacexpr}$_2$ are evaluated to $v_1$ and
$v_2$ which must be tactic values. The tactic value $v_1$ is applied in each
subgoal independently and if it fails \emph{to progress} then $v_2$ is
applied. {\tacexpr}$_1$ {\tt ||} {\tacexpr}$_2$ is equivalent to {\tt
first [} {\tt progress} {\tacexpr}$_1$ {\tt |} {\tt progress}
{\tacexpr}$_2$ {\tt ]} (except that if it fails, it fails like
$v_2$). Branching is left-associative.
\subsubsection[Generalized biased branching]{Generalized biased branching\tacindex{tryif}
\index{Tacticals!tryif@{\tt tryif}}}
The tactic
\begin{quote}
{\tt tryif {\tacexpr}$_1$ then {\tacexpr}$_2$ else {\tacexpr}$_3$}
\end{quote}
is a generalization of the biased-branching tactics above. The
expression {\tacexpr}$_1$ is evaluated to $v_1$, which is then applied
to each subgoal independently. For each goal where $v_1$ succeeds at
least once, {\tacexpr}$_2$ is evaluated to $v_2$, which is then applied
collectively to the generated subgoals. The $v_2$ tactic can trigger
backtracking points in $v_1$: where $v_1$ succeeds at least once, {\tt
  tryif {\tacexpr}$_1$ then {\tacexpr}$_2$ else {\tacexpr}$_3$} is
equivalent to $v_1;v_2$. In each of the goals where $v_1$ does not
succeed at least once, {\tacexpr}$_3$ is evaluated to $v_3$, which is
then applied to the goal.
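For instance, the (purely illustrative) script
\begin{quote}
{\tt tryif reflexivity then idtac "trivial" else idtac "not trivial".}
\end{quote}
closes the goal and prints {\tt trivial} when the conclusion is a trivial
equality, and otherwise leaves the goal unchanged and prints {\tt not trivial}.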
\subsubsection[Soft cut]{Soft cut\tacindex{once}\index{Tacticals!once@{\tt once}}}
Another way of restricting backtracking is to restrict a tactic to a
single success \emph{a posteriori}:
\begin{quote}
{\tt once} {\tacexpr}
\end{quote}
{\tacexpr} is evaluated to $v$ which must be a tactic value.
The tactic value $v$ is
applied, but only its first success is used. If $v$ fails, {\tt once}
{\tacexpr} fails like $v$. If $v$ has at least one success, {\tt once}
{\tacexpr} succeeds once, but cannot produce more successes.
\subsubsection[Checking the successes]{Checking the successes\tacindex{exactly\_once}\index{Tacticals!exactly\_once@{\tt exactly\_once}}}
Coq provides an experimental way to check that a tactic has \emph{exactly one} success:
\begin{quote}
{\tt exactly\_once} {\tacexpr}
\end{quote}
{\tacexpr} is evaluated to $v$ which must be a tactic value.
The tactic value $v$ is
applied if it has at most one success. If $v$ fails, {\tt
  exactly\_once} {\tacexpr} fails like $v$. If $v$ has exactly one
success, {\tt exactly\_once} {\tacexpr} succeeds like $v$. If $v$ has
two or more successes, {\tt exactly\_once} {\tacexpr} fails.
The experimental status of this tactic pertains to the fact that if $v$ performs side effects, they may occur in an unpredictable way. Indeed, normally $v$ would only be executed up to the first success until backtracking is needed; however, {\tt exactly\_once} needs to look ahead to see whether a second success exists, and may run further effects immediately.
\ErrMsg \errindex{This tactic has more than one success}
\subsubsection[Solving]{Solving\tacindex{solve}
\index{Tacticals!solve@{\tt solve}}}
We may consider the first to solve (i.e. which generates no subgoal) among a
panel of tactics:
\begin{quote}
{\tt solve [} {\tacexpr}$_1$ {\tt |} $...$ {\tt |} {\tacexpr}$_n$ {\tt ]}
\end{quote}
{\tacexpr}$_i$ are evaluated to $v_i$ and $v_i$ must be tactic values,
for $i=1,...,n$. Supposing $n>1$, it applies $v_1$ to each goal
independently; if it does not solve the goal, then it tries to apply
$v_2$, and so on. It fails if there is no solving tactic.
\ErrMsg \errindex{Cannot solve the goal}
\subsubsection[Identity]{Identity\label{ltac:idtac}\tacindex{idtac}
\index{Tacticals!idtac@{\tt idtac}}}
The constant {\tt idtac} is the identity tactic: it leaves any goal
unchanged but it appears in the proof script.
\variant {\tt idtac \nelist{\messagetoken}{}}
This prints the given tokens. Strings and integers are printed
literally. If a (term) variable is given, its contents are printed.
\subsubsection[Failing]{Failing\tacindex{fail}
\index{Tacticals!fail@{\tt fail}}
\tacindex{gfail}\index{Tacticals!gfail@{\tt gfail}}}
The tactic {\tt fail} is the always-failing tactic: it does not solve
any goal. It is useful for defining other tacticals since it can be
caught by {\tt try}, {\tt repeat}, {\tt match goal}, or the branching
tacticals. The {\tt fail} tactic will, however, succeed if all the
goals have already been solved.
\begin{Variants}
\item {\tt fail $n$}\\ The number $n$ is the failure level. If no
level is specified, it defaults to $0$. The level is used by {\tt
try}, {\tt repeat}, {\tt match goal} and the branching tacticals.
If $0$, it makes {\tt match goal} considering the next clause
(backtracking). If non zero, the current {\tt match goal} block,
{\tt try}, {\tt repeat}, or branching command is aborted and the
level is decremented. In the case of {\tt +}, a non-zero level skips
the first backtrack point, even if the call to {\tt fail $n$} is not
enclosed in a {\tt +} command, respecting the algebraic identity.
\item {\tt fail \nelist{\messagetoken}{}}\\
The given tokens are used for printing the failure message.
\item {\tt fail $n$ \nelist{\messagetoken}{}}\\
This is a combination of the previous variants.
\item {\tt gfail}\\
This variant fails even if there are no goals left.
\item {\tt gfail \nelist{\messagetoken}{}}\\
{\tt gfail $n$ \nelist{\messagetoken}{}}\\
These variants fail with an error message or an error level even if
there are no goals left. Be careful however if Coq terms have to be
printed as part of the failure: term construction always forces the
tactic into the goals, meaning that if there are no goals when it is
evaluated, a tactic call like {\tt let x:=H in fail 0 x} will succeed.
\end{Variants}
\ErrMsg \errindex{Tactic Failure {\it message} (level $n$)}.
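The role of the level can be illustrated with a (purely illustrative) script
such as
\begin{quote}
{\tt match goal with \_ => fail 1 | \_ => idtac "second clause" end.}
\end{quote}
With {\tt fail} (level $0$) in the first clause, the error would be caught by
{\tt match goal} and the second clause tried, printing the message; with
{\tt fail 1} as written above, the whole {\tt match goal} block is aborted and
the failure is propagated, with its level decremented, to the enclosing
construct.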
\subsubsection[Timeout]{Timeout\tacindex{timeout}
\index{Tacticals!timeout@{\tt timeout}}}
We can force a tactic to stop if it has not finished after a certain
amount of time:
\begin{quote}
{\tt timeout} {\num} {\tacexpr}
\end{quote}
{\tacexpr} is evaluated to $v$ which must be a tactic value.
The tactic value $v$ is
applied normally, except that it is interrupted after ${\num}$ seconds
if it is still running. In this case the outcome is a failure.
Warning: For the moment, {\tt timeout} is based on elapsed time in
seconds, which is very
machine-dependent: a script that works on a quick machine may fail
on a slow one. The converse is even possible if you combine a
{\tt timeout} with some other tacticals. This tactical is hence
proposed only for convenience during debugging or other development
phases; we strongly advise you not to leave any {\tt timeout} in
final scripts. Note also that this tactical is not available on
the native Windows port of Coq.
\subsubsection{Timing a tactic\tacindex{time}
\index{Tacticals!time@{\tt time}}}
A tactic execution can be timed:
\begin{quote}
{\tt time} {\qstring} {\tacexpr}
\end{quote}
evaluates {\tacexpr}
and displays the time the tactic expression ran, whether it fails or
succeeds. In case of several successes, the time for each successive
run is displayed. Time is in seconds and is machine-dependent. The
{\qstring} argument is optional. When provided, it is used to identify
this particular occurrence of {\tt time}.
\subsubsection[Local definitions]{Local definitions\index{Ltac!let@\texttt{let}}
\index{Ltac!let rec@\texttt{let rec}}
\index{let@\texttt{let}!in Ltac}
\index{let rec@\texttt{let rec}!in Ltac}}
Local definitions can be done as follows:
\begin{quote}
{\tt let} {\ident}$_1$ {\tt :=} {\tacexpr}$_1$\\
{\tt with} {\ident}$_2$ {\tt :=} {\tacexpr}$_2$\\
...\\
{\tt with} {\ident}$_n$ {\tt :=} {\tacexpr}$_n$ {\tt in}\\
{\tacexpr}
\end{quote}
Each {\tacexpr}$_i$ is evaluated to $v_i$; then {\tacexpr} is
evaluated by substituting $v_i$ for each occurrence of {\ident}$_i$,
for $i=1,...,n$. There are no dependencies between the {\tacexpr}$_i$
and the {\ident}$_i$.
Local definitions can be recursive by using {\tt let rec} instead of
{\tt let}. In this latter case, the definitions are evaluated lazily
so that the {\tt rec} keyword can also be used in non-recursive cases
so as to avoid the eager evaluation of local definitions.
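For instance, the following (purely illustrative) expression evaluates the
term $2+2$ once, reduces it, and prints the result:
\begin{quote}
{\tt let x := constr:(2 + 2) in}\\
{\tt let y := eval compute in x in}\\
{\tt idtac y.}
\end{quote}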
\subsubsection{Application}
An application is an expression of the following form:
\begin{quote}
{\qualid} {\tacarg}$_1$ ... {\tacarg}$_n$
\end{quote}
The reference {\qualid} must be bound to some defined tactic
definition expecting at least $n$ arguments. The expressions
{\tacexpr}$_i$ are evaluated to $v_i$, for $i=1,...,n$.
%If {\tacexpr} is a {\tt Fun} or {\tt Rec} value then the body is evaluated by
%substituting $v_i$ to the formal parameters, for $i=1,...,n$. For recursive
%clauses, the bodies are lazily substituted (when an identifier to be evaluated
%is the name of a recursive clause).
%\subsection{Application of tactic values}
\subsubsection[Function construction]{Function construction\index{fun@\texttt{fun}!in Ltac}
\index{Ltac!fun@\texttt{fun}}}
A parameterized tactic can be built anonymously (without resorting to
local definitions) with:
\begin{quote}
{\tt fun} {\ident${}_1$} ... {\ident${}_n$} {\tt =>} {\tacexpr}
\end{quote}
Indeed, local definitions of functions are syntactic sugar for
binding a {\tt fun} tactic to an identifier.
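For instance, the following two illustrative definitions are equivalent:
\begin{coq_example*}
Ltac apply_then := fun lem tac => apply lem; tac.
Ltac apply_then' lem tac := apply lem; tac.
\end{coq_example*}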
\subsubsection[Pattern matching on terms]{Pattern matching on terms\index{Ltac!match@\texttt{match}}
\index{match@\texttt{match}!in Ltac}}
We can carry out pattern matching on terms with:
\begin{quote}
{\tt match} {\tacexpr} {\tt with}\\
~~~{\cpattern}$_1$ {\tt =>} {\tacexpr}$_1$\\
~{\tt |} {\cpattern}$_2$ {\tt =>} {\tacexpr}$_2$\\
~...\\
~{\tt |} {\cpattern}$_n$ {\tt =>} {\tacexpr}$_n$\\
~{\tt |} {\tt \_} {\tt =>} {\tacexpr}$_{n+1}$\\
{\tt end}
\end{quote}
The expression {\tacexpr} is evaluated and should yield a term which
is matched against {\cpattern}$_1$. The matching is non-linear: if a
metavariable occurs more than once, it should match the same
expression every time. It is first-order except on the
variables of the form {\tt @?id} that occur in head position of an
application. For these variables, the matching is second-order and
returns a functional term.
Alternatively, when a metavariable of the form {\tt ?id} occurs under
binders, say $x_1$, \ldots, $x_n$, and the expression matches, the
metavariable is instantiated by a term which can then be used in any
context which also binds the variables $x_1$, \ldots, $x_n$ with the
same types. This provides a primitive form of matching
under context which does not require manipulating a functional term.
If the matching with {\cpattern}$_1$ succeeds, then {\tacexpr}$_1$ is
evaluated into some value by substituting the pattern matching
instantiations to the metavariables. If {\tacexpr}$_1$ evaluates to a
tactic and the {\tt match} expression is in position to be applied to
a goal (e.g. it is not bound to a variable by a {\tt let in}), then
this tactic is applied. If the tactic succeeds, the list of resulting
subgoals is the result of the {\tt match} expression. If
{\tacexpr}$_1$ does not evaluate to a tactic or if the {\tt match}
expression is not in position to be applied to a goal, then the result
of the evaluation of {\tacexpr}$_1$ is the result of the {\tt match}
expression.
If the matching with {\cpattern}$_1$ fails, or if it succeeds but the
evaluation of {\tacexpr}$_1$ fails, or if the evaluation of
{\tacexpr}$_1$ succeeds but returns a tactic in execution position
whose execution fails, then {\cpattern}$_2$ is used and so on. The
pattern {\_} matches any term and shunts all remaining patterns if
any. If all clauses fail (in particular, there is no pattern {\_})
then a no-matching-clause error is raised.
Failures in subsequent tactics do not cause backtracking to select new
branches or inside the right-hand side of the selected branch even if
it has backtracking points.
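As an example, written here only for illustration, the following
value-returning tactic computes the head of an applicative term:
\begin{coq_example}
Ltac head_of t :=
  match t with
  | ?f _ => head_of f
  | _ => t
  end.
Goal True.
(* prints the head symbol of the term, here pred *)
let h := head_of (pred (pred 2)) in idtac h.
Abort.
\end{coq_example}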
\begin{ErrMsgs}
\item \errindex{No matching clauses for match}
No pattern can be used and, in particular, there is no {\tt \_} pattern.
\item \errindex{Argument of match does not evaluate to a term}
This happens when {\tacexpr} does not denote a term.
\end{ErrMsgs}
\begin{Variants}
\item \index{multimatch@\texttt{multimatch}!in Ltac}
\index{Ltac!multimatch@\texttt{multimatch}}
Using {\tt multimatch} instead of {\tt match} will allow subsequent
tactics to backtrack into a right-hand side tactic which has
backtracking points left and trigger the selection of a new matching
branch when all the backtracking points of the right-hand side have
been consumed.
The syntax {\tt match \ldots} is, in fact, a shorthand for
{\tt once multimatch \ldots}.
\item \index{lazymatch@\texttt{lazymatch}!in Ltac}
\index{Ltac!lazymatch@\texttt{lazymatch}}
Using {\tt lazymatch} instead of {\tt match} will perform the same
pattern matching procedure but will commit to the first matching
branch rather than trying a new matching if the right-hand side
fails. If the right-hand side of the selected branch is a tactic with
backtracking points, then subsequent failures cause this tactic to
backtrack.
\item \index{context@\texttt{context}!in pattern}
There is a special form of patterns to match a subterm against the
pattern:
\begin{quote}
{\tt context} {\ident} {\tt [} {\cpattern} {\tt ]}
\end{quote}
It matches any term with a subterm matching {\cpattern}. If there is
a match, the optional {\ident} is assigned the ``matched context'', i.e.
the initial term where the matched subterm is replaced by a
hole. The example below will show how to use such term contexts.
If the evaluation of the right-hand-side of a valid match fails, the
next matching subterm is tried. If no further subterm matches, the
next clause is tried. Matching subterms are considered from top to bottom and
from left to right (with respect to the raw printing obtained by
setting option {\tt Printing All}, see Section~\ref{SetPrintingAll}).
\begin{coq_example}
Ltac f x :=
match x with
context f [S ?X] =>
idtac X; (* To display the evaluation order *)
assert (p := eq_refl 1 : X=1); (* To filter the case X=1 *)
let x:= context f[O] in assert (x=O) (* To observe the context *)
end.
Goal True.
f (3+4).
\end{coq_example}
\item \index{appcontext@\texttt{appcontext}!in pattern}
\optindex{Tactic Compat Context}
For historical reasons, {\tt context} used to consider $n$-ary applications
such as {\tt (f 1 2)} as a whole, and not as a sequence of unary
applications {\tt ((f 1) 2)}. Hence {\tt context [f ?x]} would fail
to find a matching subterm in {\tt (f 1 2)}: if the pattern was a partial
application, the matched subterms would have necessarily been
applications with exactly the same number of arguments.
As a workaround, one could use the following variant of {\tt context}:
\begin{quote}
{\tt appcontext} {\ident} {\tt [} {\cpattern} {\tt ]}
\end{quote}
This syntax is now deprecated, as {\tt context} behaves as intended. The former
behavior can be retrieved with the {\tt Tactic Compat Context} flag.
\end{Variants}
\subsubsection[Pattern matching on goals]{Pattern matching on goals\index{Ltac!match goal@\texttt{match goal}}
\index{Ltac!match reverse goal@\texttt{match reverse goal}}
\index{match goal@\texttt{match goal}!in Ltac}
\index{match reverse goal@\texttt{match reverse goal}!in Ltac}}
We can make pattern matching on goals using the following expression:
\begin{quote}
\begin{tabbing}
{\tt match goal with}\\
~~\={\tt |} $hyp_{1,1}${\tt ,}...{\tt ,}$hyp_{1,m_1}$
~~{\tt |-}{\cpattern}$_1${\tt =>} {\tacexpr}$_1$\\
\>{\tt |} $hyp_{2,1}${\tt ,}...{\tt ,}$hyp_{2,m_2}$
~~{\tt |-}{\cpattern}$_2${\tt =>} {\tacexpr}$_2$\\
~~...\\
\>{\tt |} $hyp_{n,1}${\tt ,}...{\tt ,}$hyp_{n,m_n}$
~~{\tt |-}{\cpattern}$_n${\tt =>} {\tacexpr}$_n$\\
\>{\tt |\_}~~~~{\tt =>} {\tacexpr}$_{n+1}$\\
{\tt end}
\end{tabbing}
\end{quote}
If each hypothesis pattern $hyp_{1,i}$, with $i=1,...,m_1$,
is matched (non-linear first-order unification) by a hypothesis of
the goal and if {\cpattern}$_1$ is matched by the conclusion of the
goal, then {\tacexpr}$_1$ is evaluated to $v_1$ by substituting the
pattern-matching instantiations for the metavariables and the real
hypothesis names for the hypothesis name variables occurring in the
hypothesis patterns. If $v_1$ is a tactic value, then it is applied to the
goal. If this application fails, then another combination of
hypotheses is tried with the same proof context pattern. If there is
no other combination of hypotheses then the second proof context
pattern is tried and so on. If the next to last proof context pattern
fails then {\tacexpr}$_{n+1}$ is evaluated to $v_{n+1}$ and $v_{n+1}$
is applied. Note also that matching against subterms (using the {\tt
context} {\ident} {\tt [} {\cpattern} {\tt ]}) is available and is
also subject to yielding several matchings.
Failures in subsequent tactics do not cause backtracking to select new
branches or combinations of hypotheses, or inside the right-hand side
of the selected branch even if it has backtracking points.
\ErrMsg \errindex{No matching clauses for match goal}
No clause succeeds, i.e. all matching patterns, if any,
fail at the application of the right-hand-side.
\medskip
It is important to know that each hypothesis of the goal can be
matched by at most one hypothesis pattern. The order of matching is
the following: hypothesis patterns are examined from the right to the
left (i.e. $hyp_{i,m_i}$ before $hyp_{i,1}$). For each hypothesis
pattern, the goal hypotheses are matched in order (fresher hypotheses
first), but it is possible to reverse this order (older first) with
the {\tt match reverse goal with} variant.
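For instance, the following illustrative tactic closes any goal whose
context contains both a proposition and its negation:
\begin{coq_example*}
Ltac find_contradiction :=
  match goal with
  | H : ?P, H' : ~ ?P |- _ => destruct (H' H)
  end.
\end{coq_example*}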
\variant
\index{multimatch goal@\texttt{multimatch goal}!in Ltac}
\index{Ltac!multimatch goal@\texttt{multimatch goal}}
\index{multimatch reverse goal@\texttt{multimatch reverse goal}!in Ltac}
\index{Ltac!multimatch reverse goal@\texttt{multimatch reverse goal}}
Using {\tt multimatch} instead of {\tt match} will allow subsequent
tactics to backtrack into a right-hand side tactic which has
backtracking points left and trigger the selection of a new matching
branch or combination of hypotheses when all the backtracking points
of the right-hand side have been consumed.
The syntax {\tt match [reverse] goal \ldots} is, in fact, a shorthand for
{\tt once multimatch [reverse] goal \ldots}.
\index{lazymatch goal@\texttt{lazymatch goal}!in Ltac}
\index{Ltac!lazymatch goal@\texttt{lazymatch goal}}
\index{lazymatch reverse goal@\texttt{lazymatch reverse goal}!in Ltac}
\index{Ltac!lazymatch reverse goal@\texttt{lazymatch reverse goal}}
Using {\tt lazymatch} instead of {\tt match} will perform the same
pattern matching procedure but will commit to the first matching
branch with the first matching combination of hypotheses rather than
trying a new matching if the right-hand side fails. If the right-hand
side of the selected branch is a tactic with backtracking points, then
subsequent failures cause this tactic to backtrack.
\subsubsection[Filling a term context]{Filling a term context\index{context@\texttt{context}!in expression}}
The following expression is not a tactic in the sense that it does not
produce subgoals but generates a term to be used in tactic
expressions:
\begin{quote}
{\tt context} {\ident} {\tt [} {\tacexpr} {\tt ]}
\end{quote}
{\ident} must denote a context variable bound by a {\tt context}
pattern of a {\tt match} expression. This expression evaluates to the
value of {\ident} in which the hole is replaced by the value of
{\tacexpr}.
\ErrMsg \errindex{not a context variable}
\subsubsection[Generating fresh hypothesis names]{Generating fresh hypothesis names\index{Ltac!fresh@\texttt{fresh}}
\index{fresh@\texttt{fresh}!in Ltac}}
Tactics sometimes have to generate new names for hypotheses. Letting
the system decide on a name with the {\tt intro} tactic is not always
satisfactory since it is awkward to retrieve the name the system chose.
The following expression returns an identifier:
\begin{quote}
{\tt fresh} \nelist{\textrm{\textsl{component}}}{}
\end{quote}
It evaluates to an identifier unbound in the goal. This fresh
identifier is obtained by concatenating the values of the
\textrm{\textsl{component}}s (each of them is either a {\qualid}, which
has to refer to an (unqualified) name, or directly a name denoted by a
{\qstring}). If the resulting name is already used, it is padded
with a number so that it becomes fresh. If no component is
given, the name is a fresh derivative of the name {\tt H}.
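For example, the following illustrative tactic asserts a reflexivity fact
under a name derived from the string {\tt "H"}:
\begin{coq_example*}
Ltac assert_refl n :=
  let H := fresh "H" in
  assert (H : n = n) by reflexivity.
\end{coq_example*}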
\subsubsection[Computing in a constr]{Computing in a constr\index{Ltac!eval@\texttt{eval}}
\index{eval@\texttt{eval}!in Ltac}}
Evaluation of a term can be performed with:
\begin{quote}
{\tt eval} {\nterm{redexpr}} {\tt in} {\term}
\end{quote}
where \nterm{redexpr} is a reduction tactic among {\tt red}, {\tt
hnf}, {\tt compute}, {\tt simpl}, {\tt cbv}, {\tt lazy}, {\tt unfold},
{\tt fold}, {\tt pattern}.
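For instance, as a minimal illustration:
\begin{coq_example}
Goal True.
let n := eval compute in (2 * 3 + 1) in idtac n.
Abort.
\end{coq_example}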
\subsubsection{Recovering the type of a term}
%\tacindex{type of}
\index{Ltac!type of@\texttt{type of}}
\index{type of@\texttt{type of}!in Ltac}
The following returns the type of {\term}:
\begin{quote}
{\tt type of} {\term}
\end{quote}
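For instance, as a minimal illustration, {\tt type of} can be combined
with {\tt idtac} to display the type of a term:
\begin{coq_example}
Goal True.
let T := type of 3 in idtac T.
Abort.
\end{coq_example}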
\subsubsection[Manipulating untyped terms]{Manipulating untyped terms\index{Ltac!uconstr@\texttt{uconstr}}
\index{uconstr@\texttt{uconstr}!in Ltac}
\index{Ltac!type\_term@\texttt{type\_term}}
\index{type\_term@\texttt{type\_term}!in Ltac}}
The terms built in Ltac are well-typed by default. This may not be
appropriate for building large terms using a recursive Ltac function:
the term has to be entirely type checked at each step, resulting in
potentially very slow behavior. It is possible to build untyped terms
using Ltac with the syntax
\begin{quote}
{\tt uconstr :} {\term}
\end{quote}
An untyped term, in Ltac, can contain references to hypotheses or to
Ltac variables containing typed or untyped terms. An untyped term can
be type-checked using the function {\tt type\_term} whose argument is
parsed as an untyped term and returns a well-typed term which can be
used in tactics.
\begin{quote}
{\tt type\_term} {\term}
\end{quote}
Untyped terms built using {\tt uconstr :} can also be used as
arguments to the {\tt refine} tactic~\ref{refine}. In that case the
untyped term is type checked against the conclusion of the goal, and
the holes which are not solved by the typing procedure are turned into
new subgoals.
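For instance, as a purely illustrative sketch, an untyped term with holes
can be built first and then fed to {\tt refine}:
\begin{coq_example*}
Goal 1 = 1 /\ 2 = 2.
let u := uconstr:(conj _ _) in refine u.
all: reflexivity.
Abort.
\end{coq_example*}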
\subsubsection[Counting the goals]{Counting the goals\index{Ltac!numgoals@\texttt{numgoals}}\index{numgoals@\texttt{numgoals}!in Ltac}}
The number of goals under focus can be recovered using the {\tt
numgoals} function. Combined with the {\tt guard} command below, it
can be used to branch over the number of goals produced by previous tactics.
\begin{coq_example*}
Ltac pr_numgoals := let n := numgoals in idtac "There are" n "goals".
Goal True /\ True /\ True.
split;[|split].
\end{coq_example*}
\begin{coq_example}
all:pr_numgoals.
\end{coq_example}
\subsubsection[Testing boolean expressions]{Testing boolean expressions\index{Ltac!guard@\texttt{guard}}\index{guard@\texttt{guard}!in Ltac}}
The {\tt guard} tactic tests a boolean expression, and fails if the expression evaluates to false. If the expression evaluates to true, it succeeds without affecting the proof.
\begin{quote}
{\tt guard} {\it test}
\end{quote}
The accepted tests are simple integer comparisons.
\begin{coq_example*}
Goal True /\ True /\ True.
split;[|split].
\end{coq_example*}
\begin{coq_example}
all:let n:= numgoals in guard n<4.
Fail all:let n:= numgoals in guard n=2.
\end{coq_example}
\begin{ErrMsgs}
\item \errindex{Condition not satisfied}
\end{ErrMsgs}
\begin{coq_eval}
Reset Initial.
\end{coq_eval}
\subsubsection[Proving a subgoal as a separate lemma]{Proving a subgoal as a separate lemma\tacindex{abstract}\comindex{Qed exporting}
\index{Tacticals!abstract@{\tt abstract}}}
From the outside ``\texttt{abstract \tacexpr}'' is the same as
{\tt solve \tacexpr}. Internally it saves an auxiliary lemma called
{\ident}\texttt{\_subproof}\textit{n} where {\ident} is the name of the
current goal and \textit{n} is chosen so that this is a fresh name.
Such an auxiliary lemma is inlined in the final proof term
unless the proof is ended with ``\texttt{Qed exporting}''. In that
case the lemma is preserved. The syntax
``\texttt{Qed exporting }\ident$_1$\texttt{, ..., }\ident$_n$''
is also supported. In that case the system checks that the names given by the
user actually exist when the proof is ended.
This tactical is useful with tactics such as \texttt{omega} or
\texttt{discriminate} that generate huge proof terms. With this tool
the user can avoid the explosion at the time of the \texttt{Save} command
without having to manually cut the proof into smaller lemmas.
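For instance, as a small illustrative use, one can abstract the proof of a
subgoal so that it is saved as a separate lemma:
\begin{coq_example*}
Goal 1 + 1 = 2 /\ True.
split.
abstract reflexivity.
exact I.
Abort.
\end{coq_example*}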
\begin{Variants}
\item \texttt{abstract {\tacexpr} using {\ident}}.\\
Give explicitly the name of the auxiliary lemma.
\end{Variants}
\ErrMsg \errindex{Proof is not complete}
\section[Tactic toplevel definitions]{Tactic toplevel definitions\comindex{Ltac}}
\subsection{Defining {\ltac} functions}
Basically, {\ltac} toplevel definitions are made as follows:
%{\tt Tactic Definition} {\ident} {\tt :=} {\tacexpr}\\
%
%{\tacexpr} is evaluated to $v$ and $v$ is associated to {\ident}. Next, every
%script is evaluated by substituting $v$ to {\ident}.
%
%We can define functional definitions by:\\
\begin{quote}
{\tt Ltac} {\ident} {\ident}$_1$ ... {\ident}$_n$ {\tt :=}
{\tacexpr}
\end{quote}
This defines a new {\ltac} function that can be used in any tactic
script or new {\ltac} toplevel definition.
\Rem The preceding definition can equivalently be written:
\begin{quote}
{\tt Ltac} {\ident} {\tt := fun} {\ident}$_1$ ... {\ident}$_n$
{\tt =>} {\tacexpr}
\end{quote}
Recursive and mutual recursive function definitions are also
possible with the syntax:
\begin{quote}
{\tt Ltac} {\ident}$_1$ {\ident}$_{1,1}$ ...
{\ident}$_{1,m_1}$~~{\tt :=} {\tacexpr}$_1$\\
{\tt with} {\ident}$_2$ {\ident}$_{2,1}$ ... {\ident}$_{2,m_2}$~~{\tt :=}
{\tacexpr}$_2$\\
...\\
{\tt with} {\ident}$_n$ {\ident}$_{n,1}$ ... {\ident}$_{n,m_n}$~~{\tt :=}
{\tacexpr}$_n$
\end{quote}
\medskip
It is also possible to \emph{redefine} an existing user-defined tactic
using the syntax:
\begin{quote}
{\tt Ltac} {\qualid} {\ident}$_1$ ... {\ident}$_n$ {\tt ::=}
{\tacexpr}
\end{quote}
A previous definition of \qualid must exist in the environment.
The new definition will always be used instead of the old one and
it goes across module boundaries.
If preceded by the keyword {\tt Local} the tactic definition will not
be exported outside the current module.
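For instance (the names below are chosen only for illustration):
\begin{coq_example*}
Ltac solve_trivial := trivial.
Ltac solve_trivial ::= first [ assumption | trivial ].
\end{coq_example*}
After the second command, every call to {\tt solve\_trivial}, including
calls from previously defined tactics, uses the new definition.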
\subsection[Printing {\ltac} tactics]{Printing {\ltac} tactics\comindex{Print Ltac}}
Defined {\ltac} functions can be displayed using the command
\begin{quote}
{\tt Print Ltac {\qualid}.}
\end{quote}
\section{Debugging {\ltac} tactics}
\subsection[Info trace]{Info trace\comindex{Info}\optindex{Info Level}}
It is possible to print the trace of the path actually taken by an {\ltac} script, that is, the list of executed tactics, discarding all the branches which have failed. To that end the {\tt Info} command can be used with the following syntax.
\begin{quote}
{\tt Info} {\num} {\tacexpr}.
\end{quote}
The number {\num} is the unfolding level of tactics in the trace. At level $0$, the trace contains a sequence of tactics in the actual script; at level $1$, the trace will be the concatenation of the traces of these tactics, etc\ldots
\begin{coq_eval}
Reset Initial.
\end{coq_eval}
\begin{coq_example*}
Ltac t x := exists x; reflexivity.
Goal exists n, n=0.
\end{coq_example*}
\begin{coq_example}
Info 0 t 1||t 0.
\end{coq_example}
\begin{coq_example*}
Undo.
\end{coq_example*}
\begin{coq_example}
Info 1 t 1||t 0.
\end{coq_example}
The trace produced by {\tt Info} tries its best to be a reparsable {\ltac} script, but this goal is not achievable in all generality. So some of the output traces will contain oddities.
As an additional help for debugging, the trace produced by {\tt Info} contains (in comments) the messages produced by the {\tt idtac} tactical~\ref{ltac:idtac} at the right position in the script. In particular, the calls to {\tt idtac} in branches which failed are not printed.
An alternative to the {\tt Info} command is to use the {\tt Info Level} option as follows:
\begin{quote}
{\tt Set Info Level} \num.
\end{quote}
This will automatically print the same trace as {\tt Info \num} at each tactic call. The unfolding level can be overridden by a call to the {\tt Info} command. This option can be turned off with:
\begin{quote}
{\tt Unset Info Level} \num.
\end{quote}
The current value for the {\tt Info Level} option can be checked using the {\tt Test Info Level} command.
\subsection[Interactive debugger]{Interactive debugger\optindex{Ltac Debug}}
The {\ltac} interpreter comes with a step-by-step debugger. The
debugger can be activated using the command
\begin{quote}
{\tt Set Ltac Debug.}
\end{quote}
\noindent and deactivated using the command
\begin{quote}
{\tt Unset Ltac Debug.}
\end{quote}
To know if the debugger is on, use the command \texttt{Test Ltac Debug}.
When the debugger is activated, it stops at every step of the
evaluation of the current {\ltac} expression and it prints information
on what it is doing. The debugger stops, prompting for a command which
can be one of the following:
\medskip
\begin{tabular}{ll}
simple newline: & go to the next step\\
h: & get help\\
x: & exit current evaluation\\
s: & continue current evaluation without stopping\\
r $n$: & advance $n$ steps further\\
r {\qstring}: & advance up to the next call to ``{\tt idtac} {\qstring}''\\
\end{tabular}
\endinput
\subsection{Permutation on closed lists}
\begin{figure}[b]
\begin{center}
\fbox{\begin{minipage}{0.95\textwidth}
\begin{coq_eval}
Reset Initial.
\end{coq_eval}
\begin{coq_example*}
Require Import List.
Section Sort.
Variable A : Set.
Inductive permut : list A -> list A -> Prop :=
| permut_refl : forall l, permut l l
| permut_cons :
forall a l0 l1, permut l0 l1 -> permut (a :: l0) (a :: l1)
| permut_append : forall a l, permut (a :: l) (l ++ a :: nil)
| permut_trans :
forall l0 l1 l2, permut l0 l1 -> permut l1 l2 -> permut l0 l2.
End Sort.
\end{coq_example*}
\end{center}
\caption{Definition of the permutation predicate}
\label{permutpred}
\end{figure}
Another more complex example is the problem of permutation on closed
lists. The aim is to show that a closed list is a permutation of
another one. First, we define the permutation predicate as shown on
Figure~\ref{permutpred}.
\begin{figure}[p]
\begin{center}
\fbox{\begin{minipage}{0.95\textwidth}
\begin{coq_example}
Ltac Permut n :=
match goal with
| |- (permut _ ?l ?l) => apply permut_refl
| |- (permut _ (?a :: ?l1) (?a :: ?l2)) =>
let newn := eval compute in (length l1) in
(apply permut_cons; Permut newn)
| |- (permut ?A (?a :: ?l1) ?l2) =>
match eval compute in n with
| 1 => fail
| _ =>
let l1' := constr:(l1 ++ a :: nil) in
(apply (permut_trans A (a :: l1) l1' l2);
[ apply permut_append | compute; Permut (pred n) ])
end
end.
Ltac PermutProve :=
match goal with
| |- (permut _ ?l1 ?l2) =>
match eval compute in (length l1 = length l2) with
| (?n = ?n) => Permut n
end
end.
\end{coq_example}
\end{minipage}}
\end{center}
\caption{Permutation tactic}
\label{permutltac}
\end{figure}
\begin{figure}[p]
\begin{center}
\fbox{\begin{minipage}{0.95\textwidth}
\begin{coq_example*}
Lemma permut_ex1 :
permut nat (1 :: 2 :: 3 :: nil) (3 :: 2 :: 1 :: nil).
Proof.
PermutProve.
Qed.
Lemma permut_ex2 :
permut nat
(0 :: 1 :: 2 :: 3 :: 4 :: 5 :: 6 :: 7 :: 8 :: 9 :: nil)
(0 :: 2 :: 4 :: 6 :: 8 :: 9 :: 7 :: 5 :: 3 :: 1 :: nil).
Proof.
PermutProve.
Qed.
\end{coq_example*}
\end{minipage}}
\end{center}
\caption{Examples of {\tt PermutProve} use}
\label{permutlem}
\end{figure}
Next, we can naturally write the tactic; the result can be seen in
Figure~\ref{permutltac}. We can notice that we use two toplevel
definitions {\tt PermutProve} and {\tt Permut}. The function to be
called is {\tt PermutProve} which computes the lengths of the two
lists and calls {\tt Permut} with the length if the two lists have the
same length. {\tt Permut} works as expected. If the two lists are
equal, it concludes. Otherwise, if the lists have identical first
elements, it applies {\tt Permut} to the tails of the lists. Finally,
if the lists have different first elements, it puts the first element
of one of the lists (here the second one which appears in the {\tt
permut} predicate) at the end if that is possible, i.e., if the new
first element has been at this place previously. To verify that all
rotations have been done for a list, we use the length of the list as
an argument for {\tt Permut} and this length is decremented for each
rotation down to, but not including, 1 because for a list of length
$n$, we can make exactly $n-1$ rotations to generate at most $n$
distinct lists. Here, it must be noticed that we use the natural
numbers of {\Coq} for the rotation counter. In Figure~\ref{ltac}, we
can see that it is possible to use ordinary natural numbers, but they are
only used as arguments for primitive tactics and cannot be
manipulated; in particular, we cannot make computations with them. So, a
natural choice is to use {\Coq} data structures so that {\Coq} makes
the computations (reductions) by {\tt eval compute in} and we can get
the terms back by {\tt match}.
With {\tt PermutProve}, we can now prove lemmas such as those shown in
Figure~\ref{permutlem}.
\subsection{Deciding intuitionistic propositional logic}
\begin{figure}[tbp]
\begin{center}
\fbox{\begin{minipage}{0.95\textwidth}
\begin{coq_example}
Ltac Axioms :=
match goal with
| |- True => trivial
| _:False |- _ => elimtype False; assumption
| _:?A |- ?A => auto
end.
Ltac DSimplif :=
repeat
(intros;
match goal with
| id:(~ _) |- _ => red in id
| id:(_ /\ _) |- _ =>
elim id; do 2 intro; clear id
| id:(_ \/ _) |- _ =>
elim id; intro; clear id
| id:(?A /\ ?B -> ?C) |- _ =>
cut (A -> B -> C);
[ intro | intros; apply id; split; assumption ]
| id:(?A \/ ?B -> ?C) |- _ =>
cut (B -> C);
[ cut (A -> C);
[ intros; clear id
| intro; apply id; left; assumption ]
| intro; apply id; right; assumption ]
| id0:(?A -> ?B),id1:?A |- _ =>
cut B; [ intro; clear id0 | apply id0; assumption ]
| |- (_ /\ _) => split
| |- (~ _) => red
end).
\end{coq_example}
\end{minipage}}
\end{center}
\caption{Deciding intuitionistic propositions (1)}
\label{tautoltaca}
\end{figure}
\begin{figure}
\begin{center}
\fbox{\begin{minipage}{0.95\textwidth}
\begin{coq_example}
Ltac TautoProp :=
DSimplif;
Axioms ||
match goal with
| id:((?A -> ?B) -> ?C) |- _ =>
cut (B -> C);
[ intro; cut (A -> B);
[ intro; cut C;
[ intro; clear id | apply id; assumption ]
| clear id ]
| intro; apply id; intro; assumption ]; TautoProp
| id:(~ ?A -> ?B) |- _ =>
cut (False -> B);
[ intro; cut (A -> False);
[ intro; cut B;
[ intro; clear id | apply id; assumption ]
| clear id ]
| intro; apply id; red; intro; assumption ]; TautoProp
| |- (_ \/ _) => (left; TautoProp) || (right; TautoProp)
end.
\end{coq_example}
\end{minipage}}
\end{center}
\caption{Deciding intuitionistic propositions (2)}
\label{tautoltacb}
\end{figure}
Pattern matching on goals allows complete, and therefore powerful,
backtracking when returning tactic values. An interesting application
is the problem of deciding intuitionistic propositional logic.
Considering the contraction-free sequent calculi {\tt LJT*} of
Roy~Dyckhoff (\cite{Dyc92}), it is quite natural to code such a tactic
using the tactic language. On Figure~\ref{tautoltaca}, the tactic {\tt
Axioms} tries to conclude using usual axioms. The {\tt DSimplif}
tactic applies all the reversible rules of Dyckhoff's system.
Finally, on Figure~\ref{tautoltacb}, the {\tt TautoProp} tactic (the
main tactic to be called) simplifies with {\tt DSimplif}, tries to
conclude with {\tt Axioms} and tries several paths using the
backtracking rules (one of Dyckhoff's four rules for the left
implication, used to get rid of contraction, and the right-or rule).
\begin{figure}[tb]
\begin{center}
\fbox{\begin{minipage}{0.95\textwidth}
\begin{coq_example*}
Lemma tauto_ex1 : forall A B:Prop, A /\ B -> A \/ B.
Proof.
TautoProp.
Qed.
Lemma tauto_ex2 :
forall A B:Prop, (~ ~ B -> B) -> (A -> B) -> ~ ~ A -> B.
Proof.
TautoProp.
Qed.
\end{coq_example*}
\end{minipage}}
\end{center}
\caption{Proofs of tautologies with {\tt TautoProp}}
\label{tautolem}
\end{figure}
For example, with {\tt TautoProp}, we can prove tautologies like those of
Figure~\ref{tautolem}.
\subsection{Deciding type isomorphisms}
A trickier problem is to decide equalities between types modulo
isomorphisms. Here, we choose to use the isomorphisms of the simply typed
$\lb{}$-calculus with Cartesian product and $unit$ type (see, for example,
\cite{RC95}). The axioms of this $\lb{}$-calculus are given in
Figure~\ref{isosax}.
\begin{figure}
\begin{center}
\fbox{\begin{minipage}{0.95\textwidth}
\begin{coq_eval}
Reset Initial.
\end{coq_eval}
\begin{coq_example*}
Open Scope type_scope.
Section Iso_axioms.
Variables A B C : Set.
Axiom Com : A * B = B * A.
Axiom Ass : A * (B * C) = A * B * C.
Axiom Cur : (A * B -> C) = (A -> B -> C).
Axiom Dis : (A -> B * C) = (A -> B) * (A -> C).
Axiom P_unit : A * unit = A.
Axiom AR_unit : (A -> unit) = unit.
Axiom AL_unit : (unit -> A) = A.
Lemma Cons : B = C -> A * B = A * C.
Proof.
intro Heq; rewrite Heq; reflexivity.
Qed.
End Iso_axioms.
\end{coq_example*}
\end{minipage}}
\end{center}
\caption{Type isomorphism axioms}
\label{isosax}
\end{figure}
The tactic to decide equalities modulo this axiomatization can be written as
shown in Figures~\ref{isosltac1} and~\ref{isosltac2}. The algorithm is quite
simple. Types are reduced using axioms that can be oriented (this is done by {\tt
MainSimplif}). The normal forms are sequences of Cartesian
products without Cartesian product in the left component. These normal forms
are then compared modulo permutation of the components (this is done by {\tt
CompareStruct}). The main tactic to be called and realizing this algorithm is
{\tt IsoProve}.
\begin{figure}
\begin{center}
\fbox{\begin{minipage}{0.95\textwidth}
\begin{coq_example}
Ltac DSimplif trm :=
match trm with
| (?A * ?B * ?C) =>
rewrite <- (Ass A B C); try MainSimplif
| (?A * ?B -> ?C) =>
rewrite (Cur A B C); try MainSimplif
| (?A -> ?B * ?C) =>
rewrite (Dis A B C); try MainSimplif
| (?A * unit) =>
rewrite (P_unit A); try MainSimplif
| (unit * ?B) =>
rewrite (Com unit B); try MainSimplif
| (?A -> unit) =>
rewrite (AR_unit A); try MainSimplif
| (unit -> ?B) =>
rewrite (AL_unit B); try MainSimplif
| (?A * ?B) =>
(DSimplif A; try MainSimplif) || (DSimplif B; try MainSimplif)
| (?A -> ?B) =>
(DSimplif A; try MainSimplif) || (DSimplif B; try MainSimplif)
end
with MainSimplif :=
match goal with
| |- (?A = ?B) => try DSimplif A; try DSimplif B
end.
Ltac Length trm :=
match trm with
| (_ * ?B) => let succ := Length B in constr:(S succ)
| _ => constr:1
end.
Ltac assoc := repeat rewrite <- Ass.
\end{coq_example}
\end{minipage}}
\end{center}
\caption{Type isomorphism tactic (1)}
\label{isosltac1}
\end{figure}
\begin{figure}
\begin{center}
\fbox{\begin{minipage}{0.95\textwidth}
\begin{coq_example}
Ltac DoCompare n :=
match goal with
| [ |- (?A = ?A) ] => reflexivity
| [ |- (?A * ?B = ?A * ?C) ] =>
apply Cons; let newn := Length B in DoCompare newn
| [ |- (?A * ?B = ?C) ] =>
match eval compute in n with
| 1 => fail
| _ =>
pattern (A * B) at 1; rewrite Com; assoc; DoCompare (pred n)
end
end.
Ltac CompareStruct :=
match goal with
| [ |- (?A = ?B) ] =>
let l1 := Length A
with l2 := Length B in
match eval compute in (l1 = l2) with
| (?n = ?n) => DoCompare n
end
end.
Ltac IsoProve := MainSimplif; CompareStruct.
\end{coq_example}
\end{minipage}}
\end{center}
\caption{Type isomorphism tactic (2)}
\label{isosltac2}
\end{figure}
Figure~\ref{isoslem} gives examples of what can be solved by {\tt IsoProve}.
\begin{figure}
\begin{center}
\fbox{\begin{minipage}{0.95\textwidth}
\begin{coq_example*}
Lemma isos_ex1 :
forall A B:Set, A * unit * B = B * (unit * A).
Proof.
intros; IsoProve.
Qed.
Lemma isos_ex2 :
forall A B C:Set,
(A * unit -> B * (C * unit)) =
(A * unit -> (C -> unit) * C) * (unit -> A -> B).
Proof.
intros; IsoProve.
Qed.
\end{coq_example*}
\end{minipage}}
\end{center}
\caption{Type equalities solved by {\tt IsoProve}}
\label{isoslem}
\end{figure}
%%% Local Variables:
%%% mode: latex
%%% TeX-master: "Reference-Manual"
%%% End:
\documentclass{article}
% Language set to English
\usepackage[english]{babel}
% Set page size and margins
\usepackage[letterpaper,top=2cm,bottom=2cm,left=3cm,right=3cm,marginparwidth=1.75cm]{geometry}
% Useful packages
\usepackage{amsmath}
\usepackage{graphicx}
\usepackage[colorlinks=true, allcolors=blue]{hyperref}
\title{Source Control for Writers}
\author{Jennifer Manzella}
\begin{document}
\maketitle
\begin{abstract}
This document covers local Git configuration and basic Git workflow.
\end{abstract}
\section{Introduction}
In this tutorial, I will cover local git configuration and basic workflow.
If you don't already have an account, please begin by creating one through
the \href{https://www.github.com}{GitHub website}. By the end of the
tutorial, you will understand the following:
\subsection{Local Configuration}
\begin{itemize}
\item Configuring Git on Your Local System
\item Generating a Public Key
\item Adding Public Key to Github Account
\item Cloning Your Forked Repository
\item Setting Upstream Remotes
\end{itemize}
\subsection{Git Workflow}
\begin{itemize}
\item Understanding Workflow
\item Creating Your Working Branch
\item Working with Branches
\item Pushing Local Changes Upstream
\item Generating Diff Files
\item Examples
\item Secure Copy
\item Squashing and Formatting Commit
\item Creating a PR
\item Useful Git Commands
\end{itemize}
\section{Git Install}
Start by installing Git on your local computer if you haven't already.
The GitHub installation guide is
\href{https://github.com/git-guides/install-git}{here}.
\subsection{Initialize Environment}
After installation completes, you can set up your local environment.
Open the terminal by typing ``cmd'' in the search field. If Git installed
successfully, you should be able to verify the installation and set your
identity from the command line.
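A minimal check and first-time configuration might look like the following
(the name and email shown are placeholders; replace them with your own):
\begin{verbatim}
# Confirm the installation
git --version

# Tell Git who you are (recorded in commit metadata)
git config --global user.name "Your Name"
git config --global user.email "[email protected]"
\end{verbatim}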
\end{document}
\documentclass{article}
\usepackage{amsmath} % provides the cases environment used below
\usepackage{graphicx}
\graphicspath{{images/}}
\begin{document}
\title{A0--MinMax Algorithm Analysis}
\author{Michael Fulton}
\date{February 3, 2015}
\maketitle
\section{Recursive Analysis}
The function which relates cost functions to number of inputs for my recursive algorithm is as follows:
\[
C(N)=
\begin{cases}
2C(\frac{N}{2})+2 & \mbox{if } N > 2 \\
1 & \mbox{if } N = 2 \\
0 & \mbox{if } N \leq 1
\end{cases}
\]
\[f(n) = \begin{cases} n/2 & \mbox{if } n \equiv 0 \pmod{2} \\
(3n+1)/2 & \mbox{if } n \equiv 1 \pmod{2}. \end{cases}\]
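Assuming the base case $C(2)=1$ and that $N$ is a power of two (an
assumption made only for this back-of-the-envelope check), the recurrence
can be unrolled:
\[
C(N) + 2 = 2\left(C(\tfrac{N}{2}) + 2\right) = \cdots = \tfrac{N}{2}\left(C(2) + 2\right) = \tfrac{3N}{2},
\]
so the recursive algorithm performs $C(N) = \frac{3N}{2} - 2$ comparisons.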
\section{Iterative Analysis}
\newpage
\section{Plots}
\centering
\includegraphics[scale=0.6]{plot.png}
\end{document}
\chapter{Related Work}
There are also other studies investigating other aspects of Polly, the polyhedral or alternative technologies.
\section{Alternative Studies}
Andreas Simbürger et al. \cite{PolyhedralEmpiricalStudy} investigated the potential of polyhedral compilation in an empirical study.
They also compare the coverages of dynamic and static analysis and conclude that the polyhedral model \enquote{is a well-studied and promising approach to automatic program optimization} \cite{PolyhedralEmpiricalStudy},
but also mention \enquote{that the current practical implementations of the polyhedral model do not achieve practically relevant execution coverages, when applied to real-world programs at compile time} \cite{PolyhedralEmpiricalStudy}.\\
In contrast to this thesis, that study neither investigates the reasons why parents of \scops{} are rejected nor the potential of extending them.
\section{Alternative Extensions}\label{sec:altExt}
There are other studies which already investigated certain aspects of rejection reasons and how to possibly solve some of them.\\
In \cite{spolly}, the rejection reasons and the impact of speculative extensions to Polly are also investigated.
That paper determines that many of the rejection reasons result from the \enquote{conservative overapproximation of the employed static analysis} \cite{spolly}.
This motivated the SPolly approach, which integrates run-time knowledge and features to replace the overapproximation.
According to the paper, SPolly is able to \enquote{effectively widen the applicability of polyhedral optimization} \cite{spolly}.
\section[Alternative Technologies]{Alternative Technologies \cite{PolyhedralEmpiricalStudy}}
The \llvm is not the only framework that can handle the polyhedral model.\\
There are other systems which extract \scops directly from the source code.
The first one was LooPo \cite{loopo}.
A current system is PoCC \cite{pocc}, which implements a full compiler tool chain for automatically applying optimizations based on the polyhedral model.
It supports two tools for transformations:
one is PLuTo \cite{pluto} and the other is LeTSeE \cite{latsee}.
In \cite{PolyhedralEmpiricalStudy}, both are described as follows:
\begin{quotation}\noindent
\enquote{The PLuTo scheduling algorithm implements a transformation that optimizes data locality on shared-memory systems.
Rather than generating the optimal solution, LeTSeE tries to converge on it iteratively by exploring the legal transformation space.}
\end{quotation}
\section{Part 1 -- Architecture}
%Describe the model you created.
%Here you will describe the identified architecture, the chosen design decisions, the relevant information about the produced models, such as smart solutions applied, how you organized the architectural specification in a modular way, \etc
The primary goal of our AADL model was to specify components of the Smart Fire Alarm system with enough detail to provide a reasonable estimate of data transmission latency between relevant devices. To do this, we organized elements of the network into three layers: (1) the top layer, which describes three sub-networks; (2) the middle layer which describes the physical devices forming each sub-network; and (3) the bottom layer describing internal details of some of these devices.
The first layer is formed by three sub-networks. The Sensor Network contains devices connected using short-ranged wireless communication technologies (Wi-fi, Bluetooth and Zigbee) and, in a real-world implementation, would be placed within the same house or building. The Communication Network provides a very high-level abstraction of the infrastructure that allows the connection between two remote locations over the Internet. Finally, the Control Center is also a high-level abstraction of the location where sensor data is gathered for analysis and also where an alarm signal is received if it is necessary to dispatch emergency services.
\autoref{fig:smartfirealarm} shows a broad overview of the system and how the three sub-networks connect to each other. The Sensor Network connects to the Communication Network through two ports, depending on which communication technology is used, and data is then forwarded into the Control Center over a wired Internet connection.
%\includegraphics[width=\textwidth]{SensorUnit.png}
\begin{figure*}[h]
\caption{General structure of the system}
\label{fig:smartfirealarm}
\centering
\includegraphics[width=0.9\textwidth]{SmartFireAlarm}
\end{figure*}
\begin{figure*}[h]
\caption{Structure of the Sensor Network}
\label{fig:sensornetwork}
\centering
\includegraphics[width=0.9\textwidth]{SensorNetwork}
\end{figure*}
\begin{figure*}[h]
\caption{Internal structure of a Sensor Unit}
\label{fig:sensorunit}
\centering
\includegraphics[width=0.9\textwidth]{SensorUnit}
\end{figure*}
\begin{figure*}[h]
\caption{Structure of the Communication Network}
\label{fig:communicationnetwork}
\centering
\includegraphics[width=0.9\textwidth]{CommunicationNetwork}
\end{figure*}
\begin{figure}[h]
\caption{Structure of the Control Center}
\label{fig:controlcenter}
\centering
\includegraphics[width=0.45\textwidth]{ControlCenter}
\end{figure}
\subsection{The Sensor Network}
The Sensor Network represents the devices placed within a house or building which together have three purposes: first, collecting environment data that might indicate the presence of a fire; second, sending that data to the Control Center; and third, providing an interface for a local user to monitor the data and configure the sensors. \autoref{fig:sensornetwork} shows the arrangement of the Sensor Network.
To enable this, the Sensor Network is formed by four types of devices:
\begin{enumerate}
\item Sensor Units, which collect raw environment data;
\item Communication Units, which receive data from the Sensor Units and forwards it to the relevant destinations;
\item Mobile Phones, which provide a user interface for monitoring and configuring the other devices; and
\item one Modem, which connects the Sensor Network to the Internet using wired infrastructure.
\end{enumerate}
The Sensor Units contain five sensors to collect relevant environmental data: ambient temperature, humidity level, carbon monoxide (CO) and dioxide (CO$_2$) concentration, and GPS coordinates.
\autoref{fig:sensorunit} shows the internal structure of a Sensor Unit.
Since the network can contain multiple Sensor Units (for different rooms inside a house, for example), it is possible to detect precisely where a fire starts and which rooms are affected.
Sensor Units collect raw environmental data and, using an internal process, combine this information into a single block of data.
This internal process features a thread that runs at an interval of 100ms to collect and combine data received from sensors.
The combined block of data is then transmitted to the Communication Device using a Zigbee connection. The number of Sensor Units in the network can be configured in a constant value located in \texttt{SensorNetwork.aadl}. Furthermore, the Sensor Units have two modes corresponding to their power status. They can be turned on or off when an appropriate signal is received.
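As an illustration only (this sketch is not taken from the actual model
files, and the component and port names are hypothetical), such a periodic
collection thread could be declared in AADL roughly as follows:
\begin{verbatim}
-- Hypothetical sketch of a Sensor Unit's internal collector thread
thread Collector
features
  temperature_in : in data port;
  humidity_in    : in data port;
  combined_out   : out data port;
properties
  Dispatch_Protocol => Periodic;
  Period => 100 ms;
end Collector;
\end{verbatim}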
Meanwhile, two Communication Devices exist in the network to collect data from the Sensor Units and transmit it elsewhere. The two devices exist for redundancy, so the system can work even if one of them is faulty. There is also an intercommunication port between the two Communication Devices so they work in a synchronized fashion and can avoid duplicated data. Each Communication Device provides ample communication technologies:
\begin{itemize}
\item Zigbee to communicate with the Sensor Units;
\item Bluetooth to communicate with a Mobile Phone;
\item Wi-fi and Ethernet to connect to a Modem; and
\item GPRS for cellular communication.
\end{itemize}
Data collected from the Sensor Units is sent over Bluetooth to a Mobile Phone, which allows user interaction. The Wi-fi, Ethernet and GPRS communication technologies provide both redundancy and flexibility to the system. Ideally, an Ethernet connection provides lower latency communication to the Internet, but Wi-fi and GPRS allow the system to function even if there is a lack of wired infrastructure and permit continuous operation if one is for some reason unavailable.
Aside from providing a bridge between different devices, the Communication Device monitors the sensor data and compares values to a predetermined threshold. If the temperature or the CO/CO$_2$ concentration is too high, or the humidity level is too low, an alert signal is sent to the Mobile Phone and to the Control Center, so the necessary measures can be taken.
The Mobile Phone receives data over a Bluetooth connection and displays it in an application. This allows the user to know the current status of the network, view the current and historic sensor readings, and control the power of each Sensor Unit. Finally, the Modem receives data from the Communication Devices over Wi-fi or Ethernet and transmits packets to the Control Center over the Internet.
The Sensor Network as a whole has two ports to connect to the outside world: one represents a wired connection attached to the Modem, while another shows the cellular GPRS signal sent and received directly by the Communication Device.
\subsection{The Communication Network and the Control Center}
During the development of our model, emphasis was put on the Sensor Network, which we considered the most interesting and complex section of the system. Therefore, the Communication Network and Control Center are rather simple in comparison.
The Communication Network is formed by a Cell Tower, which receives a GPRS signal from the Communication Device and forwards it to the Internet Backbone. The Backbone itself receives wired signal from the Modem or the Cell Tower and abstracts the general Internet infrastructure. \autoref{fig:communicationnetwork} shows the structure of the Communication Network.
The Control Center is the final destination of the sensor readings. It contains a Database, which stores the sensor history for analysis, and an Alarm, which receives an alert signal sent from the Communication Device. With this signal, the designated authorities can dispatch emergency services to the precise location of the fire within a matter of seconds. The Alarm's possible states are modeled as modes: it can either be ringing or not, and switching between modes depends on a signal sent from the Communication Device. \autoref{fig:controlcenter} shows the structure of the Control Center.
\subsection{Auxiliary items}
To help with the structure of the model, a property set and two data types were defined.
The property set allows us to define custom attributes attached to components in the system.
In it, we defined custom units for data captured by the sensors: temperature is measured in degrees celsius (\degree C),
humidity is measured in grams per cubic meter (gpm$^3$),
CO and CO$_2$ concentration is measured in parts per million (ppm) and
GPS coordinates are tuples of latitude and longitude.
Each sensor has one or more properties representing the data captured by it.
The data types allow us to analyze the system considering the byte size of data sent through its connections. In our system, each sensor reading can have up to 16 bytes; thus, once the data from the five types of sensors is aggregated and sent through the network, it can be up to 80 bytes of data.
Our analysis is concerned with the connection latency between pairs of components and, therefore, does not suffer influence from the information in the property set and data types.
%\limit{6}
\documentclass[10pt]{art}
\usepackage{multirow, minted, tabularx, environ, booktabs, setspace, makecell, caption, joinargs, colortbl, pdflscape, ct, sset}
\usepackage[prefix=bonak]{xkeymask}
\usepackage[a4paper, inner=25mm, outer=25mm, top=25mm, bottom=25mm]{geometry}
% Simplicial and Cubical sets
\newcommand{\DeltaHat}{\ensuremath{\hat{\boldsymbol{\Delta}}}}
\newcommand{\Cube}[1]{\ensuremath{\boldsymbol{\square^{#1}}}}
\renewcommand{\I}[1]{\ensuremath{\mathsf{I}^{#1}}}
\newcommand{\CSet}{\ensuremath{\mathsf{Set}_{\boldsymbol{\square}}}}
% Magic with xkeyval to go over the 9-argument limit of LaTeX
\makeatletter
\define@cmdkey[bonak]{X}{D}{(#1)}
\define@cmdkey[bonak]{frame}{D}{(#1)}
\define@cmdkey[bonak]{layer}{D}{(#1)}
\define@cmdkey[bonak]{layer}{d}{(#1)}
\define@cmdkey[bonak]{filler}{D}{(#1)}
\define@cmdkey[bonak]{filler}{E}{(#1)}
\define@cmdkey[bonak]{filler}{d}{(#1)}
\define@cmdkey[bonak]{fullframe}{D}{(#1)}
\define@cmdkey[bonak]{restrframe}{D}{(#1)}
\define@cmdkey[bonak]{restrframe}{d}{(#1)}
\define@cmdkey[bonak]{restrlayer}{D}{(#1)}
\define@cmdkey[bonak]{restrlayer}{d}{(#1)}
\define@cmdkey[bonak]{restrlayer}{l}{(#1)}
\define@cmdkey[bonak]{restrfiller}{D}{(#1)}
\define@cmdkey[bonak]{restrfiller}{E}{(#1)}
\define@cmdkey[bonak]{restrfiller}{d}{(#1)}
\define@cmdkey[bonak]{restrfiller}{c}{(#1)}
\define@cmdkey[bonak]{cohframe}{D}{(#1)}
\define@cmdkey[bonak]{cohframe}{d}{(#1)}
\define@cmdkey[bonak]{cohlayer}{D}{(#1)}
\define@cmdkey[bonak]{cohlayer}{d}{(#1)}
\define@cmdkey[bonak]{cohlayer}{l}{(#1)}
\define@cmdkey[bonak]{cohfiller}{D}{(#1)}
\define@cmdkey[bonak]{cohfiller}{E}{(#1)}
\define@cmdkey[bonak]{cohfiller}{d}{(#1)}
\define@cmdkey[bonak]{cohfiller}{c}{(#1)}
\makeatother
% Truncated sets, otherwise referred to as X: the general macro
\newcommandx{\X}[3][1,2,3]{
\mathsf{\color{carolina}{X}}_{#1}^{#2}
\setkeys*[bonak]{X}{#3}
}
% Frame, layer, and filler: the general macro
\newcommandx{\prim}[6][2,3,4,5,6]{
\ensuremath{\mathsf{\color{indian-yellow}{#1}}_{#2}^{
\joinargs{}[#3][#4][#5]}}
\setkeys*[bonak]{#1}{#6}
}
% Restriction for frame, layer, and filler: the general macro
\newcommandx{\restr}[8][2,3,4,5,6,7,8]{
\ensuremath{\mathsf{\color{russian-green}{restr}}_{
\mathsf{\color{indian-yellow}{#1}}
\joinargs{,}[#2][#3][#4]}^{\joinargs{}[#5][#6][#7]}}
\setkeys*[bonak]{restr#1}{#8}
}
% Coherence conditions for frame, layer, and filler: the general macro
% Hack: we'd like \coh to take 10 arguments, with the 10th being passed to
% xkeyval, but this is not possible. The only possible compromise is to join
% #7 and #8 by hand in usage of \coh: it's now #7 or '#6,#7' in \cohfoo commands
\newcommandx{\coh}[9][2,3,4,5,6,7,8,9]{
\ensuremath{\mathsf{\color{chestnut}{coh}}_{
\mathsf{\color{indian-yellow}{#1}}
\joinargs{,}[#2][#3][#4][#5][#6]}^{\joinargs{}[#7][#8]}}
\setkeys*[bonak]{coh#1}{#9}
}
% Universe, referred to as U
\newcommand{\U}[1]{\mathsf{\color{spanish-blue}{U}}_{#1}}
% The unit type
\newcommand{\unittype}{\mathsf{unit}}
\newcommand{\unitpoint}{\star}
% Definitional and extensional equality
\newcommand{\defeq}{\triangleq}
\newcommand{\eqHSet}{\ensuremath{\equiv_{\mathit{HSet}}}}
\newcommand{\reflHSet}{\ensuremath{\mathsf{refl}_{\mathit{HSet}}}}
% Some abbreviations
\newcommand{\imp}{\rightarrow}
\newcommand{\overright}[1]{\overrightarrow{#1}}
\renewcommand{\D}{D}
\newcommand{\hdD}{D.1}
\newcommand{\tlD}{D.2}
\renewcommand{\d}{d}
\newcommand{\E}{E}
\renewcommand{\l}{l}
\renewcommand{\c}{c}
\newcommand{\pair}[2]{#1, #2}
% Fancy headers
\pagestyle{fancy}
\fancyhf{}
\fancyhead[R]{\footnotesize\textcolor{gray80}{{\thepage / \pageref{LastPage}}}}
\renewcommand{\sectionmark}[1]{\markboth{}{\thesection.\ #1}}
\fancyhead[L]{\footnotesize\textcolor{gray80}{{\MakeUppercase{\rightmark}}}}
% Section and subsection styles
\renewcommand{\thesection}{\Roman{section}}
\titleformat{\section}{\centering\scshape\Large\color{raspeberry}}{\thesection}{0.7em}{}
\titleformat{\subsection}{\strongenv\large\color{gray80}}{\thesubsection}{0.7em}{}
% Pack enumerate items and bibitems
\setlist{nolistsep}
\setlength{\bibitemsep}{.2\baselineskip plus .05\baselineskip minus .05\baselineskip}
% The eqntable environment, displaying the various
\newcolumntype{Y}{>{\centering\arraybackslash}X}
\NewEnviron{eqntable}[1]{
\begin{table}[H]
\small
\begin{tabularx}{\linewidth}{@{}>{$}l<{$} >{$}c<{$} >{$}c<{$} >{$}Y<{$} @{}}
\toprule
\BODY
\bottomrule
\end{tabularx}
\caption{#1}
\end{table}
}
% The label, since we're including twice
\newcommand*{\lab}{}
% \midrule color
\def\graymidrule{\arrayrulecolor{gray30}\midrule\arrayrulecolor{gray65}}
% A block in the eqntable environment
\NewDocumentCommand{\eqnline}{m m m m}{#1 & #2 & #3 & #4 \\}
\newcommandx*{\mc}[1]{\multicolumn{4}{c}{\emph{#1}} \\\\}
% \eqnarg is slightly more involved: output ( ... ) if not in mask,
% and { ... } if in mask
\newcommandx*{\eqnarg}[3]{\ifinmask[bonak]{#1}[#2]{\{#2:#3\}}{(#2:#3)}}
% Table caption set up
\DeclareCaptionFormat{hfillstart}{\hfill#1#2#3\par}
\DeclareCaptionFont{mdit}{\mdseries\itshape}
\captionsetup[table]{
justification=centering,
font=bf,
labelfont=mdit,
}
% Code listings
\usemintedstyle{solarized-dark}
\setminted{escapeinside=~~}
\setmintedinline{escapeinside=~~}
% Fonts
\setmainfont{Avenir Next}
\setmonofont{Source Code Pro}
\defaultfontfeatures{Ligatures=TeX, Scale=MatchUppercase}
\title{An indexed construction of semi-simplicial and semi-cubical types}
\author{
\textcolor{gray80}{Ramkumar Ramachandra} \\
\itshape \textcolor{gray80}{Université Paris-Cité}
\and
\textcolor{gray80}{Hugo Herbelin} \\
\itshape \textcolor{gray80}{Institut de Recherche en Informatique Fondamentale}
}
\date{}
\color{gray65}
\begin{document}
\thispagestyle{empty}
\maketitle
\begin{abstract}
We present a model of homotopy type theory in dependent type theory, formalizing augmented semi-simplicial sets in a way that naturally generalizes to semi-cubical sets as well. The accompanying artifact is an axiom-less~\footnote{Technically speaking, we use some axioms related to functional extensionality, but the requirement arises only from our generalization to $\nu$-types; if $\nu$ is finite (say, the unit type, corresponding to augmented semi-simplicial types), this requirement vanishes.} Coq formalization that uses sophisticated rewriting techniques and pushes the boundaries of proof assistant technology.
\end{abstract}
\tableofcontents
\newpage
\section{Overture}
Cubical type theory~\cite{Bezem14,Cohen16,Angiuli17} is an extension of dependent type theory that provides a computational interpretation of Voevodsky's \emph{univalence axiom}, in a field known as \emph{homotopy type theory}, or HoTT. The most significant practical implementation of cubical type theory is an experimental branch of Agda, known as \emph{Cubical Agda}~\cite{Vezzosi21}. In the set-theoretic setting, cubical type theory is usually modeled by cubical sets.
Our contribution is the following. We have modelled the core of semi-cubical sets in a way that naturally generalizes to augmented semi-simplicial sets in type theory; we call this generalization \emph{$\nu$-sets}. In contrast to previous efforts that use a fibered representation, we use an indexed representation. Our work is, in principle, axiom-free: the accompanying artifact uses functional extensionality, but this requirement disappears if $\nu$ is finite.
\begin{table}[H]
\begin{tabularx}{\linewidth}{p{.3\linewidth}|p{.3\linewidth}|p{.3\linewidth}}
\toprule
& Set theory & Type theory \\
\graymidrule
Fibered & CCHM & \\
\graymidrule
Indexed & & Our work \\
\bottomrule
\end{tabularx}
\end{table}
The outline of the paper is as follows. First, we briefly describe augmented semi-simplicial and semi-cubical sets as they appear in the mathematical literature, and our generalization to $\nu$-sets. We then proceed to explain our construction, with the help of diagrams. Before spelling out the details of our formalization in type-theoretic language, we include a section on reasoning about sets in type theory.
\section{Mathematical treatment of \texorpdfstring{$\nu$}{nu}-sets}
It is well known that simplicial sets exist purely in the imagination of the mathematician, as every simplicial set has an infinite number of degenerate simplices. What we have formalized are \emph{augmented} semi-simplicial sets and semi-cubical sets, which is to say the usual simplicial and cubical sets minus the degeneracies. Since these objects are of limited mathematical interest on their own, and indeed we plan to extend our work to include degeneracies in the future, we only briefly explain the mathematical objects we have formalized.
\subsection{Augmented semi-simplicial sets}
Semi-simplicial sets are otherwise known as \emph{delta-sets} in the mathematical literature. Since they are of little interest to the mathematician, we only briefly describe \DeltaHat, which is identical to \Simplex{}, except that the maps $\sq{m} \rightarrow \sq{n}$ are \emph{strictly} order-preserving. The degeneracies then vanish, and we are left with the task of defining gluing conditions solely on the basis of face maps. There is little interest in studying \DeltaHat\ in its own right, since maps between delta-sets are not well-defined, and it is therefore not possible to construct a category of delta-sets.
\subsection{Semi-cubical sets}
We might naively attempt to define $\Cube{}$ identically to \DeltaHat, but let us briefly explain why this wouldn't work, motivating the definition of semi-cubical sets as in \cite{Antolini00}. If there were morphisms from every $\sq{m}$ to \sq{n}, we would end up with:
$$
\begin{tikzcd}
\bullet \arrow[r] \arrow[dr] \arrow[d] & \bullet \arrow[d] \\
\bullet \arrow[r] \arrow[ur] & \bullet
\end{tikzcd}
$$
where the filling conditions are conflated with the cube itself. $\Simplex{n}$ can be defined quite simply as the convex hull of $n + 1$ points, but even defining the standard $n$-cube becomes a problem if we start from $\sq{n}$; the situation becomes much more amenable if we define:
\begin{definition}[\Cube{n}]
\begin{equation*}
\Cube{n} := \I{n} = \sq{0, 1}^n
\end{equation*}
\end{definition}
\begin{example}[$\Cube{0}$, $\Cube{1}$ and $\Cube{2}$]
$\Cube{0}$ can be drawn as:
$$
\begin{tikzcd}
0 \equiv 1
\end{tikzcd}
$$
$\Cube{1}$ can be drawn as:
$$
\begin{tikzcd}
0 \arrow[r, dash] & 1
\end{tikzcd}
$$
and $\Cube{2}$ can be drawn as:
$$
\begin{tikzcd}
(0, 1) \arrow[r, dash] & (1, 1) \arrow[d, dash] \\
(0, 0) \arrow[u, dash] & (1, 0) \arrow[l, dash]
\end{tikzcd}
$$
\end{example}
Here, $\I{n}$ serves the purpose of \sq{n}, but this change will cascade into other definitions. In view of defining a category \CSet, we will restrict the morphisms in \Cube{}.
\begin{definition}[\Cube{}]
\begin{align*}
\obj(\Cube{}) & := \I{n} \\
\mor(\Cube{}) & := \delta^\epsilon_i : \I{n + 1} \rightarrow \I{n}
\end{align*}
where $\delta^\epsilon_i$ must satisfy the corresponding face map condition, for $i < j$:
\begin{equation*}
\delta^\epsilon_i \delta^\omega_j = \delta^\omega_{j - 1} \delta^\epsilon_i
\end{equation*}
where $\epsilon$ and $\omega$ correspond to opposite faces.
\end{definition}
\begin{definition}[\CSet]
Just as in \SSet, we define semi-cubical sets as the functor category:
\begin{equation*}
\CSet := \Set^{\Cube{}^{op}}
\end{equation*}
\end{definition}
Or, in terms of objects and morphisms:
\begin{definition}[$\CSet$ in terms of objects and morphisms]
\begin{align*}
\obj(\CSet) & := X_n \\
\mor(\CSet) & := X_\lambda, \text{where $\lambda$ is a \Cube{}-morphism}
\end{align*}
where we term $X_n$ the $n$-cube, and $X_\lambda$ the ``face map'', defined similarly:
\begin{align*}
X_n & := X(\I{n}), \text{where X is the semi-cubical set} \\
X_\lambda & := X(\lambda)
\end{align*}
\end{definition}
\begin{theorem}
$\CSet$ does not admit degeneracies.
\begin{proof}
The reader is advised to refer to \cite{Antolini00} for the proof.
\end{proof}
\end{theorem}
\subsection{Generalization to \texorpdfstring{$\nu$}{nu}}
\begin{table}[H]
\begin{tabularx}{\linewidth}{p{.2\linewidth}|p{.4\linewidth}|p{.3\linewidth}}
\toprule
Value of $\nu$ & 1 & 2 \\
\graymidrule
Interpretation & Augmented semi-simplicial types & Semi-cubical types \\
\bottomrule
\end{tabularx}
\end{table}
\section{Our construction}
In this section, we briefly explain the difference between the fibered and indexed representations, and illustrate our approach to the construction, with the help of some figures.
\subsection{Fibered versus indexed representation}
The fibered representation can be illustrated as:
\begin{equation*}
\begin{tikzcd}
X_0 : \U{} & X_1 : \U{} \arrow[l, "\delta^\epsilon_0" description, shift left=2] \arrow[l, "\delta^\omega_0" description, shift right=2] & X_2 : \U{} \arrow[l, "\delta^\epsilon_1" description, shift left=6] \arrow[l, "\delta^\epsilon_0" description, shift left=2] \arrow[l, "\delta^\omega_0" description, shift right=2] \arrow[l, "\delta^\omega_1" description, shift right=6] & \ldots
\end{tikzcd}
\end{equation*}
The indexed representation can be illustrated as:
\begin{align*}
X_0 & : \U{} \\
X_1 & : X_0 \times X_0 \rightarrow \U{} \\
X_2 & : \forall\, a\, b\, c\, d,\ X_1\,(a, b) \rightarrow X_1\,(b, c) \rightarrow X_1\,(c, d) \rightarrow X_1\,(d, a) \rightarrow \U{} \\
\ldots
\end{align*}
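For concreteness, the first levels of the indexed presentation can be packaged as a dependent record. The following is a minimal sketch in Lean syntax with hypothetical names, mirroring the illustration above for squares; the accompanying artifact itself is written in Coq.
\begin{verbatim}
-- Minimal sketch (hypothetical names): a 2-truncated indexed "square" type.
structure TruncatedSet2 where
  X0 : Type
  X1 : X0 -> X0 -> Type
  X2 : (a b c d : X0) -> X1 a b -> X1 b c -> X1 c d -> X1 d a -> Type
\end{verbatim}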
One way to obtain one construction from the other is via the \emph{Grothendieck construction}, although this is not relevant to our work.
\subsection{Iterated parametricity}
\section{Reasoning about sets in type theory}
Since proof irrelevance is an inherent part of set theory and first-order logic, and proof irrelevance is \emph{extensional} in type theory, we work in a universe known as \texttt{HSet}, which we describe shortly.
\subsection{Unicity of identity proofs}
UIP is a flavor of proof-irrelevance:
\begin{align*}
\forall x y, \forall p q : x = y, p = q
\end{align*}
which is to say that any two proofs of \emph{equality} between the same two terms are \emph{equal}. In other words, the proofs cannot be distinguished from one another.
In type theory, UIP can be proved on the unit type:
\begin{align*}
\mathsf{eq\_refl} & : \forall x, x = x \\
\mathsf{UIP\_refl\_unit} & : \forall x : \top = \top, x = \mathsf{eq\_refl}
\end{align*}
\subsection{HSet}
\texttt{HSet} provides us with a restricted setting for UIP, and our implementation of \texttt{HSet} is a straightforward packaging of two fields:
\begin{align*}
\mathsf{Dom} & : \mathsf{Type} \\
\mathsf{UIP} & : \forall x y : \mathsf{Dom}, \forall p q : x = y, p = q
\end{align*}
In the \texttt{HSet} universe, three properties hold:
\begin{enumerate}
\item[(i)] UIP holds on the unit type.
\item[(ii)] UIP propagates to $\Sigma$-types (sketched below).
\item[(iii)] UIP propagates to $\Pi$-types, with some additional functional extensionality axioms.
\end{enumerate}
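To illustrate property (ii), here is a sketch of the argument (an informal outline, not the literal statement from the artifact): given $A : \mathit{HSet}$ and $B : A \imp \mathit{HSet}$, an equality between dependent pairs in $\Sigma\, x{:}A.\, B\,x$ decomposes into an equality of first components together with an equality of (transported) second components; UIP on $A$ makes the former unique and UIP on the fibers of $B$ makes the latter unique, so that
\begin{equation*}
A : \mathit{HSet}, \quad B : A \imp \mathit{HSet} \quad \vdash \quad \Sigma\, x{:}A.\, B\,x : \mathit{HSet}.
\end{equation*}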
\section{Type-theoretic construction of \texorpdfstring{$\nu$}{nu}-sets}
% Abbreviated tables in this section
%
% The rule for implicit arguments for each table:
% show 1 explicit in first two parts
% show E + 1 explicit in the third part
\appendmask[bonak]{layer}[D]
\appendmask[bonak]{filler}[D]
\appendmask[bonak]{restrframe}[D]
\appendmask[bonak]{restrlayer}[D, d]
\appendmask[bonak]{restrfiller}[D, d]
\appendmask[bonak]{cohframe}[D]
\appendmask[bonak]{cohlayer}[D, d]
\appendmask[bonak]{cohfiller}[D, d]
% Now, we write the type theory bit, and input the abbreviated tables
% Truncated sets, otherwise referred to as X
% Keep all arguments
\newcommandx{\Xp}[1]{\X[#1][][]}
\newcommandx{\Xto}[3][3=]{\X[#1][<#2][#3]}
\newcommandx{\Xcomp}[3][3=]{\X[#1][=#2][#3]}
\newcommandx{\Xfrom}[3][3=]{\X[#1][\geq#2][#3]}
% Frame, layer, and filler
% Drop the universe letter
\newcommandx{\framep}[5][1,2,3,4,5]{\prim{frame}[][#2][#3][#4][#5]}
\newcommandx{\layer}[5][1,2,3,4,5]{\prim{layer}[][#2][#3][#4][#5]}
\newcommandx{\filler}[5][1,2,3,4,5]{\prim{filler}[][#2][#3][#4][#5]}
% Restriction for frame, layer, and filler
% Drop the universe letter and q
\newcommandx{\restrf}[7][1,2,3,4,5,6,7]{\restr{frame}[][#2][][#4][#5][#6][#7]}
\newcommandx{\restrl}[7][1,2,3,4,5,6,7]{\restr{layer}[][#2][][#4][#5][#6][#7]}
\newcommandx{\restrc}[7][1,2,3,4,5,6,7]{\restr{filler}[][#2][][#4][#5][#6][#7]}
% Coherence conditions for frame, layer, and filler
% Keep only ε, ω; n, p; proposition on n and p
\newcommandx{\cohf}[9][1,2,3,4,5,6,7,8,9]{\coh{frame}[][#2][#3][][][][][#9]}
\newcommandx{\cohl}[9][1,2,3,4,5,6,7,8,9]{\coh{layer}[][#2][#3][][][][][#9]}
\newcommandx{\cohc}[9][1,2,3,4,5,6,7,8,9]{\coh{filler}[][#2][#3][][][][][#9]}
The definition in \ref{tab:coind}, the coinductive limit, is dispatched over tables \ref{tab:core}, \ref{tab:frames}, \ref{tab:faces} and \ref{tab:coh}. They describe the structure of the underlying higher-dimensional relations on which fillers are built.
\renewcommand*{\lab}{tab:coind}
\input{tab-coind.tex}
% For just the second table, keep the universe letter in fullframe
\newcommandx{\fullframe}[3][1,2,3]{\prim{fullframe}[#1][#2][][][#3]}
We now describe \emph{bare truncated sets}: \emph{bare} because they can be seen as defining truncated $\nu$-sets without face maps, and \emph{truncated} because they are $n$-truncated.
\renewcommand*{\lab}{tab:core}
\input{tab-core.tex}
% Drop the universe letter in fullframe for all remaining tables
\renewcommandx{\fullframe}[3][1,2,3]{\prim{fullframe}[][#2][][][#3]}
\renewcommand*{\lab}{tab:frames}
\input{tab-frames.tex}
% Truncated sets, otherwise referred to as X
% Drop the universe letter for the third and fourth tables
\renewcommandx{\Xto}[3][3]{\X[][<#2][#3]}
\renewcommandx{\Xcomp}[3][3]{\X[][=#2][#3]}
\renewcommandx{\Xfrom}[3][3]{\X[][\geq#2][#3]}
% Hack to remove , in \cohf
\renewcommandx{\cohf}[9][1,2,3,4,5,6,7,8,9]{\coh{frame}[][#2][#3][][][][][#9]}
In the way we have chosen to formulate these tables, there is, for instance, a coherence condition $\cohf$ on both sides of the equality in $\restrl$ and $\restrc$, to ensure that both sides have the same type.
For a given $n$, the coherence conditions evaluate to a reflexivity proof, so that the construction evaluates to an effective sequence of types of iterated relations that no longer mentions $\restrf$ or $\cohf$.
% The third table mentions cohframe; keep ε, ω in this instance
\renewcommandx{\cohf}[9][1,2,3,4,5,6,7,8,9]{\coh{frame}[][#2][#3][][][#6,#7][#8][#9]}
\renewcommand*{\lab}{tab:faces}
\input{tab-faces.tex}
% Restriction for frame, layer, and filler
% Drop just the universe letter for the coh table
\renewcommandx{\restrf}[7][1,2,3,4,5,6,7]{\restr{frame}[][#2][#3][#4][#5][#6][#7]}
\renewcommandx{\restrl}[7][1,2,3,4,5,6,7]{\restr{layer}[][#2][#3][#4][#5][#6][#7]}
\renewcommandx{\restrc}[7][1,2,3,4,5,6,7]{\restr{filler}[][#2][#3][#4][#5][#6][#7]}
% Frame, layer, and filler
% Drop all arguments for coh table; they're not present in coind table.
\renewcommandx{\framep}[5][1,2,3,4,5]{\prim{frame}[][][][][#5]}
\renewcommandx{\layer}[5][1,2,3,4,5]{\prim{layer}[][][][][#5]}
\renewcommandx{\filler}[5][1,2,3,4,5]{\prim{filler}[][][][][#5]}
% Hack to remove , in \cohf
\renewcommandx{\cohf}[9][1,2,3,4,5,6,7,8,9]{\coh{frame}[][#2][#3][][][][][#9]}
The proof of $\cohf$ requires a higher-dimensional coherence condition, which we obtain by working in \texttt{HSet}. If proofs of the same equality were not equated, there would be a need for arbitrarily many higher-dimensional coherences (see e.g.~\cite{Herbelin15} for a discussion of the de facto need for recursive higher-dimensional coherence conditions when formulating higher-dimensional structures in type theory).
% Coherence conditions for frame, layer, and filler
% Drop universe letter, ε, and ω
\renewcommandx{\cohf}[9][1,2,3,4,5,6,7,8,9]{\coh{frame}[][][][#4][#5][#6,#7][#8][#9]}
\renewcommandx{\cohl}[9][1,2,3,4,5,6,7,8,9]{\coh{layer}[][][][#4][#5][#6,#7][#8][#9]}
\renewcommandx{\cohc}[9][1,2,3,4,5,6,7,8,9]{\coh{filler}[][][][#4][#5][#6,#7][#8][#9]}
\renewcommand*{\lab}{tab:coh}
\input{tab-coh.tex}
\newpage
\section{Future work}
\newpage
\appendix
\section{Tables in full detail}
% Full tables in this section
%
% Clear the mask and label
\clearmask
% Keep all arguments for all macros
% Frame, layer, and filler
\renewcommandx{\framep}[5][1,2,3,4,5]{\prim{frame}[#1][#2][#3][#4][#5]}
\renewcommandx{\layer}[5][1,2,3,4,5]{\prim{layer}[#1][#2][#3][#4][#5]}
\renewcommandx{\filler}[5][1,2,3,4,5]{\prim{filler}[#1][#2][#3][#4][#5]}
\renewcommandx{\fullframe}[3][1,2,3]{\prim{fullframe}[#1][#2][][][#3]}
% Restriction for frame, layer, and filler
\renewcommandx{\restrf}[7][1,2,3,4,5,6,7]{\restr{frame}[#1][#2][#3][#4][#5][#6][#7]}
\renewcommandx{\restrl}[7][1,2,3,4,5,6,7]{\restr{layer}[#1][#2][#3][#4][#5][#6][#7]}
\renewcommandx{\restrc}[7][1,2,3,4,5,6,7]{\restr{filler}[#1][#2][#3][#4][#5][#6][#7]}
% Coherence conditions for frame, layer, and filler
\renewcommandx{\cohf}[9][1,2,3,4,5,6,7,8,9]{\coh{frame}[#1][#2][#3][#4][#5][#6,#7][#8][#9]}
\renewcommandx{\cohl}[9][1,2,3,4,5,6,7,8,9]{\coh{layer}[#1][#2][#3][#4][#5][#6,#7][#8][#9]}
\renewcommandx{\cohc}[9][1,2,3,4,5,6,7,8,9]{\coh{filler}[#1][#2][#3][#4][#5][#6,#7][#8][#9]}
\renewcommand*{\lab}{fulltab:coind}
\input{tab-coind.tex}
\renewcommand*{\lab}{fulltab:core}
\input{tab-core.tex}
\renewcommand*{\lab}{fulltab:frames}
\input{tab-frames.tex}
\begin{landscape}
\renewcommand*{\lab}{fulltab:faces}
\input{tab-faces.tex}
\renewcommand*{\lab}{fulltab:coh}
\input{tab-coh.tex}
\end{landscape}
\newpage
\begin{thebibliography}{10}
\bibitem[Bez]{Bezem14}
Bezem, M., Coquand, T., \& Huber, S. (2014, July). A model of type theory in cubical sets. \emph{In 19th International Conference on Types for Proofs and Programs (TYPES 2013)} (Vol. 26, pp. 107-128). Wadern, Germany: Schloss Dagstuhl–Leibniz Zentrum fuer Informatik.
\bibitem[CubAgda]{Vezzosi21}
Vezzosi, A., Mörtberg, A., \& Abel, A. (2021). Cubical Agda: a dependently typed programming language with univalence and higher inductive types. \emph{Journal of Functional Programming, 31}.
\bibitem[CohCoq]{Cohen16}
Cohen, C., Coquand, T., Huber, S., \& Mörtberg, A. (2016). Cubical type theory: a constructive interpretation of the univalence axiom. \emph{arXiv preprint arXiv:1611.02108}.
\bibitem[Angiuli]{Angiuli17}
Carlo Angiuli, Guillaume Brunerie, Thierry Coquand, Kuen-Bang Hou (Favonia), Robert Harper, \& Daniel R. Licata. \emph{arXiv preprint}.
\bibitem[Fri]{Friedman08}
Friedman, G. (2008). An elementary illustrated introduction to simplicial sets. \emph{arXiv preprint arXiv:0809.4221}.
\bibitem[Rie]{Riehl11}
Riehl, E. (2011). A leisurely introduction to simplicial sets. \emph{Unpublished expository article available online from the author's web page}.
\bibitem[CubSet]{Antolini00}
Antolini, R. (2000). Cubical structures, homotopy theory. \emph{Annali di Matematica pura ed applicata, 178}(1), 317-324.
\bibitem[Her]{Herbelin15}
Herbelin, H. (2015). A dependently-typed construction of semi-simplicial types. \emph{Mathematical Structures in Computer Science, 25}(5), 1116-1131.
\bibitem[CoqInCoq]{Barras97}
Barras, B., \& Werner, B. (1997). Coq in coq. \emph{Available on the WWW.}
\end{thebibliography}
\end{document}
\chapter{Large-scale datasets for Switzerland}
\label{data}
\vspace{-15pt} % one line spacing corresponds approx to 15 pts
\begin{tcolorbox}[enhanced,width=\textwidth,size=fbox,
sharp corners,colframe=black!5!white,drop fuzzy shadow southeast,
boxrule=3mm, parbox=false]
This chapter borrows partly from the articles \citep{walch_big_2020, walch_quantifying_2021}:
\qquad %\bibentry{walch_big_2020}
A. Walch, R. Castello, N. Mohajeri, and J.-L. Scartezzini (2020).
Big data mining for the estimation of hourly rooftop photovoltaic potential and its uncertainty.
\textit{Applied Energy}, 262:114404.
\href{https://doi.org/10.1016/j.apenergy.2019.114404}{doi:10.1016/j.apenergy.2019.114404}
\qquad %\bibentry{walch_quantifying_2021}
A. Walch, N. Mohajeri, A. Gudmundsson, and J.-L. Scartezzini (2021). Quantifying the technical geothermal potential from shallow borehole heat exchangers at regional scale.
\textit{Renewable Energy}, 165:369–380.
\href{https://doi.org/10.1016/j.renene.2020.11.019}{doi:10.1016/j.renene.2020.11.019}
and the conference proceedings \cite{walch_spatio-temporal_2019}:
\quad A. Walch, R. Castello, N. Mohajeri, F. Guignard, M. Kanevski, and J.-L. Scartezzini (2019).
Spatio-temporal modelling and uncertainty estimation of hourly global solar irradiance using Extreme Learning Machines.
\textit{Energy Procedia}, 158:6378–6383.\\
\href{https://doi.org/10.1016/j.egypro.2019.01.219}{doi:10.1016/j.egypro.2019.01.219}
% \quad \bibentry{walch_spatio-temporal_2019}
\end{tcolorbox}
The spatio-temporal modelling of solar photovoltaic and shallow geothermal energy potentials at national scale for Switzerland requires the availability of high-resolution data on the existing building stock, meteorological and ground data.
This chapter provides an overview of the large-scale datasets used throughout this thesis and relevant pre-processing steps.
\section{Building and landscape data}
\label{data_bld_landscape}
The building and landscape data available for Switzerland include statistics and geometries related to the existing building stock, landscape geometries at different levels of detail, as well as high-resolution digital elevation models. Most of this data can be accessed freely on the geoportal of the Swiss Confederation\footnote{\url{https://map.geo.admin.ch/}}. Table~\ref{tab:bld_landscape} shows an overview of all building and landscape data used in this thesis, with a short explanation of each dataset provided below.
\begin{table}[tb]
\centering
\footnotesize
\caption[Overview of building and landscape datasets and the relevant chapters]{Overview of building and landscape datasets and the relevant chapters where the data is used. \textit{CH}: Entire Switzerland, \textit{GE, LU}: Cantons of Geneva and Lucerne.}
\label{tab:bld_landscape}
% \resizebox{\textwidth}{!}{%
\begin{tabular}{lllllll}
\hline
\textbf{Type} & \textbf{Dataset} & \textbf{Coverage} & \textbf{Spatial res.} & \textbf{Creation} & \textbf{Source} & \textbf{Chapter} \\ \hline
\multirow{5}{*}{\begin{tabular}[c]{@{}l@{}}\textit{Building} \\ \textit{data}\end{tabular}} & RBD & CH & Building & 2015/2019 & FSO \cite{bundesamt_fur_statistik_bfs_eidgenossisches_2015} & \ref{solar} %,\ref{hybrid_chapter}
\\
% & STATENT & CH & $100 \times 100$ m$^2$ & 2018 & FSO \cite{bfs_statistik_2018} & \ref{hybrid_chapter} \\ \hline
& Building footprints & CH & Building & 2017 & swisstopo \cite{swisstopo_swisstlm3d_2018} & \ref{solar},\ref{geothermal} %, \ref{hybrid_chapter}
\\
& Roof surfaces & CH & Rooftop & 2010-2016 & Sonnendach.ch \cite{klauser_solarpotentialanalyse_2016} & \ref{solar} % ,\ref{hybrid_chapter}
\\
& Roof superstructures & GE & Rooftop & 2005-2011 & SITG \cite{sitg_superstructures_2019} & \ref{solar} \\
& Building parcels & CH (ex. LU) & Parcel & various & Cantons* & \ref{geothermal} %, \ref{hybrid_chapter}
\\ \hline
\multirow{4}{*}{\begin{tabular}[c]{@{}l@{}}\textit{Landscape} \\ \textit{geometries}\end{tabular}} & TLM & CH & various & 2017 & swisstopo \cite{swisstopo_swisstlm3d_2018} & \ref{solar}, \ref{geothermal} \\
& OSM & LU & various & various & OpenStreetMap \cite{openstreetmap_wiki_elements_2021} & \ref{geothermal} \\
& CORINE & CH & 1:250 000 & 2018 & Copernicus \cite{copernicus_land_monitoring_service_corine_2018} & \ref{geothermal} \\
& Landscape typologies & CH & 1:100 000 & 2011 & ARE, FOEN, FSO \cite{camenzind_landschaftstypologie_2011} & \ref{geothermal} \\ \hline
\multirow{3}{*}{\begin{tabular}[c]{@{}l@{}}\textit{Digital} \\ \textit{elevation} \\ \textit{models}\end{tabular}} & DTM & CH & $2 \times 2$ m$^2$ & 2010-2016 & swisstopo \cite{swisstopo_swissalti3d_2017} &\ref{solar} \\
& DSM$_{2\text{m}}$ & CH & $2 \times 2$ m$^2$ & 2000-2008 & swisstopo \cite{swisstopo_dsm_2005} & \ref{solar} \\
& DSM$_{50\text{cm}}$ & GE & $0.5 \times 0.5$ m$^2$ & 2013 & SITG \cite{sitg_mns_2018} & \ref{solar} \\ \hline
\multicolumn{7}{l}{* Available at: \url{https://www.geodienste.ch/services/av} (not obtained for LU)} \\
\end{tabular}%
% }
\end{table}
\subsection{Building data}
\label{data_rbd_statent}
\label{data_buildings}
\textbf{Registry of Buildings and Dwellings (RBD).} The RBD, published and continuously updated by the Swiss Federal Statistical Office (FSO) \cite{bundesamt_fur_statistik_bfs_eidgenossisches_2015}, contains 2.2 million registered buildings in Switzerland.
It lists a large amount of building-related attributes, including building coordinates, floor area, construction period, number of floors, number of dwellings, and information on the building use (e.g. residential/service sector/industrial).
The RBD further contains a registry of 4.6 million dwellings, listing, among other attributes, each dwelling's area, number of rooms and use.
As the attributes in the RBD are missing for some buildings and dwellings, the missing characteristics are filled in order to estimate RE potentials for the entire building stock. Details for the pre-processing steps to fill missing data are described in Appendix \ref{app:rbd}.
\textbf{Building footprints.}
Geometries of the building footprints for the entire Swiss building stock are available as 3.7 million vector polygons.
The polygons are part of the Topographic Landscape Model (TLM) of Switzerland published by the Swiss Federal Office of Topography (swisstopo) \cite{swisstopo_swisstlm3d_2018}.
The footprints are based on a 3D model of the Swiss building stock, which is created using a photogrammetric capturing method and has a high precision of $\pm 0.3 $ m.
\textbf{Roof surfaces.}
This dataset contains 9.6 million vector polygons which are derived from a national 3D building model (LOD~2) by swisstopo \cite{klauser_solarpotentialanalyse_2016}.
The roof polygons represent the roofs of the 3.7 million building footprints in Switzerland and contain information on the tilt, aspect and area of each roof.
This data has been published as part of a previous study of RPV potential in Switzerland, carried out by the Swiss Federal Office of Energy (SFOE), titled and referred to throughout this thesis as ``Sonnendach.ch'' \cite{klauser_solarpotentialanalyse_2016}.
\textbf{Roof superstructures.}
In the Canton of Geneva, where detailed CityGML data (LOD~4) exists, an additional dataset of roof superstructures is available through the cantonal geoinformation service (SITG)~\cite{sitg_superstructures_2019}.
These superstructures are vector polygons which represent objects on rooftops such as dormers and chimneys.
Furthermore, small shapes like dormers, which are generally unsuitable for installing PV, are partly represented as separate roof polygons in the LOD~4 dataset.
For this reason, all surfaces smaller than 8 m$^2$ are converted to superstructures.
As the superstructure dataset has been derived from LiDAR data, building-integrated objects such as windows or already existing solar panels are not considered.
Furthermore, roofs without any superstructure information are excluded from the analysis, as a visual inspection showed that this data is missing on a large part of the rooftops. Using only rooftops where superstructure information is available yields a dataset of 37.7K roofs in the Canton of Geneva.
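As an illustration, the 8 m$^2$ threshold can be applied directly to the vector data. The following is a minimal sketch using geopandas; the file names are hypothetical and the actual pre-processing pipeline may differ.
\begin{verbatim}
import geopandas as gpd
import pandas as pd

# Hypothetical file names for the SITG LOD4 roof and superstructure polygons
roofs = gpd.read_file("roofs_geneva.gpkg")
superstructures = gpd.read_file("superstructures_geneva.gpkg")

# Roof polygons below 8 m^2 (e.g. dormers) are treated as superstructures;
# areas are in m^2 since the Swiss data uses a projected coordinate system.
small = roofs[roofs.geometry.area < 8]
roofs = roofs[roofs.geometry.area >= 8]
superstructures = pd.concat([superstructures, small], ignore_index=True)
\end{verbatim}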
\textbf{Parcel geometries.}
Information on the boundaries of property units (parcels), which are obtained as vector polygons of official mensuration data collected at cantonal level, provide valuable insights in the attribution of surrounding land to building footprints.
At national level, the data is available via the interface \textit{geodienste.ch}\footnote{\url{https://www.geodienste.ch/services/av}} for most cantons.
While it is freely accessible for some cantons, registration or approval from the local geoinformation services is required for others.
I have collected parcel geometry data for 25 of the 26 Swiss cantons (all except Lucerne), whereby data for the four cantons of Appenzell Ausserrhoden, Neuchâtel, Vaud and Ticino has been obtained directly from the cantonal geoinformation services.
Of these, Ticino has a small number of missing parcels in alpine terrain with low building densities.
\subsection{Landscape geometries}
\label{data_landscape}
\textbf{Topographic Landscape Model (TLM).}
In addition to the building footprints, the TLM \cite{swisstopo_swisstlm3d_2018} contains a detailed 3D representation of various landscape objects in vector form.
These include built-up areas (e.g. roads, railways, parking lots, sports facilities etc.), natural (e.g. lakes, rivers) and artificial (e.g. pools) water bodies and land cover data (e.g. forests, wetlands, agricultural use, glaciers), protected areas, and more\footnote{See \url{https://www.swisstopo.admin.ch/en/geodata/landscape/tlm3d.html} for a full list of attributes}.
The TLM has a precision of $0.2-1.5$ m for well-defined objects such as roads or buildings, and of $1 - 3$ m for other landscape features such as forests \cite{swisstopo_swisstlm3d_2018}.
To create a homogeneous dataset of vector polygons, relevant line data, namely roads and railways, are buffered (expanded) by their width (see Appendix \ref{app:roads}).
Other TLM objects used in Chapter \ref{geothermal} are buffered by 1 m to account for the imprecision of the TLM.
\textbf{Open Street Map (OSM).}
OSM is another open source of building and landscape data. Its advantages are that (i) it covers a wide range of different landscape features, which would otherwise have to be combined from several different Swiss datasets, and (ii) it is available beyond the boundaries of Switzerland, such that methods using OSM data can be directly transferred to other countries.
The OSM data structure is divided into nodes (points), ways (lines/polygons) and relations, which describe the relationship between elements; all of these are labelled with tags \cite{openstreetmap_wiki_elements_2021}.
To extract OSM data, the overpass turbo API\footnote{\url{https://overpass-turbo.eu/}} is used here.
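For illustration, such a request could look as follows; the bounding box and the tag are placeholders, and the queries used in this work are more involved.
\begin{verbatim}
import requests

# Overpass QL query for building footprints in a small bounding box
# (placeholder coordinates around Lucerne); tags depend on the feature type.
query = """
[out:json][timeout:60];
way["building"](47.03,8.27,47.07,8.33);
out geom;
"""
response = requests.post("https://overpass-api.de/api/interpreter",
                         data={"data": query})
elements = response.json()["elements"]   # list of ways with their node geometries
\end{verbatim}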
\begin{figure}[tb]
\centering
\begin{subfigure}{.65\textwidth}
\includegraphics[width=\linewidth]{images/Figs/CLC_CH_TYP.png}
\caption{}
\label{fig:CORINE}
\end{subfigure}
\begin{subfigure}{.65\textwidth}
\includegraphics[width=\linewidth]{images/Figs/landscape_typology.png}
\caption{}
\label{fig:landscape_typology}
\end{subfigure}
% include second image
\caption[Reclassification of CORINE land cover dataset and Swiss landscape typologies]{Reclassification of a) the CORINE land cover dataset for Switzerland into five classes and b) the Swiss landscape typologies into eight typologies (see legends).}
\end{figure}
\textbf{CORINE land cover (CLC) classification.}
Published by Copernicus, the European Union's Earth observation program, the CLC \cite{copernicus_land_monitoring_service_corine_2018} is a land cover dataset for the entire Europe.
It represents an inventory of the land use and land cover in Europe and is divided in 44 classes in three levels of detail.
The spatial resolution of the CLC is lower than that of the TLM (scale 1:250 000), with a precision of 10 m (new elements) to 50 m (old elements) \cite{copernicus_land_monitoring_service_corine_2018}.
The CLC is hence used for large-scale analyses, while the TLM/OSM data is used when a higher level of detail is needed.
For the work in this thesis, the CLC is aggregated into five categories (Urban fabric, Industrial/commercial areas, Agricultural areas, Natural areas, Water bodies (incl. glaciers)) as shown in Figure \ref{fig:CORINE}. Their definition is provided in Appendix \ref{app:corine}.
\textbf{Swiss landscape typologies.}
The dataset of Swiss landscape typologies, developed by the Federal Office of Spatial Development (ARE), the Federal Office for the Environment (FOEN) and the FSO, divides the Swiss surface into five regions and 38 landscape typologies \cite{camenzind_landschaftstypologie_2011}.
These typologies specify the topographic, regional and urban features of the landscape.
Similar to the CLC, the dataset is primarily targeted to large-scale analyses, with a scale of 1:100 000.
This dataset is aggregated into eight typologies (Urban, Suburban, Plateau, Jura, Alps (valley), Alps (lower), Alps (upper), Wetlands).
Figure \ref{fig:landscape_typology} shows the geographic distribution of these typologies, with their mapping from the original dataset provided in Appendix \ref{app:landscape_typologies}.
\subsection{Digital Elevation models}
\label{data_DEM}
\textbf{Digital Terrain Model.} Two types of surface datasets are used for the estimation of PV potential: a Digital Terrain Model (DTM) and a Digital Surface Model (DSM) of Switzerland. The DTM is a high-resolution model of the bare terrain, excluding vegetation and constructions. It is available at national scale as pixels of $2\times2$ m$^2$ resolution. The DTM is derived from Light Detection and Ranging (LiDAR) data that was collected in the period from 2000-2008 and has been updated from 2010-2016.
%
It contains information on coordinates (x,y) and altitude (z). In addition, several spatial features derived from the DTM are considered at various spatial scales, including terrain slope and curvature (see Table \ref{tab:DTM_ftrs} for a complete list). These features have been computed by \citet{robert_spatial_2012} for an aggregation level of $250 \times 250$ m$^2$.
\textbf{Digital Surface Model.} The DSM is a complete model of the landscape, including all visible landscape elements. Like the DTM, it is available in $2\times2$ m$^2$ resolution from LiDAR data collected in the period from 2000-2008, but contrary to the DTM it has not been updated. More recent DSMs have however been created for individual cantons in Switzerland at a resolution of $0.5\times0.5$ m$^2$.
%
I use the higher-resolution and more recent DSM for the Canton of Geneva to improve the estimation of RPV potential.
There are two reasons for this: firstly, the spatial resolution of the updated DSM is 16 times higher, and secondly, the more recent acquisition period accounts for buildings constructed after the completion of the first DSM.
An analysis of the RBD shows that 9.32\% of buildings in Switzerland have been constructed in the period of 2006-2015, corresponding approximately to the time span between the two acquisitions. In Geneva, this fraction is comparable (7.28\%).
\begin{table}[h]
\centering
\footnotesize
\caption[Spatial features of the DTM (x,y,z) and additional features]{Spatial features of the DTM (x,y,z) and additional features derived from the DTM. All derived features are available for each scale~\cite{robert_spatial_2012}. DoG denotes the Derivative of Gaussians.}
\label{tab:DTM_ftrs}
\begin{tabular}{cll}
\hline
\textbf{Coordinates} & \textbf{Derived features} & \textbf{Scales} \\ \hline
x & slope & 250m (small) \\
y & directional slope (NS, EW) & 1.75km (med) \\
z & curvature (DoG) & 3.75km (big) \\ \hline
\end{tabular}
\end{table}
\section{Meteorological data}
\label{data_meteo}
Meteorological data is available from the Swiss Federal Office of Meteorology and Climatology (MeteoSwiss) as gridded data and for a network of measurement stations across the country.
Gridded data is preferred over data from measurement stations as it provides a better spatial coverage with an increased spatial resolution and has a very low missing data ratio ($<1\%$).
For the potential studies in Part~\ref{potential}, solar radiation and temperature data for Switzerland is used. I further derived heating and cooling degree days (HDD/CDD), which are used to estimate monthly variations of heating and cooling demands \cite{stadler_contribution_2018}. The meteorological datasets are summarised in Table~\ref{tab:meteo}.
\begin{table}[t]
\centering
\footnotesize
\caption{Overview of meteorological datasets and the relevant chapters.}
\label{tab:meteo}
\begin{tabular}{lllllll}
\hline
\textbf{Type} & \textbf{Dataset} & \textbf{Spatial res.} & \textbf{Time} & \textbf{Range} & \textbf{Source} & \textbf{Chapter} \\ \hline
\multirow{3}{*}{\begin{tabular}[c]{@{}l@{}}\textit{Solar} \\ \textit{radiation}\end{tabular}} & GHI & $1.25$ deg. min.* & hourly & 2004-2015 & MeteoSwiss \cite{stockli_daily_2013} & \ref{solar} \\
& DIR & $1.25$ deg. min.* & hourly & 2004-2015 & MeteoSwiss \cite{stockli_daily_2013} & \ref{solar} \\
& Albedo & $1.25$ deg. min.* & hourly & 2004-2015 & MeteoSwiss \cite{stockli_daily_2013} & \ref{solar} \\ \hline
\multirow{3}{*}{\textit{Temperature}} & Max. temp. & $1 \times 1$ km$^2$ & daily & 2004-2015 & MeteoSwiss \cite{meteoswiss_daily_2017} & \ref{solar}, \ref{geothermal} \\
& HDD & $200 \times 200$ m$^2$ & daily & averaged & Eq.~\eqref{eq:hdd} & \ref{geothermal} % , \ref{hybrid_chapter}
\\
& CDD & $200 \times 200$ m$^2$ & daily & averaged & Eq.~\eqref{eq:cdd} & \ref{geothermal} \\ \hline
\multicolumn{7}{l}{* Deg. min.: degree minutes on a longitude-latitude grid (1.25 deg. min. $\approx 1.6 \times 2.3$ km$^2$)}
\end{tabular}
\end{table}
\subsection{Solar radiation}
\label{data_solarRad}
\textbf{Global and direct horizontal radiation (GHI/DIR).} To estimate RPV potential, I use satellite data for global horizontal radiation (GHI), also known as surface incoming shortwave radiation, and direct beam radiation (DIR), provided by MeteoSwiss~\cite{stockli_daily_2013}. Radiation describes the solar power per unit area at the earth's surface, given in W/m$^2$; the corresponding solar energy is called irradiation, given in Wh/m$^2$. The data are hourly values for the period from 2004-2015 on a longitude-latitude grid of 1.25 degree minutes, equivalent to around $1.6 \times 2.3$ km$^2$ (see Fig.~\ref{figa:GHI_patterns}).
The 12 years of satellite data are averaged in order to obtain a mean year in hourly resolution, i.e. $12 \times 365$ time steps for 11,243 satellite pixels (see Fig.~\ref{figb:GHI_patterns}). This reduces the variability of the radiation data, and allows the estimation of long-term RPV potential without bias due to extreme meteorological events of a specific year. Furthermore, all hours with constantly zero GHI measurements are removed (i.e. night hours). The remaining 10-17 day hours result in $\sim~3-6$ million hourly values for GHI and DIR per month, depending on the month.
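For illustration, the averaging and night-hour removal can be sketched as follows, assuming the hourly satellite data has been loaded into a single array (the variable names are hypothetical):
\begin{verbatim}
import numpy as np

# ghi: array of shape (n_years, n_days, n_hours, n_pixels), e.g. 12 x 365 x 24 x 11243
mean_year = ghi.mean(axis=0)               # hourly mean year, shape (365, 24, 11243)

# Remove night hours: (day, hour) slots where GHI is zero at every pixel
is_day = ~np.all(mean_year == 0, axis=-1)  # boolean mask of shape (365, 24)
day_values = mean_year[is_day]             # remaining day-hour slots x 11243 pixels
\end{verbatim}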
\begin{figure}[tb]
\centering
\begin{subfigure}{.49\textwidth}
\centering
% include second image
\includegraphics[width=\linewidth]{Figs/yr_raw.png}
\caption{}
\label{figa:GHI_patterns}
\end{subfigure}
\begin{subfigure}{.49\textwidth}
\centering
% include first image
\includegraphics[width=\linewidth]{Figs/hours.png}
\caption{}
\label{figb:GHI_patterns}
\end{subfigure}
\caption[Yearly solar irradiation and hourly variation at selected location (mean of 2004-2015)]{(a) Yearly solar irradiation ($G_h$) satellite data, (b) hourly $G_h$ at selected location (mean of 2004-2015).}
\label{fig:GHI_patterns}
\end{figure}
\textbf{Surface albedo.} A dataset for surface reflectance, also referred to as surface albedo, is available at the same spatio-temporal resolution as the global and direct horizontal radiation. In particular in Switzerland, where a large part of the country is covered by mountains, the surface albedo plays an important role in the quantification of the reflected solar radiation component \cite{kahl_bright_2019}. As the surface albedo shows low variations throughout the day, the data is aggregated to daily values, before being averaged across the 12 years as described above. \\
All solar radiation data has been derived by MeteoSwiss from Meteosat Second Generation (MSG) satellite observations using the Heliomont algorithm~\cite{stockli_heliomont_2017}. It was developed to improve the quality of the data, particularly in Switzerland's Alpine territories.
\citet{ineichen_long_2014} performed a comprehensive validation of various satellite-based products against measurement data, and found a negligible bias of the hourly satellite data across 18 measurement stations.
The standard deviation for hourly global and direct radiation is 19\% and 39\%, respectively.
\subsection{Temperature}
\label{data_T_HDD_CDD}
\textbf{Maximum daily air temperature.} Air temperature data, including the daily mean, maximum and minimum air temperature, is available from MeteoSwiss on a grid of $1 \times 1$ km$^2$ for Switzerland \cite{meteoswiss_daily_2017}.
For coherence with the solar radiation data described above, daily air temperature data for the years of $2004-2015$ was collected.
Maximum air temperature is used in the estimation of RPV potential (Chapter~\ref{solar}) as the maximum solar yield occurs around midday when temperatures are near their daily maximum, resulting in a conservative estimate of the PV panel efficiency.
\textbf{Heating degree days (HDD).}
\label{app:HDD}
The HDD is defined in the norm SIA 2028 as the sum of the heating degrees for each month $m$, averaged across 20 years, such that \cite{sia_klimadaten_2010}:
\begin{equation}
\label{eq:hdd}
HDD = \frac{1}{20} \sum_{y=1}^{20} \sum_{d=1}^{d_m} (20 - T_{m}(d, m, y)) \quad \forall T_{m} (d, m, y) \leq 12 ^\circ C.
\end{equation}
where $d_m$ is the number of days of each month and $T_{m}$ is the daily mean temperature on day $d$ in month $m$ of year $y$.
Gridded daily mean air temperature data for 20 years ($1991-2011$) \cite{meteoswiss_daily_2017} is used to compute the HDD for each of the $1 \times 1$ km$^2$ pixels of the temperature grid (see above). The time span of $1991-2011$ is chosen in order to yield results that are comparable to the tabulated HDD in \cite{sia_klimadaten_2010}.
The HDD is then spatially interpolated to a resolution of $200 \times 200$ m$^2$ using a Random Forest algorithm (see Section \ref{RF}) with the pixel coordinates and the altitude (obtained from DTM) as features.
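A minimal sketch of this interpolation step is shown below; the variable names are hypothetical and the hyper-parameters are illustrative only.
\begin{verbatim}
import numpy as np
from sklearn.ensemble import RandomForestRegressor

# Assumed inputs (hypothetical names), loaded from the gridded datasets:
#   xyz_1km  : array (n_pixels_1km, 3)  with x, y, altitude of each 1 km pixel
#   hdd_1km  : array (n_pixels_1km,)    with the HDD computed on the 1 km grid
#   xyz_200m : array (n_pixels_200m, 3) with x, y, altitude of each 200 m pixel
rf = RandomForestRegressor(n_estimators=100, random_state=0)
rf.fit(xyz_1km, hdd_1km)
hdd_200m = rf.predict(xyz_200m)   # HDD interpolated to the 200 x 200 m^2 grid
\end{verbatim}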
\textbf{Cooling degree days (CDD).} In contrast to the HDD, no Swiss norm exists for the CDD. It is thus obtained from \cite{christenson_climate_2006,stadler_contribution_2018}, which suggest a reference temperature of around 18 °C:
\begin{equation}
\label{eq:cdd}
CDD = \frac{1}{20} \sum_{y=1}^{20} \sum_{d=1}^{d_m} (T_{m}(d, m, y)-18) \quad \forall T_{m} (d, m, y) \geq 18 ^\circ C.
\end{equation}
The CDD is obtained for the same timespan ($1991-2011$) and spatial resolution ($1 \times 1$ km$^2$) as the HDD, which is then interpolated to pixels of $200 \times 200$ m$^2$ as described above.
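As an illustration, Eqs.~\eqref{eq:hdd} and \eqref{eq:cdd} translate directly into a few lines of code for a single pixel and month, assuming the daily mean temperatures of the 20 years are available as an array (variable names are hypothetical):
\begin{verbatim}
import numpy as np

# t_mean: array of shape (20, d_m) with the daily mean temperature of one pixel,
# for one month over the 20 years 1991-2011
heating = np.where(t_mean <= 12.0, 20.0 - t_mean, 0.0)
cooling = np.where(t_mean >= 18.0, t_mean - 18.0, 0.0)

hdd = heating.sum(axis=1).mean()   # Eq. (hdd): sum over days, mean over years
cdd = cooling.sum(axis=1).mean()   # Eq. (cdd)
\end{verbatim}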
\section{Ground data}
\label{data_ground_all}
Ground data are more scarcely available than building or meteorological data.
The available ground data include national maps of the geological characteristics of the Swiss terrain, digitalised by swisstopo, as well as cantonal geothermal cadastres containing maps of the thermal ground properties.
To the best of my knowledge, the cadastres of the cantons of Geneva (provided by SITG \cite{sitg_geothermie_2021}) and Vaud (provided by ASIT-VD \cite{asit_vd_cadastre_geo_2019}) are currently the only ones with readily available maps of thermal ground properties.
In addition, these cadastres contain information on restriction zones for geothermal installations within the case study area and existing installations.
At national scale, this information is complemented by data on protected areas and usable groundwater to assess restrictions for geothermal installations.
An overview of the datasets described below is provided in Table~\ref{tab:geoData}.
\begin{table}[tb]
\centering
\footnotesize
\caption[Overview of subsurface datasets]{Overview of subsurface datasets. CH denotes availability for the entire Switzerland, VD is the Canton of Vaud and GE is the Canton of Geneva.}
\label{tab:geoData}
\resizebox{\textwidth}{!}{%
\begin{tabular}{lllllc}
\hline
\textbf{Type} & \textbf{Dataset} & \textbf{Coverage} & \textbf{Spatial res.} & \textbf{Source} & \textbf{Chapter} \\ \hline
\multirow{4}{*}{\textit{\begin{tabular}[c]{@{}l@{}}Geological \\ characteristics\end{tabular}}} & Geological maps GK500 & CH & Polygons & swisstopo \cite{swisstopo_geomaps_nodate} & \ref{geothermal} \\
& Molasse extent & Molasse basin & Polygons & GK500 \cite{swisstopo_geomaps_nodate} & \ref{geothermal} \\
& \begin{tabular}[c]{@{}l@{}}Thickness of \\ unconsolidated rocks\end{tabular} & CH (partly) & $25 \times 25$ m$^2$ & swisstopo & \ref{geothermal} \\ \hline
\multirow{3}{*}{\textit{\begin{tabular}[c]{@{}l@{}}Thermal \\ ground\\ properties\end{tabular}}} & Thermal conductivity ($\lambda$) & VD,GE & $50 \times 50 \times 50$ m$^3$ & ASIT-VD \cite{asit_vd_cadastre_geo_2019}, SITG \cite{sitg_geothermie_2021} & \ref{geothermal} \\
& Heat capacity ($\rho C$) & VD,GE & $50 \times 50 \times 50$ m$^3$ & SITG \cite{sitg_geothermie_2021} & \ref{geothermal} \\
& Surface temperature ($T_0$) & CH & $200 \times 200$ m$^2$ & \citet{assouline_geothermal_2019} & \ref{geothermal} \\\hline
\multirow{3}{*}{\textit{\begin{tabular}[c]{@{}l@{}}Subsurface \\ protection\end{tabular}}} & Restriction zones & VD, GE* & Polygons & ASID-VD \cite{asit_vd_cadastre_geo_2019}, SITG \cite{sitg_geothermie_2021} & \ref{geothermal} \\
& Water protection plan & CH & Polygons & Cantonal** & \ref{geothermal} \\
& Usable groundwater & CH & Polygons & GK500 \cite{swisstopo_geomaps_nodate} & \ref{geothermal} \\ \hline
\multicolumn{6}{l}{* Overview of restriction maps for other cantons: \url{http://www.hetag.ch/geolinks.html}} \\
\multicolumn{6}{l}{** Available at: \url{https://www.geodienste.ch/services/planerischer_gewaesserschutz}}\\
\end{tabular}
}
\end{table}
\subsection{Geological characteristics}
\label{data_geo_GK500}
\begin{table}[tb]
\centering
\footnotesize
\caption[Attributes of the GK500 dataset used in this study]{Attributes of the GK500 dataset used in this study, with a brief description and the Section/Chapter of application. Further details may be found in \cite{swisstopo_kurzbeschreibung_2014}.}
\label{tab:GK500}
\begin{tabular}{lllll}
\hline
\textbf{Type} & \textbf{Attribute} & \textbf{ID} & \textbf{Description} & \textbf{Application} \\ \hline
\multirow{3}{*}{Geotechnical map} & GESTEINKL & ID\_GESTEIN & Rock types & \multirow{3}{*}{Chapter~\ref{geoCH_ML_preproc} }\\
& LITH\_PET & ID\_LITH & Lithology/Petrology & \\
& LITHO & L\_ID & Lithology type & \\ \hline
Tectonic map & LEG\_TEK\_1 & T1\_ID & Tectonics (1st order) & Section~\ref{data_geo_GK500} \\
Hydrogeological map & PRODUCTIV & H1\_ID, H2\_ID & Groundwater resource productivity & Section~\ref{data_restrict}\\ \hline
\end{tabular}
\end{table}
\textbf{Geological Maps (GK500).} The GK500 \cite{swisstopo_geomaps_nodate}, published by the Swiss Geological Survey, is a vector dataset of around 13,500 polygons covering the entire Swiss terrain.
The attributes of the polygons contain six primary maps of Switzerland at a scale of 1:500,000.
For the quantification of shallow geothermal potential (Chapter~\ref{geothermal}), the geotechnical, tectonic and hydrogeological maps are relevant, with the attributes used in this work shown in Table~\ref{tab:GK500}.
The geotechnical data allows mapping ground thermal properties to the near-surface rock types.
The GK500 differentiates between four rock types (unconsolidated, sedimentary, magmatic, metamorphous), surface waters and glaciers (see Fig. \ref{figa:geoCH}), which are divided into 68 lithology types (LITH\_PET).
Additional lithological information is provided by the LITHO attribute, which is used as secondary source here.
Tectonic information is used to define the extent of the Swiss Molasse (see below).
Hydrogeological maps provide information on groundwater availability, which is relevant for potential restrictions for shallow geothermal resources.
\textbf{Extent of Swiss Molasse basin.}
The Swiss Molasse, spanning from the north-east to the south-west of the country, has a well-known near-surface geology and contains most of the Swiss buildings.
To define the extent of the Swiss Molasse basin, I use the first-order tectonic map of Switzerland. As areas with significant quaternary (unconsolidated) deposits above the Molasse are attributed to the ``quaternary'' class, I add the ``quaternary'' polygons within the convex hull of the ``Molasse basin'' class, yielding the Molasse extent shown in Fig.~\ref{figb:geoCH}.
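A minimal sketch of this step is shown below; the layer and attribute names are hypothetical, and the actual processing may differ.
\begin{verbatim}
import geopandas as gpd

# Hypothetical layer and attribute names for the first-order tectonic map (GK500)
tectonic = gpd.read_file("gk500_tectonic_1st_order.gpkg")
molasse = tectonic[tectonic["unit"] == "Molasse basin"]
quaternary = tectonic[tectonic["unit"] == "Quaternary"]

# Convex hull of the Molasse polygons, then add the Quaternary polygons inside it
hull = molasse.geometry.unary_union.convex_hull
inside = quaternary[quaternary.within(hull)]
molasse_extent = molasse.geometry.unary_union.union(inside.geometry.unary_union)
\end{verbatim}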
\begin{figure}[tb]
\centering
\begin{subfigure}{.65\textwidth}
\centering
% include second image
\includegraphics[width=\linewidth]{images/Figs/rockType_CH.png}
\caption{}
\label{figa:geoCH}
\end{subfigure}
\begin{subfigure}{.65\textwidth}
\centering
% include first image
\includegraphics[width=\linewidth]{images/Figs/thickness_unconsolidated_deposits.png}
\caption{}
\label{figb:geoCH}
\end{subfigure}
\caption[Geotechnical characteristics of Switzerland]{Geotechnical characteristics of Switzerland. (a) Classification of rock types, and (b) thickness of unconsolidated deposits (in m), available mostly in the Swiss Molasse basin (black line) and large alluvial plains.}
\label{fig:geoCH}
\end{figure}
\textbf{Depth of unconsolidated deposits.}
In addition to the near-surface lithology data provided in the GK500, the depth of the unconsolidated deposits is available from the national geological service (see Fig. \ref{figb:geoCH}). This dataset covers mostly the Swiss Molasse basin (see above) and alluvial plains in large mountain valleys. The depth of the unconsolidated deposits may be used to improve the estimate of the thermal ground properties for shallow BHEs by accounting for the near-surface unconsolidated deposits and the underlying (mostly sedimentary) rock.
\subsection{Thermal ground properties}
\label{data_thermal_properties}
\textbf{Thermal conductivity ($\mathbf{\lambda}$).} For the Cantons of Vaud and Geneva, high-resolution maps of the thermal conductivity are obtained from the geothermal cadastres \cite{asit_vd_cadastre_geo_2019,sitg_geothermie_2021}.
The data is available for depths of $50-300$ m as pixels of $50$ m spatial resolution in all 3 dimensions.
The conductivities are computed as the average of the ground properties of each rock layer weighted by its thickness, which is given by 3D models of the subsurface \cite{groupe_de_travail_pgg_evaluation_2011-1}.
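For instance, for a borehole crossing layers of thickness $t_i$ and conductivity $\lambda_i$, the thickness-weighted average described above corresponds to
\begin{equation*}
\bar{\lambda} = \frac{\sum_i t_i \lambda_i}{\sum_i t_i},
\end{equation*}
and analogously for the other ground properties.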
To obtain a homogeneous dataset for $\lambda$ across both cantons, the polygon data of the geothermal cadastre of Geneva has been converted into rasters of $50 \times 50 \times 50$ m$^3$.
For the rest of Switzerland, no maps of the thermal conductivity or thermal diffusivity (see below) exist. They will be approximated from the geological characteristics and literature data in Chapter~\ref{geoCH_ML_preproc}.
\textbf{Thermal diffusivity ($\mathbf{\alpha}$).}
The thermal diffusivity is obtained from maps of the heat capacity ($\rho C$) as $\alpha = \lambda / \rho C$ (see Eq.~\eqref{eq:alpha}).
Heat capacity data is available only in the Canton of Geneva.
To obtain $\alpha$ for both geothermal cadastres, missing values for $\rho C$ have been estimated as weighted averages using
tabulated data \cite{groupe_de_travail_pgg_evaluation_2011-1, sia_sondes_2010}, and Eq.~\eqref{eq:alpha} has been applied to the rasterised maps of $\rho C$ using the same spatial resolution as the thermal conductivity.
\textbf{Ground surface temperature ($T_0$).}
The ground surface temperature has been provided by \citet{assouline_geothermal_2019} for pixels of $200 \times 200$ m$^2$ resolution, as shown in Fig.~\ref{fig:T0_CH}.
The data is estimated from ground measurements using ML \cite{assouline_geothermal_2019}.
To minimise the impact of the built environment on these measurements, I use the annual average ground temperature at a depth of 1 m.
\begin{figure}[b]
\centering
% include second image
\includegraphics[width=.65\linewidth]{images/Figs/T_surface_CH.png}
\caption[Ground surface temperature at 1m depth for Switzerland]{Ground surface temperature ($T_0$) at 1m depth for Switzerland \cite{assouline_geothermal_2019}.}
\label{fig:T0_CH}
\end{figure}
\subsection{Subsurface protection}
\label{data_restrict}
\textbf{Restriction zones for geothermal installations.}
The restriction zones as indicated in the geothermal cadastres \cite{asit_vd_cadastre_geo_2019,sitg_geothermie_2021} are divided into three categories, which indicate whether the installation of BHEs is permitted, limited, or prohibited.
Prohibited zones are excluded from this work.
As no data on the allowed drilling depth is available, the depth of existing installations, obtained from parsing a dataset of existing BHEs, is used in Chapter~\ref{case_study} to approximate a maximum allowed drilling depth.
Restrictions in other cantons are frequently divided into similar categories, whereby the level of detail varies\footnote{See \url{http://www.hetag.ch/geolinks.html} for details}.
\textbf{Water protection plan.} The water protection plan (German: Planerischer Gewässerschutz) is a dataset developed at cantonal level. It differentiates between (i) groundwater (GW) protection zones (S1 - S3), (ii) GW protection areas, and (iii) surface (Ao,Zo) and subsurface (Au, Zu) water protection sectors. The analysis of several cantonal cadastres has shown that geothermal installations are strictly prohibited in GW protection zones and areas in practically all cantons.
Additional regulations are added in several cantons for sectors with subsurface water protection, and rarely also in areas with surface water protection.
%
Similar to the parcel boundaries (Section~\ref{data_buildings}), the water protection plan is available for Switzerland via the interface \textit{geodienste.ch}\footnote{\url{https://www.geodienste.ch/services/planerischer_gewaesserschutz}}, mostly as freely accessible data.
\textbf{Usable groundwater.} The hydrogeological map of the GK500 dataset (see Table~\ref{tab:GK500}) is used to define the usability of the groundwater resources. Hereby, all classes with an indicated usable groundwater thickness of at least 2 m are considered as usable groundwater. This includes layers with usable groundwater thicknesses of (i) $2 - 10$ m, (ii) $10 - 20$ m, and (iii) $> 20$ m in the PRODUKTIV layer of the GK500.
\documentclass[twocolumn,12pt]{article}
\textheight 9 in
\voffset -.75 in
\usepackage{amssymb}
\usepackage{amsmath}
\usepackage{bm}
\usepackage{graphicx}
\usepackage{epstopdf}
\usepackage{hyperref}
\usepackage{natbib}
\usepackage{tikz}
\usepackage{fancyhdr}
\usepackage{algpseudocode}
\usepackage{algorithm}
\usepackage[toc,page]{appendix}
\setlength{\parskip}{5pt plus1pt minus1pt}
\DeclareMathSizes{36}{36}{36}{36}
\title{Train Harder, Smarter:\\Using Graph Theory to Create Route Suggestions}
\author{Forest Trimble\\[email protected]}
\begin{document}
\setlength{\headheight}{15pt}
\pagestyle{fancy}
\fancyhead{}
\fancyhead[L]{Forest Trimble}
\fancyhead[R]{Train Harder, Smarter}
\maketitle
\newcommand{\mycite}[1]{[\citenum{#1}]}
\begin{abstract}
\emph{Cyclists are always in search of the perfect ride on the perfect
roads. They have criteria like distance and elevation gain to ensure
that they get the workout they want. Unfortunately, finding this
ideal ride manually takes a great deal of exploring and time, and
eventually one may settle into the habit of using the same roads that
he/she already knows. We research a way to improve this paradigm and
to generate for cyclists exactly the route they are looking for,
without them having to do any work.}
\end{abstract}
\section{Background} \label{sec:back}
Cycling can be wildly different based on the roads that one takes: on busy
roads with no shoulders, it can be borderline miserable, while few things in
the world are better than spinning down a smoothly paved road with beautiful
vistas of open countryside and no traffic. Unfortunately, cyclists need to
invest massive amounts of time and energy exploring roads and amassing a
repertoire that they can use. Additionally, it is difficult to satisfy training
criteria, like elevation gain and distance, using only that mental repertoire.
This paper chronicles our attempt to develop a solution to this problem.
Specifically, a good solution should take as input a distance to travel, a
start point, and, optionally, an elevation gain, and generate a route from the
start point that satisfies the distance and elevation gain within some
tolerance. A fully robust solution would also utilize user data to ensure that
it traverses the most pleasant roads to ride on and avoids the worst.
\section{Tools}
Creating a solution to this problem from scratch is unnecessary. The larger part
of this problem has already been solved: maps have been digitized and
parsers for the data format already exist. This section details the various
tools and technologies that the algorithm leverages.
First and foremost, we opted to use the Open Street Map format, which is open
source, fairly robust, and has a very active community that contributes to both
the maps themselves and to various (mostly open-source) software projects that
utilize the format. The interested reader should refer to \mycite{wik:osm} for
almost all information regarding OSM, including how to contribute, how to use,
and links to projects that utilize OSM instead of proprietary mapping software.
Additionally, we leveraged some existing code to make it easier to create
a demonstrable algorithm. Pyroute was used, since it has the very nice feature
that the navigation component was entirely decoupled into PyrouteLib. This
allows easy utilization of the Pyroute tools for GUI setup and OSM download
while also making it rather explicit where the navigation functions are.
Notably, pyroute has generally been abandoned in favor of the more advanced
Open Source Routing Machine (OSRM) project, but pyroute was chosen for this
paper instead due to its relative ease of prototyping and intuitiveness. Readers
interested in OSRM should refer to \mycite{wik:osrm}; those interested in
pyroute can sate their interest with \mycite{wik:pyroute}. One notes that
pyroute uses the relatively simple $A^*$ algorithm for routing between two
points. This is a slower implementation than OSRM, but our usage of the actual
routing algorithm is minimal; for the most part, we use pyroute only for loading
maps and displaying routes on them.
\section{Algorithm}
The core idea of our algorithm
is that at any point along the route, the engine generates weights for each
different possible direction to take, generates a random number, and picks a
direction according to the random number and the weights. There are several
different ideas that are competing for weight, so we'll break it down into the
individual components first, and then describe the combination at the end.
Additionally, the algorithm requires a few inputs from the user (in addition
to the Open Street Map network of roads):
\begin{itemize}
\item A distance to travel, $d$.
\item A direction in which to head, $\phi$.
\item An elevation to gain, $h$.
\item A start point, $x_0$.
\end{itemize}
\subsection{Weight for Distance}
\begin{figure}
\centering
\begin{tikzpicture}
\node (init) at (-0.5,0) {$\mbox{pos}_0$};
\filldraw (0,0) circle(0.05);
\draw (2,0) circle(2);
\draw[dashed] (0,0) -- (0.3,-.5) -- (0.8,-0.2) -- (0.5,0.1) -- (0.8,0.4) --
(1.0,0.8) -- (1.1,1.3) -- (1.3,1.0) -- (1.3,0.4) -- (1.4, 0) -- (1.6, -0.5)
-- (1.8, -1) -- (2.0,-1.5) -- (2.1,-1.8) -- (2.4,-1.8) -- (2.6,-2.3) --
(2.7,-2.8) -- (3.1,0.7) -- (3.4,0.8) -- (3.2,0.9) -- (3.0,0.4) -- (2.8,0.9)
-- (2.4,1.3) -- (2.2,0.6) -- (2.3,0.3) -- (2.0,-0.4) -- (1.7, -0.2) --
(1.4,-1) -- (1.0,-1.3) -- (-0.2,-1) -- (0,0);
\node (mainleg) at (5,2) {The ideal loop};
\node (altleg) at (5,1.5) {A possible loop};
\draw (3,2) -- (3.5,2);
\draw[dashed] (3,1.5) -- (3.5,1.5);
\end{tikzpicture}
\caption{The ideal loop starting at $\mbox{pos}_0$ contrasted with a possible
actual loop} \label{fig:ideal}
\end{figure}
The first weight is based on distance. The idea is that there is a theoretical,
perfect loop that covers exactly the amount of distance that you would like,
and the algorithm does its best to direct you along that loop. See
Figure~\ref{fig:ideal} for an example of what this might look like. In order to
direct you along this loop, weights are generated based on
direction. We will first consider how the weights are derived from a given
direction, and then the method for finding the optimal direction, $\theta$.
\subsubsection{Calculating the Weights}
\begin{figure}
\centering
\begin{tikzpicture}
\node (theta) at (0,4.2) {$\theta$};
\node (max) at (0.2,3) {2};
\node (min) at (0.35,1) {0.5};
\node (avg1) at (-1,2.2) {1};
\node (avg2) at (1,2.2) {1};
\draw[thick,<->] (0,0) -- (0,4);
\draw[thick,<->] (-2,2) -- (2,2);
\end{tikzpicture}
\caption{A possible weighted compass} \label{fig:weights}
\end{figure}
First, consider how the weights are generated once
that direction is discovered. Basically, there is a weighted compass oriented
along $\theta$, with weights from 0.5 to 2. See Figure~\ref{fig:weights} to
see what we mean. As shown, the weights do not scale linearly. Instead, they
scale exponentially. This ensures that the weights are mostly centered around
the perpendicular, with growth accelerating farther from the perpendicular.
The idea is to have the weight be double in the ideal
direction, half moving exactly opposite the ideal direction, and unchanged
perpendicular to the ideal direction. Considering these three pieces of
information, we are looking for some $f : [0,2\pi] \to [0.5,2]$ as follows:
\begin{align*}
2 = & f(\theta) \\
1 = & f(\theta \pm \frac{\pi}{2}) \\
\frac{1}{2} = & f(\theta \pm \pi)
\end{align*}
Let $\tau$ be the angle under consideration. We let
\[ \mbox{ang}(\tau, \theta) = |\theta - \tau \mbox{ mod } \pi|. \]
This allows us to define $f$ in terms of the angle between $\tau$ and
$\theta$. This function is necessary because $\theta$ is not necessarily
0. Note that this function only allows $\mbox{ang}(\tau, \theta)
\in [0,\pi]$. We accept this since we would like a compass symmetric
over the angle $\theta$, and we don't care if the angles are negative
or positive. However, the original function was defined in terms of the
value of $\tau$; instead, we are interested in a function based on the
angle between $\tau$ and $\theta$, which requires a slight redefinition:
\begin{align*}
2 = & g(0) \\
1 = & g(\frac{\pi}{2}) \\
\frac{1}{2} = & g(\pi)
\end{align*}
We've already expressed the fact that we're interested in an exponential
function, and a base of two seems the obvious choice, leading to another
reduction:
\begin{align*}
2 = 2^{h(0)} & \to & h(0) = 1 \\
1 = 2^{h(\frac{\pi}{2})} & \to & h(\frac{\pi}{2}) = 0\\
\frac{1}{2} = 2^{h(\pi)} & \to & h(\pi) = -1
\end{align*}
This yields two sets of three collinear points, and it is easy enough to
map one onto the other. Consider $h : [0,\pi] \to [-1,1]$ as follows:
\[ h(\mbox{ang}(\tau,\theta)) = 1 - \frac{2}{\pi}\mbox{ang}(\tau, \theta). \]
This satisfies the three points we gave, so we plug it in and calculate weight
accordingly:
\begin{equation}
\mbox{WEIGHT} = 2^{1-\frac{2}{\pi}\mbox{ang}(\tau,\theta)}. \label{eq:weight}
\end{equation}
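As a quick, hedged illustration, the following minimal Python sketch realizes
this weighting; the folding of the angular difference into $[0,\pi]$ is one
reasonable reading of $\mbox{ang}$ above, and the function names are ours,
not pyroute's:
\begin{verbatim}
from math import pi

def ang(tau, theta):
    # smallest angle between the two
    # directions, folded into [0, pi]
    diff = abs(tau - theta) % (2 * pi)
    return min(diff, 2 * pi - diff)

def weight(tau, theta):
    # 2 in the ideal direction, 1 when
    # perpendicular to it, 0.5 opposite it
    return 2 ** (1 - 2 / pi * ang(tau, theta))
\end{verbatim}
This reproduces the three anchor values of \eqref{eq:weight}: an angular
difference of $0$, $\frac{\pi}{2}$, and $\pi$ yields weights of $2$, $1$,
and $0.5$, respectively.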
\subsubsection{Calculating the Ideal Loop}
Now, we consider the method for finding the ideal direction. The idea behind the
direction loop is that if we had an ideal route, it would run exactly along that
path. Then the ideal direction is the one that puts us in the correct location
along that loop. First, consider how the loop is derived. It is a perfect
circle, passing through $x_0$, with a circumference of $d$, oriented according to
$\phi$. Note that $d$ and $x_0$ are fairly easy to understand, but the meaning
of $\phi$ is less obvious. In order to understand what we mean, first consider
the parametric equations for a circle:
\[ x = \cos t \]
\[ y = \sin t \]
These equations generate the unit circle around the origin, something that we
will address later. For now, we are concerned only about what they mean for
$\phi$. Note that without alteration these equations will generate a
west-oriented loop: the circle starts at the rightmost point and proceeds left.
Consider a revised set of equations incorporating $\phi$:
\[ x = \cos (t + \phi) \]
\[ y = \sin (t + \phi). \]
As mentioned already, $\phi = 0$ corresponds to the west-oriented loop, and one
can quickly see that $\phi = \frac{\pi}{2}$ corresponds to the southern loop,
$\phi = \pi$ to the eastern, and $\phi = \frac{3\pi}{2}$ to the northern. That
being said, none of these loops actually start at the proper point; they are
centered around the origin rather than beginning at it. In order to have any
choice of $\phi$ generate a loop that starts at the origin, we can simply zero
out the initial value:
\[ x = \cos (t + \phi) - \cos \phi \]
\[ y = \sin ( t + \phi ) - \sin \phi. \]
This gives a much more obvious representation of our ideal loop, which starts
at (0,0), and traces out a loop according to $\phi$. One thing of interest is
that $\phi$ is exactly $\pi$ plus the compass direction that we would expect.
Once you recall that our distance goal, $d$, is the circumference of the loop,
it is easy to see that the radius is $\frac{d}{2\pi}$. We can then adjust our
equation to correspond with our starting point and radius as follows:
\[ x = \frac{d}{2\pi}(\cos ( t + \phi ) - \cos \phi ) + x_0 \]
\[ y = \frac{d}{2\pi}(\sin ( t + \phi ) - \sin \phi ) + y_0 \]
Of course, this needs some adjustment to deal with the difference of using
latitude and longitude over a cartesian coordinate system, since latitude
and longitude are not actually coordinates on a 2-d plane, but rather angles
that are made against the center of the Earth. For now, this is sufficient to
get an idea of what is happening, but we'll cover how this will be converted
into a latitude/longitude format in Section~\ref{sec:latlong}.
Until now, we've used $t$ to represent the angle in the parametric equation,
without delving into what it really means. However, we can use a much more
useful expression to represent this angle: $\frac{2\pi}{d}d_i$, where $d_i$ is
the current distance travelled, gives us the
angle in terms of a percentage of the requisite distance travelled. This gives
us the advantage of knowing exactly where in the loop we should be at any
current distance travelled. This also needs to be capped after $d_i \geq d$.
Incorporating this yields the final equations:
\[ x = \frac{d}{2\pi}(\cos (\min(\frac{2\pi}{d}d_i,2\pi) + \phi) - \cos \phi) + x_0 \]
\[ y = \frac{d}{2\pi}(\sin (\min(\frac{2\pi}{d}d_i,2\pi) + \phi) - \sin \phi)+ y_0. \]
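For reference, a direct transcription of these equations into Python (treating
$x_0$ and $y_0$ as planar coordinates for now; the spherical adjustments of
Section~\ref{sec:latlong} are deliberately ignored in this sketch) could read:
\begin{verbatim}
from math import cos, sin, pi

def ideal_point(d, d_i, phi, x0, y0):
    # point on the ideal loop after a
    # travelled distance d_i (capped at d)
    r = d / (2 * pi)
    t = min(2 * pi / d * d_i, 2 * pi)
    x = r * (cos(t + phi) - cos(phi)) + x0
    y = r * (sin(t + phi) - sin(phi)) + y0
    return x, y
\end{verbatim}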
\begin{figure}
\centering
\begin{tikzpicture}
\draw (2,0) circle(2);
\filldraw (0,0) circle(0.05);
\node at (0.4,0.1) {$\bm{x_0}$};
\draw[thick,<->] (-0.2,0) arc (180:300:2.2);
\node at (0.6,-2) {$d_i$};
\filldraw (3.05,-1.7) circle(0.05);
\draw[thick,<->] (4.2,0) arc (0:-60:2.2);
\node at (4.2,-1.3) {$d_j$};
\filldraw (4,0) circle(0.05);
\node at (4.2,0.2) {$\bm{x_j}$};
\node at (1.8,-0.7) {$\bm{x_i}$};
\node at (2.7,-1.4) {$\bm{x}_{\mbox{{\tiny ideal}}}$};
\filldraw (2.1,-0.7) circle (0.05);
\draw[dashed] (2.1,-0.7) -- (4,0) node[above,midway,sloped] {$d_j$};
\draw[dashed] (2.1,-0.7) -- (3.2,-0.7);
\draw[<->] (3.2,-0.7) arc(0:20:1.1);
\node at (3.4,-0.5) {$\theta$};
\end{tikzpicture}
\caption{Important values for the algorithm \emph{Note: not to scale}}
\label{fig:algvals}
\end{figure}
The next step is to utilize these equations to calculate $\theta$. At any point
along the algorithm, we are at some $\bm{x_i}$, and we have travelled a
distance, $d_i$. Figure~\ref{fig:algvals} should help to understand what exactly
the algorithm is attempting to achieve. The idea is this: if we have travelled
some distance $d_i$, then there is an $\bm{x}_{\mbox{{\tiny ideal}}}$ on the
``ideal'' loop. The arc between $\bm{x}_{\mbox{{\tiny ideal}}}$ and $\bm{x_0}$
will be of length $d_i$. The algorithm searches for a point $x_j$ such that the
arc between $\bm{x}_{\mbox{{\tiny ideal}}}$ and $\bm{x_j}$ will have the
\emph{same} distance as the line between $\bm{x_i}$ and $\bm{x_j}$. Fortunately,
we have framed the equations for calculating $\bm{x_j}$ in terms of the distance
travelled along the arc, and the distance between $\bm{x_i}$ and $\bm{x_j}$ can
easily be calculated using the Pythagorean theorem. Note that we find $\bm{x_j}$
as follows:
\begin{align}
x_j = \frac{d}{2\pi} ( & \cos( \frac{ 2 \pi }{ d } ( \min( d, d_i + d_j ) )
+ \phi ) \notag \\ & - \cos \phi ) + x_0
\label{eq:xj} \\
y_j = \frac{d}{2\pi} ( & \sin( \frac{ 2 \pi }{ d } ( \min( d, d_i + d_j ) )
+ \phi ) \notag \\ & - \sin \phi ) + y_0.
\label{eq:yj}
\end{align}
To satisfy our constraint, we have
\begin{equation}
d_j = \sqrt{ ( x_j(d_j) - x_i )^2 + ( y_j(d_j) - y_i )^2 }. \label{eq:dj}
\end{equation}
\subsubsection{Finding $d_j$}
Obviously, this is a rather difficult equation to truly solve, so we must only
heuristically find a solution. Unfortunately, even the heuristic solution is
a difficult one for a few reasons. Most problematically, the sine and cosine
functions have up to two stationary points on the interval we are considering
and there may not be a solution of $d_j$ that will satisfy the equality.
Nonetheless, we will do our best to mitigate these issues and apply a good
heuristic to find $d_j$. First, we rearrange our equation so that we can apply
a root-finding approximation algorithm to it:
\[ f(d_j) = \sqrt{(x_j(d_j) - x_i)^2 + (y_j(d_j) - y_i)^2} - d_j. \]
This simply allows the $d_j$ we are looking for to be the zero of the function.
The first exciting bit of news is that for most solutions of this equation with
$d_j < d$, it will work out that $f(0) > 0$ and $f(d-d_i) < 0$.
We know that $f(0) \geq 0$ by the
positivity of norms, and the square root of the sum of the squares is the
Euclidean norm. We also can be certain that $f(d-d_i) < 0$ since
the norm is also unable to change by more than the diameter
of the ideal loop, or even less based on the proximity of $x_i$ to the center of
the loop. In situations where $f(d-d_i) > 0$, we notice that the distance to the
start of the loop is further away than the amount of distance we have left to
travel; this means that we should just route back to the start anyways. As such,
we have bounds inside of which we know that there exists a solution. We apply
Brent's method, a robust root approximation algorithm, to determine the solution
inside of this zone. The idea behind Brent's method is to combine the bisection
method, the secant method, and inverse quadratic interpolation. Readers
unfamiliar with any of these techniques should refer to \mycite{wik:sec},
\mycite{wik:bisect}, \mycite{wik:invquad}, \mycite{wik:brent} for a brief
overview of the techniques. A more interested reader might consider
\mycite{sauanal} for an in-depth analysis of the techniques as well as some
context for using and understanding them. Some pseudocode is given for Brent's
Method in Algorithm~\ref{alg:brent}, with some helper functions in
Algorithm~\ref{alg:bhelp}. The idea
is this:
\begin{algorithm}[t]
\caption{Using Brent's Method to find a zero of a function}
\label{alg:brent}
\begin{algorithmic}
\Require $f(a_0)f(b_0) < 0$
\Function{Brent}{$f : [a,b] \to \mathbb{R}$, $a_0$, $b_0$}
\If{$|f(a_0)| < |f(b_0)|$}
\State \Call{swap}{$a_0$,$b_0$}
\EndIf
\State $b_{k-1} \gets a_0$
\State $B \gets$ True \Comment{bisected last time?}
\While{$f(b_k) \not = 0$ {\bf and} $|b_k - a_k| > \epsilon$}
\If{$|f(a_k)| < |f(b_k)|$}
\State \Call{swap}{$a_k$,$b_k$}
\EndIf
\State $b_{k+1} \gets$\Call{inv\_quad\_interp}{$f,a_k,b_k,b_{k-1}$}
\State $B$ $\gets$ \Call{nds\_bi}{$a_k$,$b_{k-2}$,$b_{k-1}$,$b_k$,$b_{k+1}$,$B$}
\If{$B$} \Comment{use bisection method}
\State $\displaystyle b_{k+1} \gets \frac{a_k+b_k}{2}$
\EndIf
\State $b_{k-2} \gets b_{k-1}$
\State $b_{k-1} \gets b_k$
\If{$f(a_k)f(b_{k+1}) < 0$}
\State $b_k \gets b_{k+1}$
\Else
\State $a_k \gets b_{k+1}$
\EndIf
\EndWhile
\EndFunction
\end{algorithmic}
\end{algorithm}
\begin{algorithm}[t!]
\caption{Helper functions for Brent's method} \label{alg:bhelp}
\begin{algorithmic}
\State \Comment{\emph{Determines whether to use bisection~~~~~~~}}
\State \Comment{\emph{method or inverse quadratic~~~~~~~~~~~~~~~~~}}
\State \Comment{\emph{interpolation~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~}}
\Function{nds\_bi}{$a_k$,$b_{k-2}$,$b_{k-1}$,$b_k$,$b_{k+1}$,$B$}
\If{$\displaystyle b_{k+1} \not \in \left[ \frac{3a_k+b_k}{4},b_k \right]$}
\State \Return True
\ElsIf{$B$ is True}
\State \Return
$\displaystyle |b_{k+1}-b_k| \geq \frac{|b_k-b_{k-1}|}{2}$
\State ~~~~~~~~~~~$\land$~~$|b_k-b_{k-1}| < |\delta|$
\Else
\State \Return $\displaystyle |b_{k+1}-b_k| \geq \frac{|b_{k-1}-b_{k-2}|}{2}$
\State ~~~~~~~~~~~$\land$~~$|b_{k-1}-b_{k-2}| < |\delta|$
\EndIf
\EndFunction
\end{algorithmic}
\begin{algorithmic}
\State \Comment{\emph{returns the next iterate under inverse~~~~~}}
\State \Comment{\emph{quadratic interpolation~~~~~~~~~~~~~~~~~~~~~~~~~}}
\Function{inv\_quad\_interp}{$f$,$a$,$b$,$c$}
\If{$f(a) \not= f(c)$ {\bf and} $f(b) \not= f(c)$}
\State \Return $\displaystyle \frac{af(b)f(c)}{(f(a)-f(b))(f(a)-f(c))}$
\State ~~~~~~~~~~~$\displaystyle + \frac{bf(a)f(c)}{(f(b)-f(a))(f(b)-f(c))}$
\State ~~~~~~~~~~~$\displaystyle + \frac{cf(a)f(b)}{(f(c)-f(a))(f(c)-f(b))}$
\Else
\State \Return $\displaystyle b - f(b)\frac{b-a}{f(b)-f(a)}$
\EndIf
\EndFunction
\end{algorithmic}
\end{algorithm}
First, we need a bracket in which our function has a root. For us, this is
simple enough; we just use $[0, d-d_i]$. If that bracket does not satisfy the
sign-change condition, we simply set $d_j = d-d_i$ and skip the root search.
The algorithm spells out the actual swapping of $a_k$ and $b_k$, but here
we'll just assume that $|f(a_k)| > |f(b_k)|$ at all points in time; that is,
$b_k$ is a better approximation of the solution than $a_k$. We also require
$b_{k-1}$; at the beginning, $a_k$ will suffice instead.
This is a heuristic, so we'll keep repeating the algorithm until either
$a_k \approx b_k$ or $f(b_k) \approx 0$. At the $k^{\mbox{{\tiny th}}}$ iteration,
we'll calculate $b_{k+1}$ using both inverse quadratic interpolation and the
bisection method.
In general, the inverse quadratic interpolation can proceed very quickly, but we
add the bisection method to ensure convergence. In order to use interpolation,
we require that a few conditions are satisfied. Namely, the candidate iterate
$b_{k+1}$ has to be in a certain interval, and the iterates need to obey
certain bounds.
The rest of the pseudocode serves only to ensure that variables are assigned
correctly. One notes that $B$ indicates whether the bisection method was used
on the previous iteration.
Thus, by utilizing Brent's Algorithm, we can find $x_j$. From here, calculating
$\theta$ is fairly straightforward:
\begin{align}
\tan \theta = & \frac{y_j - y_i}{x_j-x_i} \notag \\
\theta = & \tan^{-1} \frac{y_j - y_i}{x_j-x_i} \label{eq:theta}
\end{align}
Finally, we have a method for calculating the weights based on distance and
direction! Use Brent's algorithm as described in Algorithm~\ref{alg:brent}
with the function to be zeroed as \eqref{eq:dj}. Use that $d_j$ in conjunction
with \eqref{eq:xj} and \eqref{eq:yj} to find
$x_j$ and $y_j$, respectively, and plug into \eqref{eq:theta} to find a
$\theta$. Finally, use $\theta$ in conjunction with \eqref{eq:weight} to assign
weights to directions.
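A hedged sketch of this chain in Python, using SciPy's implementation of
Brent's method (\texttt{scipy.optimize.brentq}) in place of the hand-rolled
Algorithm~\ref{alg:brent} and reusing the \texttt{ideal\_point} helper
sketched earlier, might look as follows; \texttt{atan2} is used instead of a
bare $\tan^{-1}$ so that the correct quadrant is obtained:
\begin{verbatim}
from math import atan2, hypot
from scipy.optimize import brentq

def get_theta(d, d_i, phi, x0, y0, xi, yi):
    # f(d_j): mismatch between arc length and
    # straight-line distance; its zero is the
    # d_j we are looking for
    def f(d_j):
        xj, yj = ideal_point(d, d_i + d_j,
                             phi, x0, y0)
        return hypot(xj - xi, yj - yi) - d_j

    upper = d - d_i
    if f(upper) > 0:
        # no sign change in [0, d - d_i]:
        # just head back toward the start
        d_j = upper
    else:
        d_j = brentq(f, 0.0, upper)
    xj, yj = ideal_point(d, d_i + d_j,
                         phi, x0, y0)
    return atan2(yj - yi, xj - xi)
\end{verbatim}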
\subsection{Miscellaneous Weights}
There are a few more important characteristics to consider when calculating
weights. Perhaps the easiest of these to deal with are the built in road types
that the Open Street Map format provides. It is easy enough to assign these
priorities based on what a cyclist would reasonably hope to see. Basically,
busy roads get weighted lower than low-traffic roads, but things like
pedestrian walkways and other unridable roads also get low weights.
Interestingly, pyroute already sets weights for these values, but these are
generated based on preferences for navigating; these vary greatly from
preferences for going out and having a good time. A good example of this is the
weight that pyroute assigns for cycling on steps, which is not 0. It is
certainly possible for the cyclist to dismount and climb the steps, but one who
is riding for fun is highly unlikely to be excited about this prospect.
Another characteristic to consider is whether a road has already been
travelled. Repeatedly riding along the same stretch of road can make a ride
rather boring, so by weighting against this, it can be avoided. In order to deal
with this, the algorithm stores stretches of road that have been travelled and
halves the weight of any path that the rider has already ridden.
\subsection{Using the Weights to Generate a Route}
Now that we have developed all of the weight calculations, we can consider using
them to generate a route. Algorithm~\ref{alg:grow} lays out the basic idea in
pseudocode. It omits definitions for many of the functions because they were
covered in sufficient detail in previous sections. In particular, the function
for calculating weights is omitted, but it utilizes the algorithms and
characteristics discussed in previous sections.
The general procedure is
to loop until the algorithm returns to the start point after at least 90\%
of the distance $d$ is travelled. At each iteration, $\theta$ will be
calculated using \eqref{eq:theta}, where $(x_j,y_j)$ are calculated by applying
Algorithm~\ref{alg:brent} on \eqref{eq:dj}. Once $\theta$ is calculated, a
weight is generated for each node adjacent to $x_i$, and one of the nodes is
selected at random based on their weights.
\begin{algorithm}[t!]
\caption{Calculating a Route} \label{alg:grow}
\begin{algorithmic}
\Require \Call{edges}{} finds adjacent nodes and \Call{random}{} generates
a random number in the given range.
\Function{grow\_cycle}{$x_0$,$d$,$e$,$\phi$}
\State $X \gets \{\}$ \Comment{the route}
\State $d_k \gets 0$
\State $\theta \gets \phi$
\While{$x_{k}\not = x_0$ {\bf or} $d_k < 0.9d$}
\State $\theta \gets$ \Call{get\_theta}{}
\State $w \gets 0$ \Comment{sum of all weights}
\For{$x \in$ \Call{edges}{$x_k$}}
\State $w \gets w +$\Call{weight}{$x$,$\theta$}
\EndFor
\State $q \gets$ \Call{random}{$[0,w]$}
\For{$x \in$ \Call{edges}{$x_k$}}
\If{$q < $ \Call{weight}{$x$,$\theta$}}
\State $x_{k+1} \gets x$
\State $d_{k+1} \gets d_k +$\Call{dist}{$x_k$,$x_{k+1}$}
\State $X \gets X \cup \{x_{k+1}\}$
\State break
\Else~$q \gets q -$\Call{weight}{$x$,$\theta$}
\EndIf
\EndFor
\EndWhile
\State \Return $X$
\EndFunction
\end{algorithmic}
\end{algorithm}
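The weighted random selection at the heart of Algorithm~\ref{alg:grow} can be
sketched in a few lines of Python; the \texttt{weight\_of} function stands in
for the combined per-node weighting described above and is assumed rather
than given:
\begin{verbatim}
import random

def pick_next(neighbors, theta, weight_of):
    # roulette-wheel selection over the
    # nodes adjacent to the current node
    total = sum(weight_of(x, theta)
                for x in neighbors)
    q = random.uniform(0, total)
    for x in neighbors:
        w = weight_of(x, theta)
        if q < w:
            return x
        q -= w
    # guard against floating-point rounding
    return neighbors[-1]
\end{verbatim}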
\section{Using Spherical Coordinates} \label{sec:latlong}
As mentioned earlier, using the Euclidean norm for latitudes and longitudes
is inaccurate; latitudes and longitudes are spherical coordinates with
fixed $r$. As one can see in Figure~\ref{fig:sphere},
spherical coordinates are the three dimensional
equivalent of polar coordinates. Unfortunately, this poses two problems,
since we have only described generating a route around the origin in a two
dimensional plane. The first problem is switching to a spherical coordinate
system; the second is switching to three dimensions. Clearly,
considering the latitude and longitude to be values on a cartesian
coordinate plane is a flawed idea. Latitudes and longitudes are angles, not
positions in space.
\begin{figure}
\centering
\includegraphics[height=2in]{images/sphere.png}
\caption{The Spherical Coordinate System~\mycite{img:spher}} \label{fig:sphere}
\end{figure}
\subsection{Distance Calculations}
Fortunately, distances are very well defined under any coordinate system, so we
can tackle both problems here without much ado. In the spherical coordinate
system, the haversine formula is used to calculate distances between two points
on the surface of a sphere. The haversine formula uses the haversin function,
which is defined as follows:
\newcommand{\haversin}{\mbox{haversin}}
\[ \haversin (\theta) = \sin^2(\frac{\theta}{2}). \]
Given two coordinate pairs, $(\phi_1, \lambda_1)$ and $(\phi_2, \lambda_2)$,
and a radius, $r$, the haversine formula calculates the distance $d$ between
the two coordinate pairs \mycite{wik:haversin}:
\begin{align*}
d = r\haversin^{-1}(&\haversin(\phi_2 - \phi_1) + \\
& \cos(\phi_1)\cos(\phi_2)\haversin(\lambda_2 -\lambda_1))
\end{align*}
Using the definition of the haversin function, this can be reduced into a
function in terms of elementary trigonometric functions. The only non-trivial
thing to note is that $\haversin^{-1}(x) = 2\sin^{-1}(\sqrt{x})$. Armed with
this knowledge, it is fairly easy to reduce. We'll use an intermediate variable
to hold the value under the square root:
\begin{align*}
h = & \haversin(\phi_2-\phi_1) + \\
& \cos(\phi_1)\cos(\phi_2)\haversin(\lambda_2-\lambda_1) \\
= & \sin^2\left(\frac{\phi_2 - \phi_1}{2}\right) + \\
& \cos(\phi_1)\cos(\phi_2)\sin^2\left(\frac{\lambda_2-\lambda_1}{2}\right)\\
d = & r\haversin^{-1}\left(\sqrt{h}\right) \\
= & 2r\sin^{-1}\left(\sqrt{h}\right)
\end{align*}
By simply substituting this as the right hand side of \eqref{eq:dj}, many of the
projection issues can be avoided.
However, it still isn't perfect; the haversine
formula does not account for variations in the elevation of the earth. Instead,
it assumes one is travelling at sea level. Additionally, it assumes the earth is
a perfect sphere, which is also slightly inaccurate. Less accurate formulae
include flat-surface projection formulae, while more accurate formulae
generate geodesics along the surface of an ellipsoid that models the earth
\mycite{wik:dist}. For our purposes, however, the haversine formula will be
sufficiently accurate.
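For completeness, a minimal Python version of the reduced formula above (with
latitudes and longitudes in radians and $r$ the sphere radius, e.g.\ roughly
$6371$ km for the Earth) is:
\begin{verbatim}
from math import asin, cos, sin, sqrt

def haversine(phi1, lam1, phi2, lam2,
              r=6371.0):
    # great-circle distance on a sphere of
    # radius r (default: Earth, in km)
    h = (sin((phi2 - phi1) / 2) ** 2
         + cos(phi1) * cos(phi2)
         * sin((lam2 - lam1) / 2) ** 2)
    return 2 * r * asin(sqrt(h))
\end{verbatim}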
\subsection{Ideal Loop Calculation}
Distance calculations are the easier half of the problem;
the actual ideal loop and, consequently,
\eqref{eq:xj} and \eqref{eq:yj} depend on a set of parametric equations along
a cartesian coordinate plane. We'll consider a few different methods for
converting to spherical coordinates.
\subsubsection{A Na\"{i}ve Approach} \label{sec:naiveloop}
The first, and perhaps most obvious, method is to simply use latitude and
longitude as the $x$ and $y$ in \eqref{eq:xj} and \eqref{eq:yj}. This is
not a superb solution, but it does have a few advantages, chief of which are
the relative ease of computation and development. For small loops relatively
close to the equator, it may even be fairly similar to the intended ideal loop.
However, the larger the loop and the further it is from the equator, the more
squashed the loop will be. One also notes that the conversion does require a
slight change to the original formulae: one cannot use $d$ as the radius of the
circle. Instead, one must use an approximate conversion of the input distance
to an angular delta. Latitudinal distances change very little (they only
change because the Earth is an ellipsoid, not a sphere), and, as such, the
nautical mile was derived as the distance spanned by one minute of latitude
\mycite{wik:nautmil}. Thus, we can easily convert between a distance goal and an
approximate latitudinal radius. Unfortunately, this will be dramatically
incorrect the closer to the poles we get.
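For a rough sense of scale, assume a hypothetical distance goal of $d = 40$ km
and recall that one nautical mile is roughly $1.852$ km, i.e.\ about one
minute of latitude. The latitudinal radius of the na\"{i}ve loop would then be
\begin{equation*}
\frac{d}{2\pi} \approx 6.4 \mbox{ km} \approx 3.4 \mbox{ nautical miles}
\approx 3.4' \approx 0.057^{\circ} \mbox{ of latitude}.
\end{equation*}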
\subsubsection{Accurate Approach}
\begin{figure}
\centering
\includegraphics[width=2in]{images/220px-Numerical_aperture.png}
\caption{The half-aperture of a cone \mycite{img:aperture}} \label{fig:aperture}
\end{figure}
To find the real ideal loop, a cone must be projected from the origin of the
sphere, and the intersection of the cone and the sphere can be used to calculate
the ideal loop. Note that this approach, while far superior to the flattened
approach, still does not compensate for the ellipsoidal nature of the Earth, or
for the variations in the elevation on the surface of the earth.
The following equation describes a cone:
\[ 0 = (\bm{u} \cdot \bm{d})^2 - |\bm{d}|^2|\bm{u}|^2\cos^2 \phi , \]
where $\bm{u}= (x,y,z)$, $\bm{d} = (a,b,c)$ is a vector parallel to the axis of
the cone, and $\phi$ is one-half the aperture of the cone \mycite{wik:cones}. In
Figure~\ref{fig:aperture}, the $\theta$ pictured is the same as the $\phi$ in
this equation; we have used $\phi$ instead of the more accepted $\theta$ to
avoid confusion with the zenith in spherical coordinates.
Additionally, recall the equation for a sphere:
\[ r = |\bm{u}|. \]
For the most part, we can consider $r = 1$, since we are dealing with the
surface of a sphere. However, we do have to make certain calculations using
the actual radius of the earth. For example, in order to calculate the
aperture $\phi$, we realize that the length of the sides of the cone will be the
radius of the earth. Then $\sin \phi = \frac{r_c}{r}$, where
$r_c = \frac{d}{2\pi}$ is the radius of the desired circle, so we can
calculate $\phi$ as follows:
\begin{equation}
\phi = \sin^{-1} \left( \frac{\frac{d}{2\pi}}{r} \right) \label{eq:phi}
\end{equation}
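As a hypothetical sanity check, again take $d = 40$ km and $r \approx 6371$ km
for the Earth; then \eqref{eq:phi} gives
\begin{equation*}
\phi = \sin^{-1}\left(\frac{40/(2\pi)}{6371}\right)
\approx 10^{-3} \mbox{ rad} \approx 0.057^{\circ},
\end{equation*}
which matches the latitudinal estimate from Section~\ref{sec:naiveloop}.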
With $r = 1$, and taking $\bm{d}$ to be a unit vector, we do our best to create an equation for the circle of interest:
\begin{align*}
0 = & (\bm{u}\cdot \bm{d})^2 - |\bm{d}|^2|\bm{u}|^2\cos^2 \phi \\
= & (ax + by + cz)^2 - \cos^2 \phi \\
= & a^2x^2 + b^2y^2 + c^2z^2 \\
& + 2abxy + 2acxz
+ 2bcyz - \cos^2 \phi \\
\end{align*}
Unfortunately, these equations for spheres and cones are of little aid. The
parametric equations for a circle are inspired by the polar coordinate form
of a circle, so we convert these equations to spherical form. Ideally, this will
likewise inspire a set of parametric equations for this circle.
\begin{align*}
0 = & a^2x^2 + b^2y^2 + c^2z^2 \\
& + 2abxy + 2acxz + 2bcyz - \cos^2 \phi \\
= & a^2\cos^2 \psi + b^2\sin^2 \psi + c^2\sin^2 \theta \\
& + 2ab\cos \psi\sin \psi + 2ac\cos \psi \sin \theta \\
& + 2bc\sin \psi \sin \theta - \cos^2 \phi
\end{align*}
Unfortunately, this doesn't make much headway. Without further inspiration,
we'll stick with the na\"{i}ve approach discussed in
Section~\ref{sec:naiveloop}.
\section{Experimentation \& Results}
At first, the algorithm was rather unsuccessful. The generated weights may have
been useful, but a few things kept them from working well. First, the
weights were calculated at every node. This means that at every point along a
road, it would be determined if one should continue down the road or turn around
and head back in the other direction. Unfortunately, since many roads are rather
curvy, the weights could often tell the user to oscillate back and forth on a
single road for an entirely uninteresting ride.
In subsequent iterations of the algorithm, this was noticed and adjusted for;
instead, the next intersection is found, and the weights are calculated for the
point at the next intersection. This also allows for weights for previously
travelled sections of road to be compressed to pairs of nodes for each stretch
between intersections.
Another issue arose with the preliminary calculation of $\theta$. At first, a
much more na\"{i}ve approach was taken. Instead of using Brent's method to find
the zeroes of \eqref{eq:dj}, the algorithm simply divided the search space into
one hundred sections and chose the section that came closest to a zero.
Another difficult issue was getting the algorithm to terminate. Sometimes the
algorithm might get stuck in a dead end when the straight-line path toward
$x_0$ leads through the dead end, so the algorithm weights continuing down the
dead end higher. Even if something like this did not happen, the loops were
almost invariably well above the distance input. In order to avoid this problem,
the algorithm utilizes point-to-point routing to route back to the start once
$d_i + \|\bm{x}_0-\bm{x}_i\| > 0.95d$; that is, once the direct distance between
the current point and the starting point summed with the distance travelled is
more than 95\% of the distance goal. This allows for both flexibility and
variation. If the route back to the starting point is direct, the loop will be
somewhat below the desired distance goal. On the other hand, if the route back
to the starting point is less direct, the loop will slightly exceed the input
distance goal.
\begin{figure}
\includegraphics[width=3in]{images/route.png}
\caption{A Short Route in Troy} \label{img:route}
\end{figure}
Finally, consider the results that the loop generator produces. A sample route
is given in Figure~\ref{img:route}. This is a short loop with a distance goal of
one mile generated in eastern Troy. Unfortunately, some issues with the
algorithm remain, as there are far too many situations where the algorithm
creates a U-turn. That being said, it does do a decent job of generating a path
that covers nearby roads to travel along. Indeed, especially at the beginning of
the project, the routes don't need to be perfect. Instead, the vision involves
the user utilizing the algorithm to generate a prototype route and then
altering it slightly to avoid known bad roads, take scenic detours, or make
other minor adjustments.
\section{Future Work}
There are many ways to expand the algorithm developed here. While we have done
our best to utilize as much data as possible, expanding the set of data utilized
to generate the routes will likely improve the algorithm. Additionally, some
features were abandoned due to time constraints.
Unfortunately, the API we used for OSM did not have easy access to elevation
data, something that would have also proved very useful to cyclists. A more
robust implementation of this algorithm would also find the elevation data of
every coordinate and use this information to augment the weights. This would
also give rise to some very interesting challenges due to the competing
interests of elevation and distance.
There has been research done into analyzing the probability of accidents
on a road based on certain metadata. This metadata is readily available to the
cities that maintain the roads, but it is beyond the scope of the information
contained in OSM data or even better-funded mapping applications such as
Google Maps. Ideally, as time progresses the data will percolate into these
services; if this data becomes more readily available, then it can be integrated
into the algorithm to help increase user safety.
Additionally, this has been a facet of a three-pronged project developing an
immersive experience for bicycle training. The other two prongs are an
Android application to track training data during a ride and a website to track
long-term training data. Ideally, this algorithm will eventually be seamlessly
integrated into the mobile and web projects, so that routes can be generated
on the web and sent directly to the user's mobile phone for the user to follow
with the Android application.
Once the three prongs of this project have been integrated together, leveraging
the data from the other parts would also provide valuable input for the
algorithm. As mentioned in Section~\ref{sec:back}, cyclists generally perform
their own version of this algorithm to find routes that they will enjoy. Once
the web experience is fully fleshed out, users will upload all of their rides
to the website so that they can be viewed and analyzed in the future. This also
provides extremely valuable data for the algorithm, since user generated routes
may favor better, more interesting roads than a purely weight driven experience.
With this data, a ``heatmap'' layer could be added to the algorithm
that keeps track of how popular a road is. Roads that are seldom used could be
weighted negatively, while extremely popular roads would be almost mandatorily
included in generated routes. One interesting challenge to avoid here would be
dealing with population density. That is, there may be a rural road that is
everything a cyclist could desire, while city riding is absolutely nightmarish.
Unfortunately, the city also has a much higher population density, so even
riders riding in the city only to get out of it and find better roads will
likely add more ``heat'' to unpleasant city roads than to beautiful rural
roads.
Another valuable extension would be to generate routes that are not necessarily
circuits. In this paper, we have focused on loops since cyclists are likely to
want to return home after a good training ride. However, it might also be useful
to have some sort of quasi-navigation that aims for the most fun route
between two locations rather than the most direct route. This way, riders might
be able to mix commute and training.
Additionally, it might be worthwhile to explore other methods for generating
routes. A weight based algorithm makes randomization fairly easy, but other
concepts certainly exist. Another idea might be to just take a
breadth-first-search that returns to the start and choose at random one of the
solutions that is within some epsilon of the distance goal. This approach may
be a pretty solid one, but it does raise some challenges when extended for
additional goals, and enumerating all possibilities may prove computationally
taxing, especially if space constraints become an issue.
All of these pieces are hypothetical additional features to add; however, there
is room for improvement on the algorithm laid out here as well. Further
development on this algorithm should include a refinement of the weight
generation formulas, including experimentation with increasing adherence to the
ideal loop.
\bibliographystyle{plainnat}
\raggedright
\bibliography{paper}
\end{document}
\documentclass{article}
\usepackage{mathtools}
\newenvironment{componentoption}[1]%
{\textbf{#1}\newline}
{\newline}
\newcommand{\aliases}[1] {\newline \textit{Aliases - } #1}
\newcommand{\defaultvalue}[1] {\newline \textit{Default Value - } #1}
\newcommand{\valuetype}[1] {\newline \textit{Value Type - } \textbf{#1}}
\begin{document}
\raggedright
\title{ImageViewer}
\author{SOFA}
\maketitle
\begin{abstract}
Responsible for displaying images in SOFA.
\end{abstract}
\section{Requirements}
SOFA Packages:
The following must be enabled in sofa-local.prf
\begin{itemize}
\item Image Plugin
\end{itemize}
SOFA Plugins:
The following must be loaded in your SOFA instance
\begin{itemize}
\item Image Plugin
\end{itemize}
\section{Scene Settings}
\subsection{Required Settings}
\begin{componentoption}{template}
The defined template type must match the image pixel type. Image types are:
\begin{itemize}
\item \textbf{ImageC} (char)
\item \textbf{ImageUC} (unsigned char)
\item \textbf{ImageI} (int)
\item \textbf{ImageUI} (unsigned int)
\item \textbf{ImageS} (short)
\item \textbf{ImageUS} (unsigned short)
\item \textbf{ImageL} (long)
\item \textbf{ImageUL} (unsigned long)
\item \textbf{ImageF} (float)
\item \textbf{ImageD} (double)
\item \textbf{ImageB} (bool)
\end{itemize}
.
\valuetype{Image type}
\defaultvalue{ImageUC}
\end{componentoption}
\subsection{Optional Settings}
\begin{componentoption}{vectorvis}
Defines the options for visualizing vector and tensor information in images. The values are "subsampleXY subsampleZ scale rgb shape tensorOrder", where:
\begin{itemize}
\item \textbf{subsampleXY} - an integer \textit{n} such that a shape is displayed every \textit{n} voxels in the X and Y planes.
\item \textbf{subsampleZ} - an integer \textit{n} such that a shape is displayed every \textit{n} voxels in the Z plane.
\item \textbf{scale} - an integer \textit{n} such that each shape is displayed \textit{n} times its original size.
\item \textbf{rgb} - When true, a multichannel image is displayed as an RGB image. When false, a multichannel image is displayed in greyscale, where the value is the L2 norm of all the channels.
\item \textbf{shape} - When true, vectors are displayed as arrows in 3 channel images, and tensors are displayed as ellipsoids in 6 channel images.
\item \textbf{tensorOrder} - A string describing the order in which the 6 tensor values are given in the image. Currently supported:
\begin{itemize}
\item \textbf{LowerTriRowMajor} - The lower part of the symmetric matrix is given in Row Major order.
\[
\begin{matrix}
a & b & c \\
b & d & e \\
c & e & f
\end{matrix}
\]
given as
\[
\begin{array} {cccccc}
a & b & d & c & e & f
\end{array}
\]
\item \textbf{UpperTriRowMajor} - The upper part of the symmetric matrix is given in Row Major order.
\[
\begin{matrix}
a & b & c \\
b & d & e \\
c & e & f
\end{matrix}
\]
given as
\[
\begin{array} {cccccc}
a & b & c & d & e & f
\end{array}
\]
\item \textbf{DiagonalFirst} - The values along the diagonal of the symmetric matrix are given first.
\[
\begin{matrix}
a & b & c \\
b & d & e \\
c & e & f
\end{matrix}
\]
given as
\[
\begin{array} {cccccc}
a & d & f & b & c & e
\end{array}
\]
\end{itemize}
\end{itemize}
.
\valuetype{[int,int,int,bool,bool,string]}
\defaultvalue{[5,5,10,true,false,LowerTriRowMajor]}
\end{componentoption}
\begin{componentoption}{arrows}
If true, an image that contains vector information will display the vectors using arrows.
\valuetype{bool}
\defaultvalue{false}
\end{componentoption}
\begin{componentoption}{scale}
The relative scale (size) of the arrows.
\valuetype{real}
\defaultvalue{11.75}
\end{componentoption}
\begin{componentoption}{histogramValues}
Two values representing the minimum and maximum windowing (AKA clamping) values.
\valuetype{[real,real]}
\defaultvalue{No windowing}
\aliases{defaultHistogram, defaultHisto, histoValues}
\end{componentoption}
\begin{componentoption}{defaultSlices}
Three values describing the x, y and z slices to be displayed initially
\valuetype{[int,int,int]}
\defaultvalue{The middle slice in each plane}
\end{componentoption}
\begin{componentoption}{defaultRgb}
If true, an image that contains vector information will be displayed as an RGB image.
\valuetype{bool}
\defaultvalue{false}
\end{componentoption}
\begin{componentoption}{plane}
Actually used as Data, the default slices to be displayed initially can also be specified here.
\valuetype{[int,int,int]}
\defaultvalue{The middle slice in each plane}
\end{componentoption}
\section{Scene Data}
\subsection{Required Data}
\begin{componentoption}{image}
A link to the image in the ImageContainer component.
\valuetype{ImageTypes}
\aliases{outputImage}
\end{componentoption}
\subsection{Optional Data}
\begin{componentoption}{transform}
A link to the transformation in the ImageContainer component.
\valuetype{TransformType}
\aliases{outputTransform}
\end{componentoption}
\subsection{Examples}
image/examples/loadimage.scn
image/examples/loadHDR.scn
\end{document}
\section{Linear combinations, span, and linear independence}
\begin{outcome}
\begin{enumerate}
\item Determine if a vector is within a given span.
\item Determine if a set is spanning.
\item Determine if a set is linearly independent.
\end{enumerate}
\end{outcome}
We now revisit many of the concepts first introduced in
Chapter~\ref{cha:vectors-rn} in terms of $\R^n$ and extend them to
abstract vector spaces.
We will look at linear combinations, span, and linear independence in
this section, and at subspaces, bases, and dimension in the next
section.
\begin{definition}{Linear combination}{linear-combination-vector-space}
Let $V$ be a vector space over a field $K$. Let
$\vect{u}_1,\ldots,\vect{u}_n\in V$. A vector
$\vect{v}\in V$ is called a \textbf{linear combination}%
\index{linear combination!in a vector space}%
\index{linear combination!of vectors} of
$\vect{u}_1,\ldots,\vect{u}_n$ if there exist scalars
$a_1,\ldots,a_n\in K$ such that
\begin{equation*}
\vect{v} = a_1 \vect{u}_1 + \ldots + a_n \vect{u}_n.
\end{equation*}
\end{definition}
\begin{example}{Linear combination of matrices}{linear-combination-matrix}
Write the matrix $A=\begin{mymatrix}{rr} 1 & 3 \\ -1 & 2 \end{mymatrix}$
as a linear combination%
\index{linear combination!of matrices} of
\begin{equation*}
\begin{mymatrix}{rr} 1 & 0 \\ 0 & 1 \end{mymatrix},\quad
\begin{mymatrix}{rr} 1 & 0 \\ 0 & -1 \end{mymatrix},\quad
\begin{mymatrix}{rr} 0 & 1 \\ 1 & 0 \end{mymatrix},\quad\mbox{and}\quad
\begin{mymatrix}{rr} 0 & -1 \\ 1 & 0 \end{mymatrix}.
\end{equation*}
\end{example}
\begin{solution}
We must find coefficients $a,b,c,d$ such that
\begin{equation*}
\begin{mymatrix}{rr} 1 & 3 \\ -1 & 2 \end{mymatrix}
~=~ a \begin{mymatrix}{rr} 1 & 0 \\ 0 & 1 \end{mymatrix}
+ b \begin{mymatrix}{rr} 1 & 0 \\ 0 & -1 \end{mymatrix}
+ c \begin{mymatrix}{rr} 0 & 1 \\ 1 & 0 \end{mymatrix}
+ d \begin{mymatrix}{rr} 0 & -1 \\ 1 & 0 \end{mymatrix},
\end{equation*}
or equivalently,
\begin{equation*}
\begin{mymatrix}{rr} 1 & 3 \\ -1 & 2 \end{mymatrix}
~=~ \begin{mymatrix}{cc} a+b & c-d \\ c+d & a-b \end{mymatrix}.
\end{equation*}
This yields a system of four equations in four variables:
\begin{equation*}
\begin{array}{r@{~~}c@{~}r}
a+b &=& 1, \\
c+d &=& -1, \\
c-d &=& 3, \\
a-b &=& 2.
\end{array}
\end{equation*}
We can easily solve the system of equations to find the unique
solution $a=\frac{3}{2}$, $b=-\frac{1}{2}$, $c=1$, $d=-2$.
Therefore
\begin{equation*}
\begin{mymatrix}{rr} 1 & 3 \\ -1 & 2 \end{mymatrix}
~=~ \frac{3}{2} \begin{mymatrix}{rr} 1 & 0 \\ 0 & 1 \end{mymatrix}
- \frac{1}{2} \begin{mymatrix}{rr} 1 & 0 \\ 0 & -1 \end{mymatrix}
+ 1 \begin{mymatrix}{rr} 0 & 1 \\ 1 & 0 \end{mymatrix}
- 2 \begin{mymatrix}{rr} 0 & -1 \\ 1 & 0 \end{mymatrix}.
\end{equation*}
\end{solution}
\begin{example}{Linear combination of polynomials}{linear-combination-polynomials}
Write the polynomial $p(x) = 7x^2 + 4x - 3$ as a linear combination%
\index{linear combination!of polynomials} of
\begin{equation*}
q_1(x) = x^2,\quad
q_2(x) = (x+1)^2,\quad\mbox{and}\quad
q_3(x) = (x+2)^2.
\end{equation*}
\end{example}
\begin{solution}
Note that $q_2(x) = (x+1)^2 = x^2 + 2x + 1$ and
$q_3(x) = (x+2)^2 = x^2 + 4x + 4$. We must find coefficients $a,b,c$
such that $p(x) = aq_1(x) + bq_2(x) + cq_3(x)$, or equivalently,
\begin{equation*}
7x^2 + 4x - 3 ~=~ ax^2 ~+~ b(x^2 + 2x + 1) ~+~ c(x^2 + 4x + 4).
\end{equation*}
Collecting equal powers of $x$, we can rewrite this as
\begin{equation*}
7x^2 + 4x - 3 ~=~ (a+b+c)x^2 ~+~ (2b+4c)x ~+~ (b+4c).
\end{equation*}
Since two polynomials are equal if and only if each corresponding
coefficient is equal, this yields a system of three equations in
three variables
\begin{equation*}
\begin{array}{r@{~~}c@{~}r}
a+b+c &=& 7, \\
2b+4c &=& 4, \\
b+4c &=& -3.
\end{array}
\end{equation*}
We can easily solve this system of equations and find that the
unique solution is $a=\frac{5}{2}$, $b=7$,
$c=-\frac{5}{2}$. Therefore
\begin{equation*}
p(x) ~=~ \frac{5}{2}\,q_1(x) ~+~ 7\,q_2(x) ~-~ \frac{5}{2}\,q_3(x).
\end{equation*}
\end{solution}
As in Chapter~\ref{cha:vectors-rn}, the span of a set of vectors is
defined as the set of all of its linear combinations. We generalize
the concept of span to consider spans of arbitrary (possibly finite,
possibly infinite) sets of vectors.
\begin{definition}{Span of a set of vectors}{vector-space-span}
Let $V$ be a vector space over some field $K$, and let $S$ be a set
of vectors (i.e., a subset of $V$). The \textbf{span}%
\index{span}%
\index{vector!span}%
\index{vector space!span} of $S$ is the set of all linear
combinations of elements of $S$. In symbols, we have
\begin{equation*}
\sspan S
~=~ \set{a_1\vect{u}_1+\ldots+a_k\vect{u}_k \mid
\mbox{
$\vect{u}_1,\ldots,\vect{u}_k\in S$
and
$a_1,\ldots,a_k\in K$
}}.
\end{equation*}
\end{definition}
It is important not to misunderstand this definition. Even when the
set $S$ is infinite, each {\em individual} element
$\vect{v}\in\sspan S$ is a linear combination of only {\em finitely
many} elements $\vect{u}_1,\ldots,\vect{u}_k$ of $S$.
The definition does not talk about infinite linear combinations
\begin{equation*}
a_1\vect{u}_1 + a_2\vect{u}_2 + a_3\vect{u}_3 + \ldots
\end{equation*}
Indeed, such infinite sums do not typically exist. However, different
elements $\vect{v},\vect{w}\in\sspan S$ can be linear combinations of
a different (finite) number of vectors of $S$. For example, it is
possible that $\vect{v}$ is a linear combination of 10 elements of
$S$, and $\vect{w}$ is a linear combination of 100 elements of $S$.
\begin{example}{Spans of sequences}{spans-sequences}
Consider the vector space $\Seq_K$ of infinite sequences. For every
$k\in\N$, let $e^k$ be the sequence whose $k\th$ element is $1$ and
that is $0$ everywhere else, i.e.,
\begin{equation*}
\begin{array}{l}
e^0 = (1,0,0,0,0,\ldots), \\
e^1 = (0,1,0,0,0,\ldots), \\
e^2 = (0,0,1,0,0,\ldots), \\
\end{array}
\end{equation*}
and so on.
Let $S=\set{e^k \mid k\in\N}$. Which of the following sequences are in
$\sspan S$?
\begin{enumialphparenastyle}
\begin{enumerate}
\item $f = (1,1,1,0,0,0,0,0,\ldots)$ (followed by infinitely many zeros),
\item $g = (1,2,0,5,0,0,0,0,\ldots)$ (followed by infinitely many zeros),
\item $h = (1,1,1,1,1,1,1,1,\ldots)$ (followed by infinitely many ones),
\item $k = (1,0,1,0,1,0,1,0,\ldots)$ (forever alternating between $1$ and $0$).
\end{enumerate}
\end{enumialphparenastyle}
\end{example}
\begin{solution}
\begin{enumialphparenastyle}
\begin{enumerate}
\item We have $f\in\sspan S$, because $f = e^0 + e^1 + e^2$.
\item We have $g\in\sspan S$, because $g = 1e^0 + 2e^1 + 5e^3$.
\item The sequence $h$ is not in $\sspan S$, because each element
of $\sspan S$ is, by definition, a linear combination of {\em
finitely many} elements of $S$. No linear combinations of
finitely many $e^k$ can end in infinitely many ones. Note that
we are not permitted to write an infinite sum such as
$e^0+e^1+e^2+\ldots$. Such infinite sums are not defined in
vector spaces.
\item The sequence $k$ is not in $\sspan S$, for the same reason.
We would need to add infinitely many sequences of the form $e^k$
to get a sequence that contains infinitely many non-zero
elements. However, this is not permitted by the definition of
span.
\end{enumerate}
\end{enumialphparenastyle}
\vspace{-4ex}
\end{solution}
\begin{example}{Span of polynomials}{span-of-polynomials}
Let $p(x)=7x^2+4x-3$. Is $p(x)\in\sspan\set{x^2,~ (x+1)^2,~ (x+2)^2}$?
\end{example}
\begin{solution}
The answer is yes, because we found in
Example~\ref{exa:linear-combination-polynomials} that
$p(x) = \frac{5}{2}\,x^2 ~+~ 7\,(x+1)^2 ~-~ \frac{5}{2}\,(x+2)^2$.
\end{solution}
We say that a set of vectors $S$ is a \textbf{spanning set}%
\index{spanning set}%
\index{vector space!spanning set} for $V$ if $V = \sspan S$.
\begin{example}{Spanning set}{spanning-set}
Let $S = \set{x^2,~ (x+1)^2,~ (x+2)^2}$. Show that $S$ is a
spanning set for $\Poly_2$, the vector space of all polynomials of
degree at most $2$.
\end{example}
\begin{solution}
This is analogous to Example~\ref{exa:linear-combination-polynomials}.
Consider an arbitrary element $p(x) = p_2x^2 + p_1x + p_0$ of
$\Poly_2$. We must show that $p(x)\in\sspan S$, i.e., that there
exist $a,b,c\in K$ such that
\begin{equation*}
p(x) = ax^2 + b(x+1)^2 + c(x+2)^2.
\end{equation*}
We can equivalently rewrite this equation as
\begin{equation*}
p_2x^2 + p_1x + p_0 ~=~ (a+b+c)x^2 ~+~ (2b+4c)x ~+~ (b+4c),
\end{equation*}
which yields the system of equations
\begin{equation*}
\begin{array}{r@{~~}c@{~}r}
a+b+c &=& p_2 \\
2b+4c &=& p_1 \\
b+4c &=& p_0
\end{array}
\quad\roweq\quad
\begin{mymatrix}{ccc|c}
1 & 1 & 1 & p_2 \\
0 & 2 & 4 & p_1 \\
0 & 1 & 4 & p_0 \\
\end{mymatrix}
\quad\roweq\quad
\begin{mymatrix}{ccc|c}
1 & 0 & 0 & p_2-\frac{3}{4}p_1+\frac{1}{2}p_0 \\
0 & 1 & 0 & p_1-p_0 \\
0 & 0 & 1 & \frac{1}{2}p_0-\frac{1}{4}p_1 \\
\end{mymatrix}.
\end{equation*}
Since the system has rank 3, it has a solution. Therefore,
$p(x)\in\sspan S$. Since $p(x)$ was an arbitrary element of
$\Poly_2$, it follows that $S$ is a spanning set for $\Poly_2$.
\end{solution}
To define the concept of linear independence in a general vector
space, it will be convenient to base our definition on the
``alternative'' characterization of
Theorem~\ref{thm:characterization-linear-independence}. Here too, we
generalize the definition to an arbitrary (finite or infinite) set of
vectors.
\begin{definition}{Linear independence}{linear-independence-vector-space}
Let $V$ be a vector space over some field $K$. A finite set of
vectors $\set{\vect{u}_1,\ldots,\vect{u}_k}$ is called
\textbf{linearly independent}%
\index{linear independence!in a vector space}%
\index{vector!linearly independent!in a vector space}%
\index{vector space!linear independence}
if the equation
\begin{equation*}
a_1\,\vect{u}_1 + \ldots + a_k\,\vect{u}_k = \vect{0}
\end{equation*}
has only the trivial solution $a_1,\ldots,a_k=0$. An infinite set
$S$ of vectors is called linearly independent if every finite subset
of $S$ is linearly independent. A set of vectors is called
\textbf{linearly dependent}%
\index{linear dependence}%
\index{vector!linearly dependent} if it is not linearly independent.
\end{definition}
\begin{example}{Linearly independent polynomials}{linear-independence-polynomial}
Determine whether the polynomials $x^2$, $x^2 + 2x - 1$, and
$2x^2 - x + 3$ are linearly independent.
\end{example}
\begin{solution}
According to the definition of linear independence, we must solve
the equation
\begin{equation*}
ax^2 + b(x^2 + 2x - 1) + c(2x^2 - x + 3) ~=~ 0.
\end{equation*}
If there is a non-trivial solution, the polynomials are linearly
dependent. If there is only the trivial solution, they are linearly
independent. We first rearrange the left-hand side to collect equal
powers of $x$:
\begin{equation*}
(a + b + 2c)x^2 + (2b - c)x + (3c - b) ~=~ 0.
\end{equation*}
This turns into a system of 3 equations in 3 variables:
\begin{equation*}
\begin{array}{rcl}
a + b + 2c &=& 0 \\
2b - c &=& 0 \\
3c - b &=& 0
\end{array}
\quad\roweq\quad
\begin{mymatrix}{rrr|r}
1 & 1 & 2 & 0 \\
0 & 2 & -1 & 0 \\
0 & -1 & 3 & 0
\end{mymatrix}
\quad\roweq\quad
\begin{mymatrix}{rrr|r}
1 & 0 & 0 & 0 \\
0 & 1 & 0 & 0 \\
0 & 0 & 1 & 0
\end{mymatrix}.
\end{equation*}
Since the system has rank 3, there are no free variables. The only
solution is $a=b=c=0$, and the polynomials are linearly
independent.
\end{solution}
\begin{example}{Linearly independent sequences}{linear-independence-sequences}
Let $K$ be a field, and consider again the sequences from
Example~\ref{exa:spans-sequences},
\begin{equation*}
\begin{array}{l}
e^0 = (1,0,0,0,0,\ldots), \\
e^1 = (0,1,0,0,0,\ldots), \\
e^2 = (0,0,1,0,0,\ldots), \\
\end{array}
\end{equation*}
and so on. Let $S=\set{e^0, e^1, e^2,\ldots}$. This is an infinite
subset of $\Seq_K$. Show that $S$ is linearly independent.
\end{example}
\begin{solution}
Since $S$ is an infinite set, we have to show that every finite
subset of $S$ is linearly independent. So consider a finite subset
\begin{equation*}
\set{e^{k_1}, e^{k_2}, \ldots, e^{k_n}} \subseteq S
\end{equation*}
and assume that
\begin{equation}\label{eqn:linear-independence-sequences}
a_1e^{k_1} + a_2e^{k_2} + \ldots + a_ne^{k_n} = 0.
\end{equation}
We have to show that $a_1,\ldots,a_n=0$. Consider some index
$i\in\set{1,\ldots,n}$. Then the $k_i\th$ element of
$a_1e^{k_1} + \ldots + a_ne^{k_n}$ is equal to $a_i$ by the
left-hand side of {\eqref{eqn:linear-independence-sequences}}, but
it is also equal to $0$ by the right-hand side of
{\eqref{eqn:linear-independence-sequences}}. It follows that $a_i=0$
for all $i\in\set{1,\ldots,n}$, and therefore
$\set{e^{k_1}, e^{k_2}, \ldots, e^{k_n}}$ is linearly
independent. Since $\set{e^{k_1}, e^{k_2}, \ldots, e^{k_n}}$ was an
arbitrary finite subset of $S$, it follows, by definition, that $S$
is linearly independent.
\end{solution}
\begin{example}{Linearly dependent matrices}{linearly-dependent-matrices}
  Determine whether the following elements of $\Mat_{2,2}$ are
  linearly independent:
\begin{equation*}
M_1 = \begin{mymatrix}{rr} -1 & 0 \\ 1 & -1 \end{mymatrix},\quad
M_2 = \begin{mymatrix}{rr} 1 & 1 \\ 1 & 2 \end{mymatrix},\quad
M_3 = \begin{mymatrix}{rr} 1 & 3 \\ 5 & 4 \end{mymatrix}.
\end{equation*}
\end{example}
\begin{solution}
To determine whether $\set{M_1,M_2,M_3}$ is linearly independent, we
look for solutions to
\begin{equation*}
aM_1 + bM_2 + cM_3 = 0.
\end{equation*}
Notice that this equation has non-trivial solutions, for example
$a=2$, $b=3$ and $c=-1$. Therefore the matrices are linearly
dependent.
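  Indeed, this can be checked directly:
  \begin{equation*}
    2M_1 + 3M_2 - M_3
    ~=~ \begin{mymatrix}{rr} -2 & 0 \\ 2 & -2 \end{mymatrix}
    + \begin{mymatrix}{rr} 3 & 3 \\ 3 & 6 \end{mymatrix}
    - \begin{mymatrix}{rr} 1 & 3 \\ 5 & 4 \end{mymatrix}
    ~=~ \begin{mymatrix}{rr} 0 & 0 \\ 0 & 0 \end{mymatrix}.
  \end{equation*}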
\end{solution}
\begin{example}{Linearly independent functions}{linearly-independent-functions}
In the vector space $\Func_{\R,\R}$ of real-valued functions on the
real numbers, show that the functions $f(x)=\sin x$ and $g(x)=\cos
x$ are linearly independent.
\end{example}
\begin{solution}
Assume $A\sin x+B\cos x=0$. Note that this is an equality of
functions, which means that it is true for all $x$. In particular,
substituting $x=0$ into the equation, and using the fact that
$\sin 0=0$ and $\cos 0=1$, we have
\begin{equation*}
0 = A\sin 0 + B\cos 0 = A\cdot 0 + B\cdot 1 = B,
\end{equation*}
and therefore $B=0$. On the other hand, substituting
$x=\frac{\pi}{2}$ into the equation, and using the fact that
$\sin\frac{\pi}{2} = 1$ and $\cos\frac{\pi}{2}=0$, we have
\begin{equation*}
0 = A\sin\frac{\pi}{2} + B\cos\frac{\pi}{2} = A\cdot 1 + B\cdot 0
= A,
\end{equation*}
and therefore $A=0$. Therefore, the equation $A\sin x+B\cos x=0$
only has the trivial solution $A=B=0$, and it follows that $\sin x$
and $\cos x$ are linearly independent.
\end{solution}
The properties of linear independence that were discussed in
Chapter~\ref{cha:vectors-rn} remain true in the general setting of
vector spaces. For example, the first two parts of
Proposition~\ref{prop:properties-linear-independence} apply without change.
(The third part specifically mentions $\R^n$, but it can be generalized
to any vector space of dimension $n$.) We also have the usual
characterization of linear dependence in terms of redundant vectors:
\begin{proposition}{Linear dependence and redundant vectors}{linear-dependence-redundant}
Let $V$ be a vector space, and let $\vect{u}_1,\vect{u}_2,\ldots$ be
a (finite or infinite) sequence of vectors in $V$. If
$\vect{u}_1,\vect{u}_2,\ldots$ are linearly dependent, then at least
one of the vectors can be written as a linear combination of earlier
vectors in the sequence:
\begin{equation*}
\vect{u}_j = a_1\,\vect{u}_1 + a_2\,\vect{u}_2 + \ldots + a_{j-1}\,\vect{u}_{j-1},
\end{equation*}
for some $j$.
\end{proposition}
\begin{proof}
Suppose that the vectors are linearly dependent. Then the equation
$b_1\vect{u}_1+\ldots+b_k\vect{u}_k=\vect{0}$ has a non-trivial solution
for some $k$. In other words, there exist scalars $b_1,\ldots,b_k$,
not all equal to zero, such that
$b_1\vect{u}_1+\ldots+b_k\vect{u}_k=\vect{0}$. Let $j$ be the largest index
such that $b_j\neq 0$. Then
$b_1\vect{u}_1+\ldots+b_j\vect{u}_j=\vect{0}$. Dividing by $b_j$ and
solving for $\vect{u}_j$, we have
$\vect{u}_j = -\frac{b_1}{b_j}\vect{u}_1 - \ldots -
\frac{b_{j-1}}{b_j}\vect{u}_{j-1}$, so $\vect{u}_j$ can be written
as a linear combination of earlier vectors as claimed.
\end{proof}
\begin{example}{Polynomials of increasing degree}{polynomials-increasing-degree}
Consider a sequence of non-zero polynomials $p_1(x), \ldots, p_k(x)$
of increasing degree, i.e., such that the degree of each $p_i(x)$ is
strictly larger than that of $p_{i-1}(x)$. Show that
$p_1(x),\ldots,p_k(x)$ are linearly independent in the vector space
$\Poly$.
\end{example}
\begin{solution}
  A polynomial of degree $n$ cannot be a linear combination of
  polynomials of degree less than $n$, because any such linear
  combination again has degree less than $n$. Therefore, none of the
polynomials $p_1(x), \ldots, p_k(x)$ can be written as a linear
combination of earlier polynomials. By
Proposition~\ref{prop:linear-dependence-redundant}, $p_1(x), \ldots,
p_k(x)$ are linearly independent.
\end{solution}
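For instance, the monomials $1, x, x^2, \ldots, x^k$ are non-zero
polynomials of strictly increasing degree, so by this example, the set
$\set{1, x, x^2, \ldots, x^k}$ is linearly independent in $\Poly$ for
every $k$.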
Theorems~\ref{thm:unique-linear-combination} and
{\ref{thm:linearly-independent-subset}} also remain true in the
setting of general vector spaces. The original proofs can be used
without change. Thus, if $\vect{u}_1,\ldots,\vect{u}_k$ are linearly
independent, then every vector
$\vect{v}\in\sspan\set{\vect{u}_1,\ldots,\vect{u}_k}$ can be uniquely
written as a linear combination of
$\vect{u}_1,\ldots,\vect{u}_k$. Also, given any finite set of vectors,
we can find a subset of the vectors that is linearly independent and
has the same span.
We finish this section with a useful observation about linear
independence. Namely, given a linearly independent set of vectors and
one more vector that is not in their span, then we can add the vector
to the set and it will remain linearly independent.
\begin{proposition}{Adding to a linearly independent set}{adding-linearly-independent}
Suppose $\set{\vect{u}_1,\ldots,\vect{u}_k}$ is linearly
independent and
$\vect{v}\notin \sspan\set{\vect{u}_1,\ldots,\vect{u}_k}$. Then
the set
\begin{equation*}
\set{\vect{u}_1,\ldots,\vect{u}_k,\vect{v}}
\end{equation*}
is also linearly independent.
\end{proposition}
\begin{proof}
  Assume, on the contrary, that the set is linearly dependent. Then
by Proposition~\ref{prop:linear-dependence-redundant}, one of the
vectors can be written as a linear combination of earlier vectors.
This vector cannot be one of the $\vect{u}_i$, because
$\vect{u}_1,\ldots,\vect{u}_k$ are linearly independent. It
also cannot be $\vect{v}$, because
$\vect{v}\notin
\sspan\set{\vect{u}_1,\ldots,\vect{u}_k}$. Therefore, our
assumption cannot be true, and the set is linearly independent.
\end{proof}
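For example, the set $\set{1,x}$ is linearly independent in $\Poly_2$,
and $x^2\notin\sspan\set{1,x}$, because every element of
$\sspan\set{1,x}$ is a polynomial of degree at most $1$. By
Proposition~\ref{prop:adding-linearly-independent}, the set
$\set{1,x,x^2}$ is therefore linearly independent as well.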
\newcommand{\globalassum}{Assumptions \ref{assu:paper_smoothness}---\ref{assu:paper_bounded} }
\section{Detailed assumptions, lemmas, and proofs\label{sec:appendix_proofs}}
\subsection{Tools}
We begin by stating two general propositions that will be useful.
First, we show that a version of H{\"o}lder's inequality (and, as a special
case, of the Cauchy-Schwarz inequality) can be applied to weighted sums of
tensors.
%
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
\begin{prop}
\label{propref:tensor_cauchy_schwartz}Tensor array version of H{\"o}lder's
inequality. Let $w$ be an array of scalars and let $a=\left(a_{1},...,a_{N}\right)$
be an array of tensors, where each $a_{n}$ is indexed by $i=1,\ldots,D_{A}$
($i$ may be a multi-index---e.g., if each $a_{n}$ is a $D\times D$ matrix,
then $i=\left(j,k\right)$, for $j,k\in\left[D\right]$ and $D_{A}=D^{2}$).
Let $p,q\in\left[1,\infty\right]$ be two numbers such that $p^{-1}+q^{-1}=1$.
Then
\begin{align*}
\norm{\frac{1}{N}\sum_{n=1}^{N}w_{n}a_{n}}_{1} & \le\frac{D_{A}^{\frac{1}{p}}}{N}\norm w_{p}\norm a_{q}.
\end{align*}
In particular, with $p=q=2$,
\begin{align*}
\norm{\frac{1}{N}\sum_{n=1}^{N}w_{n}a_{n}}_{1} & \le\sqrt{D_{A}}\frac{\norm w_{2}}{\sqrt{N}}\frac{\norm a_{2}}{\sqrt{N}}.
\end{align*}
\end{prop}
%
\begin{proof}
The conclusion follows from the ordinary H{\"o}lder's inequality
applied term-wise to $n$ and Jensen's inequality applied to the indices
$i$.
\begin{align*}
\norm{\frac{1}{N}\sum_{n=1}^{N}w_{n}a_{n}}_{1} & =\sum_{i=1}^{D_{A}}\left|\frac{1}{N}\sum_{n=1}^{N}w_{n}\left(a_{n}\right)_{i}\right|\\
& \le\frac{1}{N}\sum_{i=1}^{D_{A}}\left|\left(\sum_{n=1}^{N}\left|w_{n}\right|^{p}\right)^{\frac{1}{p}}\left(\sum_{n=1}^{N}\left|\left(a_{n}\right)_{i}\right|^{q}\right)^{\frac{1}{q}}\right|\text{(H{\"o}lder)}\\
& =\frac{1}{N}\norm w_{p}\frac{D_{A}}{D_{A}}\sum_{i=1}^{D_{A}}\left(\sum_{n=1}^{N}\left|\left(a_{n}\right)_{i}\right|^{q}\right)^{\frac{1}{q}}\\
& \le\frac{1}{N}\norm w_{p}D_{A}\left(\frac{1}{D_{A}}\sum_{i=1}^{D_{A}}\sum_{n=1}^{N}\left|\left(a_{n}\right)_{i}\right|^{q}\right)^{\frac{1}{q}}\textrm{ (Jensen applied to }i\textrm{)}\\
& =\frac{1}{N}\norm w_{p}D_{A}\left(\frac{1}{D_{A}}\sum_{n=1}^{N}\norm{a_{n}}_{q}^{q}\right)^{\frac{1}{q}}\\
& =\frac{1}{N}\norm w_{p}D_{A}^{1-\frac{1}{q}}\norm a_{q}\\
& =\frac{D_{A}^{\frac{1}{p}}}{N}\norm w_{p}\norm a_{q}.
\end{align*}
\end{proof}
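As a special case, when $D_{A}=1$ (so that each $a_{n}$ is a scalar), the
bound reduces to the ordinary H{\"o}lder inequality for weighted sums,
\begin{align*}
\left|\frac{1}{N}\sum_{n=1}^{N}w_{n}a_{n}\right| & \le\frac{1}{N}\norm w_{p}\norm a_{q}.
\end{align*}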
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
Next, we prove a relationship between the term-wise difference between
matrices and the difference between their operator norms. It is well-known
that the minimum eigenvalue of a non-singular matrix is continuous
in the entries of the matrix. In the next proposition, we quantify
this continuity for the $L_{1}$ norm.
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
\begin{prop}
\label{propref:operator_norm_continuity}
%
Let $A$ and $B$ be two square matrices of the same size.
Let $\norm{A^{-1}}_{op}\le \constop$ for some finite $\constop$. Then
%
\begin{align*}
\norm{A-B}_{1}\le \frac{1}{2} (\constop)^{-1} &
\quad\Rightarrow\quad\norm{B^{-1}}_{op} \le 2 \constop.
\end{align*}
%
\begin{proof}
%
We will use the results stated in Theorem 4.29 of \citet{schott:2016:matrix} and
the associated discussion in Example 4.14, which establish the following result.
Let $A$ be a square, nonsingular matrix, and let $I$ be the identity matrix of
the same size. Let $\norm{\cdot}$ denote any matrix norm satisfying $\norm{I} =
1$. Let $D$ be a matrix of the same size as $A$ satisfying
%
\begin{align}\eqlabel{ab_matrix_condition}
    \norm{A^{-1}} \norm{D} < 1.
\end{align}
%
Then
%
\begin{align}\label{eq:matrix_norm_continuity}
\norm{A^{-1} - (A + D)^{-1}} \le
    \frac{\norm{A^{-1}}\norm{D}}{1 - \norm{A^{-1}}\norm{D}} \norm{A^{-1}}.
\end{align}
%
We will apply \eqref{matrix_norm_continuity} using the operator norm
$\norm{\cdot}_{op}$, for which $\norm I_{op}=1$ and with $D := B - A$.
Because $\norm{A^{-1}}_{op}\le \constop$, $A$ is invertible.
Assume that $\norm{A-B}_{1}\le \frac{1}{2} (\constop)^{-1}$. First, note that
%
\begin{align}\label{eq:ab_matrix_condition_fulfilled}
\norm{A^{-1}}_{op} \norm{D}_{op} &=
\norm{A^{-1}}_{op}\norm{B - A}_{op} \nonumber \\
&\le\norm{A^{-1}}_{op}\norm{B - A}_{1}
& \textrm{(ordering of matrix norms)}\nonumber \\
& \le \constop \frac{1}{2} (\constop)^{-1}
& \textrm{(by assumption)} \nonumber \\
&= \frac{1}{2} < 1,
\end{align}
%
so \eqref{ab_matrix_condition} is satisfied and we can apply
\eqref{matrix_norm_continuity}. Then
%
\begin{align*}
\norm{B^{-1}}_{op}
& \le \norm{B^{-1}-A^{-1}}_{op} + \norm{A^{-1}}_{op}
& \textrm{ (triangle inequality)}\\
& \le \frac{\norm{A^{-1}}_{op}\norm{B - A}_{op}}
{1 - \norm{A^{-1}}_{op}\norm{B - A}_{op}}
\norm{A^{-1}}_{op} + \norm{A^{-1}}_{op}
& \textrm{(\eqref{matrix_norm_continuity})}\\
& \le \frac{\frac{1}{2}}{1-\frac{1}{2}}\norm{A^{-1}}_{op} +
\norm{A^{-1}}_{op}
&\textrm{(\eqref{ab_matrix_condition_fulfilled})} \\
& \le 2 \constop.&\textrm{(by assumption)}
\end{align*}
\end{proof}
%
\end{prop}
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
Next, we define the quantities needed to make use of the integral form of
the Taylor series remainder.\footnote{We are indebted to Pang Wei Koh for
pointing out the need to use the integral form of the remainder for
Taylor series expansions of vector-valued functions.}
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
\begin{prop}
\label{propref:taylor_series_remainder}
%
For any $\theta \in \Omega_{\theta}$ and any $\tilde{w} \in W$,
%
\begin{align*}
G(\theta, \tilde{w}) - G(\thetaone, \tilde{w}) =&
  \left(\int_0^1 H(\thetaone + t (\theta - \thetaone), \tilde{w}) dt\right)
  \left(\theta - \thetaone\right).
\end{align*}
%
\end{prop}
%
\begin{proof}
%
Let $G_d(\theta, \tilde{w})$ denote the $d$-th component of the vector
$G(\theta, \tilde{w})$, and define the function $f_d(t) := G_d(\thetaone + t
  (\theta - \thetaone), \tilde{w})$. The proposition follows by taking the
integral remainder form of the zero-th order Taylor series expansion of $f_d(t)$
around $t=0$ \citep[Appendix B.2]{dudley:2018:analysis}, and stacking the result
into a vector.
%
\end{proof}
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
The Taylor series residual of \proprefref{taylor_series_remainder} will show up
repeatedly, so we will give it a concise name in the following definition.
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
\begin{defn}
\label{defref:hess_integral}
%
For a fixed weight $w$ and a fixed parameter $\theta$, define the Hessian
integral
%
\begin{align*}
\hint(\theta, w) :=&
\int_0^1 H(\thetaone + t (\theta - \thetaone), w) dt.
\end{align*}
\end{defn}
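With this notation, \proprefref{taylor_series_remainder} can be restated
compactly: for any $\theta \in \Omega_{\theta}$ and $\tilde{w} \in W$,
\begin{align*}
G(\theta, \tilde{w}) - G(\thetaone, \tilde{w}) &=
    \hint(\theta, \tilde{w}) \left(\theta - \thetaone\right).
\end{align*}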
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
\subsection{Lemmas}
We now prove some useful consequences of our assumptions. The proof
roughly proceeds for all $w\in W_{\delta}$ by the following steps:
%
\begin{enumerate}
\item When $\delta$ is small we can make $\norm{{{\thetaw}}-\thetaone}_{2}$
small. (\lemref{theta_difference_bound} below.)
\item When $\norm{\theta-\thetaone}_{2}$ is small, then the derivatives
$H\left(\theta,w\right)$ are close to their optimal value, $H\left(\thetaone,\onevec\right)$.
(\lemref{bounded_continuous} and \lemref{gh_difference_from_one}
below.)
\item When the derivatives are close to their optimal values, then $H\left(\theta,w\right)$
is uniformly non-singular. (\lemref{continuous_invertibility} below.)
\item When the derivatives are close to their optimal values and $H\left(\theta,w\right)$
is uniformly non-singular we can control the error in $\thetaij-\thetaw$
in terms of $\delta$. (\thmrefref{taylor_error_first} below.)
\end{enumerate}
%
We begin by showing that the difference between $\thetaw$ and $\thetaone$
for $w\in W_{\delta}$ can be made small by making $\delta$ from
\condref{paper_uniform_bound} small. First, however, we need to prove that
operator norm bounds on $H(\theta, w)$ also apply to the integrated Hessian
$\hint(\theta, w)$.
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
\begin{lem}\label{lem:hess_integral_invertible}
Invertibility of the integrated Hessian.
%
If, for some domain $\Omega$ and some constant $C$, $\sup_{\theta \in \Omega}
\norm{H(\theta, w)^{-1}}_{op} \le C$, then
$\sup_{\theta \in \Omega} \norm{\hint(\theta, w)^{-1}}_{op} \le C$.
%
\end{lem}
\begin{proof}
%
By definition of the operator norm,
%
\begin{align*}
\norm{\hint(\theta, w)^{-1}}_{op}^{-1} =&
\min_{v \in \mathbb{R}^D: \norm{v}_2 = 1} v^T \hint(\theta, w) v \\
=& \min_{v \in \mathbb{R}^D: \norm{v}_2 = 1}
\int_0^1 v^T H(\thetaone + t (\theta - \thetaone), w) v dt \\
\ge& \int_0^1 \min_{v \in \mathbb{R}^D: \norm{v}_2 = 1}
v^T H(\thetaone + t (\theta - \thetaone), w) v dt \\
\ge& \inf_{\theta \in \Omega} \min_{v \in \mathbb{R}^D: \norm{v}_2 = 1}
v^T H(\theta, w) v \\
\ge& C^{-1}.
\end{align*}
%
The result follows by inverting both sides of the inequality.
%
\end{proof}
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%%%%%%%%%%%%%%%%%%%%%%%
%%%%%%%%%%%%%%%%%%%%%%%
\begin{lem}
\label{lem:theta_difference_bound}Small parameter changes. Under
\globalassum and \condref{paper_uniform_bound},
\begin{align*}
\textrm{for all }w\in W_{\delta},\quad\norm{{{\thetaw}}-\thetaone}_{2} &
\le\constop\delta.
\end{align*}
\end{lem}
%
\begin{proof}
%
Applying \proprefref{taylor_series_remainder} with $\theta = \thetaw$
and $\tilde{w} = \onevec$ gives
%
%
\begin{align*}
G\left(\thetaw,\onevec\right) &
=G\left(\thetaone,\onevec\right) +
\hint\left(\thetaw, \onevec\right)
\left(\thetaw-\thetaone\right).
\end{align*}
%
By \assuref{paper_hessian} and
% $\sup_{\theta \in \Omega_\theta}
% \norm{H(\theta,\onevec)^{-1}} \le \constop$, so by
\lemref{hess_integral_invertible},
$\sup_{\theta \in \Omega_\theta}
\norm{\hint(\theta,\onevec)^{-1}} \le \constop$.
%
In particular, $\hint(\theta,\onevec)$ is non-singular.
A little manipulation, together with the fact that
$G\left(\thetaw,w\right)=G\left(\thetaone,\onevec\right)=0$ gives
%
\begin{align*}
\thetaw-\thetaone & =
\hint\left(\thetaw,\onevec\right)^{-1}
\left(G\left(\thetaw, \onevec\right) - G\left(\thetaw,w\right)\right).
\end{align*}
%
Taking the norm of both sides gives
%
\begin{align*}
\norm{{{\thetaw}}-\thetaone}_{2} & =
\norm{\hint\left(\thetaw,\onevec\right)^{-1}
\left(G\left({{\thetaw}},\onevec\right) -
G\left({{\thetaw}},w\right)\right)}_{2}\\
& \le\norm{\hint\left(\thetaw,\onevec\right)^{-1}}_{op}
\norm{\left(G\left({{\thetaw}},\onevec\right) -
G\left({{\thetaw}},w\right)\right)}_{2}\\
& \le\constop\norm{G\left({{\thetaw}},\onevec\right) -
G\left({{\thetaw}},w\right)}_{2}
\textrm{ (Lemma \ref{lem:hess_integral_invertible})}\\
& \le\constop\norm{
G\left({{\thetaw}},\onevec\right) -
G\left({{\thetaw}},w\right)}_{1}\textrm{ (relation between norms)}\\
& \le\constop\sup_{\theta\in\Omega_{\theta}}
\norm{G\left(\theta,\onevec\right)-G\left(\theta,w\right)}_{1}\\
& \le\constop\delta.\textrm{ (Condition \ref{cond:paper_uniform_bound}).}
%
\end{align*}
%
\end{proof}
%%%%%%%%%%%%%%%%%%%%%%%
%
Because we will refer to it repeatedly, we give the set of $\theta$
defined in \lemref{theta_difference_bound} a name.
%
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
\begin{defn}
For a given $\delta$, define the region around $\thetaone$ given
by \lemref{theta_difference_bound} as
\begin{align*}
\thetadeltaball & :=\left\{ \theta:\norm{\theta-\thetaone}_{2}\le\constop\delta\right\} \bigcap\Omega_{\theta}.
\end{align*}
\end{defn}
%
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
In other words, \lemref{theta_difference_bound} states that \condref{paper_uniform_bound}
implies $\thetaw\in\thetadeltaball$ when $w\in W_{\delta}$.
Next, we show that closeness in $\theta$ will mean closeness in $H\left(\theta,w\right)$.
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
\begin{lem}
\label{lem:bounded_continuous} Boundedness and continuity. Under
\paperallcoreassum and \condref{paper_uniform_bound},
%
\begin{align*}
\textrm{for all }\theta\in\thetaball,\quad\sup_{w\in W}
\norm{H\left(\theta,w\right) - H\left(\thetaone,w\right)}_{1}
&\le D\constw\liph\norm{\theta-\thetaone}_{2}.
\end{align*}
\end{lem}
\begin{proof}
%
For $\theta\in\thetaball$,
%
\begin{align*}
\sup_{w\in W}\norm{H\left(\theta,w\right)-H\left(\thetaone,w\right)}_{1} &=
\sup_{w\in W}\norm{\frac{1}{N}
\sum_{n=1}^{N}w_{n}\left(h_{n}\left(\theta\right) -h_{n}\left(\thetaone\right)\right)}_{1}\textrm{ (by definition)}\\
& \le D\sup_{w\in W}\frac{\norm w_{2}}{\sqrt{N}}
\frac{\norm{h\left(\theta\right) -
h\left(\thetaone\right)}_{2}}{\sqrt{N}}
\textrm{ (Proposition \ref{propref:tensor_cauchy_schwartz})}\\
& \le D\constw\frac{\norm{h\left(\theta\right) -
h\left(\thetaone\right)}_{2}}{\sqrt{N}}
\textrm{ (Assumption \ref{assu:paper_weight_bounded})}\\
& \le D\constw\liph\norm{\theta-\thetaone}_{2}
\textrm{ (Assumption \ref{assu:paper_lipschitz} and }
\theta\in\thetaball\textrm{)}.
\end{align*}
%
\end{proof}
%
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
We now combine \lemref{theta_difference_bound} and \lemref{bounded_continuous}
to show that $H\left(\theta,w\right)$ is close to its value at the
solution $H\left(\thetaone,\onevec\right)$ for sufficiently small
$\delta$ and for all $\theta\in\thetadeltaball$.
%
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
\begin{lem}
\label{lem:gh_difference_from_one}Bounds for difference in parameters.
Under \paperallcoreassum and \condref{paper_uniform_bound}, if $\delta\le\thetasize\constop[-1]$,
then
\begin{align*}
\sup_{\theta\in\thetadeltaball}\sup_{w\in W_{\delta}} &
\norm{H\left(\theta,w\right)-H\left(\thetaone,\onevec\right)}_{1}
\le\left(1 + D\constw\liph\constop\right)\delta.
\end{align*}
\end{lem}
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
\begin{proof}
By \lemref{theta_difference_bound}, $\delta\le\thetasize\constop[-1]$
implies that $\constop\delta\le\thetasize$ and so $\thetadeltaball\subseteq\thetaball$.
Consequently, we can apply \lemref{bounded_continuous}:
\begin{align*}
\sup_{\theta\in\thetadeltaball}\sup_{w\in W_{\delta}}\norm{H\left(\theta,w\right)-H\left(\thetaone,w\right)}_{1} & \le\sup_{\theta\in\thetaball}\sup_{w\in W_{\delta}}\norm{H\left(\theta,w\right)-H\left(\thetaone,w\right)}_{1}\\
& \le D\constw\liph\norm{\theta-\thetaone}_{2}\textrm{ (Lemma \ref{lem:bounded_continuous})}\\
& \le D\constw\liph\constop\delta\quad\textrm{ (because }\theta\in\thetadeltaball\textrm{)}.
\end{align*}
%
Next, we can use this to write
\begin{align*}
\sup_{\theta\in\thetadeltaball}\sup_{w\in W_{\delta}} & \norm{H\left(\theta,w\right)-H\left(\thetaone,\onevec\right)}_{1}\\
& =\sup_{\theta\in\thetadeltaball}\sup_{w\in W_{\delta}}\norm{H\left(\theta,w\right)-H\left(\theta,\onevec\right)+H\left(\theta,\onevec\right)-H\left(\thetaone,\onevec\right)}_{1}\\
& \le\sup_{\theta\in\thetadeltaball}\sup_{w\in W_{\delta}}\norm{H\left(\theta,w\right)-H\left(\theta,\onevec\right)}_{1}+\sup_{\theta\in\thetadeltaball}\sup_{w\in W_{\delta}}\norm{H\left(\theta,\onevec\right)-H\left(\thetaone,\onevec\right)}_{1}\\
& \le\sup_{\theta\in\Omega_{\theta}}\sup_{w\in W_{\delta}}\norm{H\left(\theta,w\right)-H\left(\theta,\onevec\right)}_{1}+\sup_{\theta\in\thetadeltaball}\sup_{w\in W_{\delta}}\norm{H\left(\theta,\onevec\right)-H\left(\thetaone,\onevec\right)}_{1}\\
& \le\delta+\sup_{\theta\in\thetadeltaball}\sup_{w\in W_{\delta}}\norm{H\left(\theta,\onevec\right)-H\left(\thetaone,\onevec\right)}_{1}\textrm{ (Condition \ref{cond:paper_uniform_bound})}\\
& \le\delta+D\constw\liph\constop\delta.
\end{align*}
\end{proof}
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
The constant that appears multiplying $\delta$ at the end of the proof of
\lemref{gh_difference_from_one} will appear often in what follows, so we give it
the special name $\constij$ in \defrefref{constants_definition}.
Note that \lemref{gh_difference_from_one} places a condition on how small
$\delta$ must be in order for our regularity conditions to apply.
\lemref{theta_difference_bound} will guarantee that $\thetaw\in\thetadeltaball$,
but if we are not able to make $\delta$ arbitrarily small in
\condref{paper_uniform_bound}, then we cannot guarantee that
$\thetadeltaball\subseteq\thetaball$, we will not be able to assume Lipschitz
continuity, and none of our results will apply.
Next, using \lemref{gh_difference_from_one}, we can extend the operator bound on
$\hone^{-1}$ from \assuref{paper_hessian} to $H\left(\theta,w\right)^{-1}$ for
all $w\in W_{\delta}$.
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
\begin{lem}
\label{lem:continuous_invertibility}Uniform invertibility of the
Hessian. Under \paperallcoreassum and \condref{paper_uniform_bound}, if $\delta\le\min\left\{ \thetasize\constop[-1],\frac{1}{2}\constij^{-1}\constop[-1]\right\} $,
then
\begin{align*}
\sup_{\theta\in\thetadeltaball}\sup_{w\in W_{\delta}}\norm{H\left(\theta,w\right)^{-1}}_{op} & \le2\constop.
\end{align*}
\end{lem}
\begin{proof}
By \assuref{paper_hessian}, $\norm{H\left(\thetaone,\onevec\right)^{-1}}_{op}\le\constop$.
So by \proprefref{operator_norm_continuity}, it will suffice to select
$\delta$ so that
\begin{align}
\sup_{\theta\in\thetadeltaball}\sup_{w\in W_{\delta}}\norm{H\left(\theta,w\right)-H\left(\thetaone,\onevec\right)}_{1} & \le\frac{1}{2}\constop[-1].\label{eq:h_bound}
\end{align}
When we can apply \lemref{gh_difference_from_one}, we have
\begin{align*}
\sup_{\theta\in\thetadeltaball}\sup_{w\in W_{\delta}}\norm{H\left(\theta,w\right)-H\left(\thetaone,\onevec\right)}_{1} & \le\constij\delta.
\end{align*}
So $H\left(\theta,w\right)$ will satisfy \eqref{h_bound} if we can
apply \lemref{gh_difference_from_one} and if
\begin{align*}
\delta\le & \frac{1}{2}\constop[-1]\constij^{-1}.
\end{align*}
To apply \lemref{gh_difference_from_one} we additionally require
that $\delta\le\thetasize\constop[-1]$. By taking $\delta\le\min\left\{ \thetasize\constop[-1],\frac{1}{2}\constop[-1]\constij^{-1}\right\} $,
we satisfy \eqref{h_bound} and the result follows.
\end{proof}
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
Next, we show that a version of \lemref{gh_difference_from_one} also applies
to the integrated Hessian $\hint(\theta, w)$ when $\theta \in \thetadeltaball$.
%%%%%%%%%%%%%%%%%%%%%%%%%
%%%%%%%%%%%%%%%%%%%%%%%%%
\begin{lem}
\label{lem:int_h_difference_from_one}
Bounds for difference of the integrated Hessian.
%
Under \paperallcoreassum and \condref{paper_uniform_bound}, if $\delta\le\thetasize\constop[-1]$ and $\theta \in \thetadeltaball$,
%
\begin{align*}
\sup_{w\in W_{\delta}}\norm{
\hint\left(\theta, w\right) -
H(\thetaone, \onevec)}_{1}
\le& \left(1 + D\constw\liph\constop\right)\delta.
\end{align*}
%
\end{lem}
%
\begin{proof}
\begin{align*}
\MoveEqLeft
\sup_{w\in W_{\delta}}\norm{
\hint\left(\theta, w\right) -
H(\thetaone, \onevec)}_{1} &\\
=& \sup_{w\in W_{\delta}}\norm{
\int_0^1 \left(
            H(\thetaone + t (\theta - \thetaone), w) -
            H(\thetaone, \onevec)\right) dt}_{1}
&\textrm{ (Definition \ref{defref:hess_integral})} \\
\le&
\sup_{w\in W_{\delta}}
\int_0^1 \norm{
H(\thetaone + t (\theta - \thetaone), w) -
H(\thetaone, \onevec)}_{1} dt
&\textrm{ (Jensen's inequality)} \\
\le&
\sup_{\theta\in\thetadeltaball} \sup_{w\in W_{\delta}}
        \norm{H(\theta, w) - H(\thetaone, \onevec)}_{1} &\\
\le&
\left(1 + D\constw\liph\constop\right)\delta
&\textrm{ (Lemma \ref{lem:gh_difference_from_one})}
\end{align*}
\end{proof}
%%%%%%%%%%%%%%%%%%%%%%%%%
With these results in hand, the upper bound on $\delta$ will at last be
sufficient to control the error terms in our approximation. For compactness, we
give the upper bound on $\delta$ the name $\deltasize$ in
\defrefref{constants_definition}.
Finally, we state a result that will allow us to define derivatives
of $\thetaw$ with respect to $w$.
\begin{lem}
\label{lem:implicit_function_theorem}Implicit function theorem. Under
\paperallcoreassum and \condref{paper_uniform_bound}, and for $\delta\le\deltasize$,
there exists a continuous, differentiable function of $w$, $\thetahat\left(w\right)$,
such that, for all $w\in W$, $G\left(\thetahat\left(w\right),w\right)=0$.
\end{lem}
\begin{proof}
This follows from \lemref{continuous_invertibility} and the implicit
function theorem.
\end{proof}
By definition, $\thetahat\left(\onevec\right)=\thetaone$.
\subsection{Bounding the errors in a Taylor expansion}
We are now in a position to use \paperallcoreassum and \condref{paper_uniform_bound}
to bound the error terms in a first-order Taylor expansion of $\thetaw$.
We begin by simply calculating the derivative $d\thetahat\left(w\right)/dw$.
\begin{prop}
\label{propref:theta_w_first_derivative}For any $w\in W$ for which
$H\left(\thetaw,w\right)$ is invertible, and for any vector $a\in\mathbb{R}^{N}$,
\begin{align*}
\frac{d\thetaw}{dw^{T}}\at wa & =-H\left(\thetaw,w\right)^{-1}G\left(\thetaw,a\right).
\end{align*}
\end{prop}
\begin{proof}
Because $G\left(\thetaw,w\right)=0$ for all $w\in W$, by direct
calculation,
\begin{align*}
0 & =\frac{d}{dw^{T}}G\left(\thetaw,w\right)\at wa\\
& =\left(\frac{\partial G}{\partial\theta^{T}}\frac{d\hat{\theta}}{dw^{T}}+\frac{\partial G}{\partial w^{T}}\right)\at{_{w}}a\\
& =H\left(\thetaw,w\right)\frac{d\hat{\theta}}{dw^{T}}\at{_{w}}a+\left(\frac{\partial}{\partial w^{T}}\frac{1}{N}\sum_{n=1}^{N}w_{n}g_{n}\left(\theta\right)\right)\at{_{w}}a\\
 & =H\left(\thetaw,w\right)\frac{d\hat{\theta}}{dw^{T}}\at{_{w}}a+\frac{1}{N}\sum_{n=1}^{N}g_{n}\left(\thetaw\right)a_{n}\\
& =H\left(\thetaw,w\right)\frac{d\hat{\theta}}{dw^{T}}\at{_{w}}a+G\left(\thetaw,a\right).
\end{align*}
Because $H\left(\thetaw,w\right)$ is invertible by assumption, the
result follows.
\end{proof}
\begin{defn}
\label{defref:theta_infinitesimal_jackknife}Define
\begin{align*}
\thetaij\left(w\right) & :=\thetaone+\frac{d\thetaw}{dw^{T}}\at{\onevec}\left(w-\onevec\right)\\
& =\thetaone-\hone^{-1}G\left(\thetaone,w\right).\textrm{ (because }G\left(\thetaone,\onevec\right)=0\textrm{)}
\end{align*}
\end{defn}
%
$\thetaij\left(w\right)$ in \defrefref{theta_infinitesimal_jackknife}
is the first term in a Taylor series expansion of $\thetaw$ as a
function of $w$. We want to bound the error, $\thetaij\left(w\right)-\thetaw$.
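For example, writing $e_{n}$ for the $n$-th standard basis vector of
$\mathbb{R}^{N}$ and assuming that the leave-one-out weight vector
$w = \onevec - e_{n}$ lies in $W$, we have
$G\left(\thetaone,\onevec-e_{n}\right) =
G\left(\thetaone,\onevec\right) - \frac{1}{N}g_{n}\left(\thetaone\right) =
-\frac{1}{N}g_{n}\left(\thetaone\right)$, so that
\begin{align*}
\thetaij\left(\onevec-e_{n}\right) & =
    \thetaone+\frac{1}{N}\hone^{-1}g_{n}\left(\thetaone\right),
\end{align*}
which is the familiar infinitesimal jackknife approximation to the
estimate obtained by dropping datapoint $n$.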
%
%%%%%%%%%%%%%%%%%%%%%%%%%%%
%%%%%%%%%%%%%%%%%%%%%%%%%%%
\begin{thm}
\label{thmref:taylor_error_first}
Under \paperallcoreassum and \condref{paper_uniform_bound},
when $\delta\le\deltasize$,
%
\begin{align*}
\sup_{w\in W_{\delta}}\norm{\thetaij\left(w\right)-\thetahat\left(w\right)}_{2} & \le2\constop[2]\constij\delta^{2}.
\end{align*}
%
\end{thm}
%
\begin{proof}
%
Applying \proprefref{taylor_series_remainder} with $\theta = \thetaw$ and
$\tilde{w} = w$, we have
%
\begin{align*}
0=G\left(\thetaw,w\right) & =
G\left(\thetaone, w\right)
+\hint\left(\thetaw, w\right) \left(\thetaw[w] - \thetaone\right).
\end{align*}
%
  Because $w\in W_{\delta}$, \lemref{theta_difference_bound} implies that
$\thetaw\in\thetadeltaball$ so, \lemref{continuous_invertibility}
and \lemref{hess_integral_invertible} imply that
$\hint\left(\thetaw,w\right)$ is invertible and we can solve for
$\thetaw - \thetaone$.
%
\begin{align*}
\thetaw[w]-\thetaone & =
-\hint\left(\thetaw, w\right)^{-1}G\left(\thetaone,w\right)\\
    & =\left(-\hint\left(\thetaw,w\right)^{-1} +
H\left(\thetaone,\onevec\right)^{-1} -
H\left(\thetaone,\onevec\right)^{-1}\right)
G\left(\thetaone,w\right)\\
& =\left(H\left(\thetaone,\onevec\right)^{-1} -
\hint\left(\thetaw,w\right)^{-1}\right) G\left(\thetaone,w\right)+
\thetaij\left(w\right)-\thetaone.
\end{align*}
%
Eliminating $\thetaone$ and taking the supremum of both sides gives
%
\begin{align*}
\sup_{w\in W_{\delta}} &
\norm{\thetaij\left(w\right)-{{\thetaw[w]}}}_{2}\\
& =\sup_{w\in W_{\delta}}\norm{
\left(H\left(\thetaone,\onevec\right)^{-1} -
    \hint\left(\thetaw, w\right)^{-1}\right)
G\left(\thetaone,w\right)}_{2}\\
& =\sup_{w\in W_{\delta}}\norm{
\hint\left(\thetaw, w\right)^{-1}\left(
\hint\left(\thetaw,w\right) -
H\left(\thetaone,\onevec\right)\right)
H\left(\thetaone,\onevec\right)^{-1}
G\left(\thetaone,w\right)}_{2}\\
& \le2\constop\sup_{w\in W_{\delta}}\norm{
\left(\hint\left(\thetaw,w\right) -
H\left(\thetaone,\onevec\right)\right)
H\left(\thetaone,\onevec\right)^{-1}
G\left(\thetaone,w\right)}_{2}\textrm{ (Lemma
\ref{lem:hess_integral_invertible})}\\
& \le2\constop\sup_{w\in W_{\delta}}\norm{
\hint\left(\thetaw,w\right) -
H\left(\thetaone,\onevec\right)}_{op}
\norm{H\left(\thetaone,\onevec\right)^{-1}G\left(\thetaone,w\right)}_{2}\\
& \le2\constop\sup_{w\in W_{\delta}}\norm{
\hint\left(\thetaw,w\right) -
H\left(\thetaone,\onevec\right)}_{1}
\norm{H\left(\thetaone,\onevec\right)^{-1}G\left(\thetaone,w\right)}_{2}
\textrm{ (ordering of matrix norms)} \\
& \le 2\constop\constij\delta
\sup_{w\in W_{\delta}}
\norm{H\left(\thetaone,\onevec\right)^{-1}
G\left(\thetaone,w\right)}_{2}
\textrm{ (Lemma \ref{lem:int_h_difference_from_one})}\\
& \le2\constop[2]\constij\delta
\sup_{w\in W_{\delta}}\norm{G\left(\thetaone,w\right)}_{2}
\textrm{ (Assumption \ref{assu:paper_hessian})}\\
& =2\constop[2]\constij\delta
\sup_{w\in W_{\delta}}
\norm{G\left(\thetaone,w\right) -
G\left(\thetaone,\onevec\right)}_{2}
\textrm{ (because }G\left(\thetaone,\onevec\right)=0\textrm{)}\\
& \le2\constop[2]\constij\delta^{2}
\textrm{ (Condition \ref{cond:paper_uniform_bound})}.
\end{align*}
\end{proof}
%
%%%%%%%%%%%%%%%%%%%%%%%%%%%
\subsection{Use cases\label{sec:use_cases}}
First, let us state a simple condition under which \coreassum
hold. It will help to have a lemma for the Lipschitz continuity.
\begin{lem}
Derivative Cauchy-Schwarz. Let $a\left(\theta\right)=\left(a_{1}\left(\theta\right),...,a_{N}\left(\theta\right)\right)$
be an array of tensors with multi-index $i\in\left[D_{A}\right]$,
and let $\frac{\partial a\left(\theta\right)}{\partial\theta}=\left(\frac{\partial}{\partial\theta}a_{1}\left(\theta\right),...,\frac{\partial}{\partial\theta}a_{N}\left(\theta\right)\right)$
be an array of tensors of size $D\times D_{A}$. Then
\begin{align*}
\norm{\frac{\partial}{\partial\theta}\norm{a\left(\theta\right)}_{2}}_{2} & \le D_{A}\norm{\frac{\partial a}{\partial\theta}}_{2}.
\end{align*}
\end{lem}
\begin{proof}
By direct calculation,
\begin{align*}
\norm{\frac{\partial}{\partial\theta}\norm{a\left(\theta\right)}_{2}^{2}}_{2}^{2} & =\sum_{r=1}^{D}\left(\frac{\partial}{\partial\theta_{r}}\sum_{n=1}^{N}\sum_{i=1}^{D_{A}}a_{n,i}\left(\theta\right)^{2}\right)^{2}\\
& =\sum_{r=1}^{D}\left(\sum_{n=1}^{N}\sum_{i=1}^{D_{A}}2a_{n,i}\left(\theta\right)\frac{\partial a_{n,i}\left(\theta\right)}{\partial\theta_{r}}\right)^{2}\\
& \le\sum_{r=1}^{D}\left(2\sum_{i=1}^{D_{A}}\left(\sum_{n=1}^{N}a_{n,i}\left(\theta\right)^{2}\right)^{\frac{1}{2}}\left(\sum_{n=1}^{N}\left(\frac{\partial a_{n,i}\left(\theta\right)}{\partial\theta_{r}}\right)^{2}\right)^{\frac{1}{2}}\right)^{2}\\
 & \le\sum_{r=1}^{D}\left(2D_{A}^{2}\left(\frac{1}{D_{A}}\sum_{i=1}^{D_{A}}\sum_{n=1}^{N}a_{n,i}\left(\theta\right)^{2}\right)^{\frac{1}{2}}\left(\frac{1}{D_{A}}\sum_{i=1}^{D_{A}}\sum_{n=1}^{N}\left(\frac{\partial a_{n,i}\left(\theta\right)}{\partial\theta_{r}}\right)^{2}\right)^{\frac{1}{2}}\right)^{2}\\
& =4D_{A}^{2}\norm a_{2}^{2}\sum_{r=1}^{D}\norm{\frac{\partial a}{\partial\theta_{r}}}_{2}^{2}\\
& =4D_{A}^{2}\norm a_{2}^{2}\norm{\frac{\partial a}{\partial\theta}}_{2}^{2}.
\end{align*}
By the chain rule,
\begin{align*}
\norm{\frac{\partial}{\partial\theta}\norm{a\left(\theta\right)}_{2}}_{2}^{2} & =\frac{1}{4\norm{a\left(\theta\right)}_{2}^{2}}\norm{\frac{\partial}{\partial\theta}\norm{a\left(\theta\right)}_{2}^{2}}_{2}^{2}\le D_{A}^{2}\norm{\frac{\partial a}{\partial\theta}}_{2}^{2}.
\end{align*}
\end{proof}
%
\begin{lem}
\label{lem:lipschitz_helper}Let $a\left(\theta\right)\in\mathbb{R}^{D\times D}$
be a continuously differentiable random matrix with a $D\times D\times D$
derivative tensor. (Note that the function, not $\theta$, is random.
For example, $\mbe\left[a\left(\theta\right)\right]$ is still a function
of $\theta$.) Suppose that $\mbe\left[\norm{a\left(\theta\right)}_{2}\right]$
is finite for all $\theta\in\Omega_{\theta}$. Then, for all $\theta_{1},\theta_{2}\in\Omega_{\theta}$,
\begin{align*}
\left|\mbe\left[\norm{a\left(\theta_{1}\right)}_{2}\right]-\mbe\left[\norm{a\left(\theta_{2}\right)}_{2}\right]\right| & \le\sqrt{\mbe\left[\sup_{\theta\in\Omega_{\theta}}\norm{\frac{\partial a\left(\theta\right)}{\partial\theta}}_{2}^{2}\right]}\norm{\theta_{1}-\theta_{2}}_{2}.
\end{align*}
\end{lem}
\begin{proof}
For any tensor $a$ with multi-index $i$,
\begin{align*}
\norm{\frac{\partial}{\partial\theta}\norm a_{2}^{2}}_{2}^{2} & =\sum_{r=1}^{D}\left(\frac{\partial}{\partial\theta_{r}}\norm a_{2}^{2}\right)^{2}\\
& =\sum_{r=1}^{D}\left(\frac{\partial}{\partial\theta_{r}}\sum_{i=1}^{D_{A}}a_{i}^{2}\right)^{2}\\
& =\sum_{r=1}^{D}\left(2\sum_{i=1}^{D_{A}}a_{i}\frac{\partial a_{i}}{\partial\theta_{r}}\right)^{2}\\
 & \le4\sum_{r=1}^{D}\sum_{i=1}^{D_{A}}a_{i}^{2}\sum_{i=1}^{D_{A}}\left(\frac{\partial a_{i}}{\partial\theta_{r}}\right)^{2}\textrm{ (Cauchy-Schwarz)}\\
& =4\sum_{i=1}^{D_{A}}a_{i}^{2}\sum_{r=1}^{D}\sum_{i=1}^{D_{A}}\left(\frac{\partial a_{i}}{\partial\theta_{r}}\right)^{2}\\
& =4\norm a_{2}^{2}\norm{\frac{\partial a}{\partial\theta}}_{2}^{2}.
\end{align*}
Consequently,
\begin{align*}
\norm{\frac{\partial}{\partial\theta}\norm{a\left(\theta\right)}_{2}}_{2}^{2} & =\norm{\frac{1}{2\norm{a\left(\theta\right)}_{2}}\frac{\partial}{\partial\theta}\norm{a\left(\theta\right)}_{2}^{2}}_{2}^{2}\\
& =\frac{1}{4\norm{a\left(\theta\right)}_{2}^{2}}\norm{\frac{\partial}{\partial\theta}\norm{a\left(\theta\right)}_{2}^{2}}_{2}^{2}\\
& \le\frac{4\norm{a\left(\theta\right)}_{2}^{2}}{4\norm{a\left(\theta\right)}_{2}^{2}}\norm{\frac{\partial}{\partial\theta}a\left(\theta\right)}_{2}^{2}\\
& =\norm{\frac{\partial a\left(\theta\right)}{\partial\theta}}_{2}^{2}.
\end{align*}
So for any $\theta_{1},\theta_{2}\in\Omega_{\theta}$,
\begin{align*}
\left|\mbe\left[\norm{a\left(\theta_{1}\right)}_{2}\right]-\mbe\left[\norm{a\left(\theta_{2}\right)}_{2}\right]\right| & \le\mbe\left[\left|\norm{a\left(\theta_{1}\right)}_{2}-\norm{a\left(\theta_{2}\right)}_{2}\right|\right]\\
& \le\mbe\left[\left(\sup_{\theta\in\Omega_{\theta}}\norm{\frac{\partial}{\partial\theta}\norm{a\left(\theta\right)}_{2}}_{2}\right)\right]\norm{\theta_{1}-\theta_{2}}_{2}\textrm{ (}\theta\textrm{ is not random)}\\
& \le\mbe\left[\left(\sup_{\theta\in\Omega_{\theta}}\norm{\frac{\partial a\left(\theta\right)}{\partial\theta}}_{2}\right)\right]\norm{\theta_{1}-\theta_{2}}_{2}\\
& \le\sqrt{\mbe\left[\sup_{\theta\in\Omega_{\theta}}\norm{\frac{\partial a\left(\theta\right)}{\partial\theta}}_{2}^{2}\right]}\norm{\theta_{1}-\theta_{2}}_{2}.
\end{align*}
The result follows. Note that the bound still holds (though vacuously)
if $\mbe\left[\sup_{\theta\in\Omega_{\theta}}\norm{\frac{\partial a\left(\theta\right)}{\partial\theta}}_{2}^{2}\right]$
is infinite.
\end{proof}
\begin{prop}
\label{prop:assumptions_hold}Let $\Omega_{\theta}$ be a compact
set. Let $g_{n}\left(\theta\right)$ be twice continuously differentiable
IID random functions. Define
\begin{align*}
h_{n}\left(\theta\right) & :=\frac{\partial g{}_{n}\left(\theta\right)}{\partial\theta}\\
r_{n}\left(\theta\right) & :=\frac{\partial^{2}g{}_{n}\left(\theta\right)}{\partial\theta\partial\theta},
\end{align*}
where $r_{n}\left(\theta\right)$ is a $D\times D\times D$ tensor.
Assume that
1a) $\mbe\left[\sup_{\theta\in\Omega_{\theta}}\norm{g_{n}\left(\theta\right)}_{2}^{2}\right]<\infty$;
1b) $\mbe\left[\sup_{\theta\in\Omega_{\theta}}\norm{h_{n}\left(\theta\right)}_{2}^{2}\right]<\infty$;
1c) $\mbe\left[\sup_{\theta\in\Omega_{\theta}}\norm{r_{n}\left(\theta\right)}_{2}^{2}\right]<\infty;$
2) $\mbe\left[h_{n}\left(\theta\right)\right]$ is non-singular for
all $\theta\in\Omega_{\theta}$;
3) We can exchange expectation and differentiation.
Then $\lim_{N\rightarrow\infty}P\left(\textrm{\coreassum\ hold}\right)=1.$
\end{prop}
%
\begin{proof}
%
The proof follows from Theorems 9.1 and
9.2 of \citet{keener:2011:theoretical}. We will first show that the expected values of
the needed functions satisfy \coreassum, and then that the sample versions
converge uniformly.
By Jensen's inequality,
\begin{align*}
\mbe\left[\sup_{\theta\in\Omega_{\theta}}\norm{g_{n}\left(\theta\right)}_{2}\right] & =\mbe\left[\sqrt{\sup_{\theta\in\Omega_{\theta}}\norm{g_{n}\left(\theta\right)}_{2}^{2}}\right]\le\sqrt{\mbe\left[\sup_{\theta\in\Omega_{\theta}}\norm{g_{n}\left(\theta\right)}_{2}^{2}\right]}.
\end{align*}
Also, for the $i^{th}$ component of $g_{n}\left(\theta\right)$
\begin{align*}
\mbe\left[\sup_{\theta\in\Omega_{\theta}}\left|g_{n,i}\left(\theta\right)\right|\right] & \le\mbe\left[\sup_{\theta\in\Omega_{\theta}}\norm{g_{n}\left(\theta\right)}_{\infty}\right]\le\mbe\left[\sup_{\theta\in\Omega_{\theta}}\norm{g_{n}\left(\theta\right)}_{2}\right].
\end{align*}
By Theorem 9.1 of \citet{keener:2011:theoretical}, $\mbe\left[\norm{g_{n}\left(\theta\right)}_{2}^{2}\right]$
, $\mbe\left[\norm{g_{n}\left(\theta\right)}_{2}\right]$, and $\mbe\left[g_{n}\left(\theta\right)\right]$
are continuous functions of $\theta$, and because $\Omega_{\theta}$
is compact, they are each bounded. Similar reasoning applies to $h_{n}\left(\theta\right)$
and $r_{n}\left(\theta\right)$. Consequently we can define
\begin{align*}
\sup_{\theta\in\Omega_{\theta}}\mbe\left[\norm{g_{n}\left(\theta\right)}_{2}^{2}\right] & =:Q_{g}^{2}<\infty\\
\sup_{\theta\in\Omega_{\theta}}\mbe\left[\norm{h_{n}\left(\theta\right)}_{2}^{2}\right] & =:Q_{h}^{2}<\infty.
\end{align*}
Below, these constants will be used to satisfy \assuref{paper_smoothness}
and \assuref{paper_bounded} with high probability.
Because $\Omega_{\theta}$ is compact, $\mbe\left[h_{n}\left(\theta\right)\right]$
is continuous, $\mbe\left[h_{n}\left(\theta\right)\right]$ is non-singular,
and the operator norm is a continuous function of $\mbe\left[h_{n}\left(\theta\right)\right]$,
we can also define
\begin{align*}
\sup_{\theta\in\Omega_{\theta}}\norm{\mbe\left[h_{n}\left(\theta\right)\right]^{-1}}_{op} & =:Q_{op}<\infty.
\end{align*}
Below, this constant will be used to satisfy \assuref{paper_hessian}
with high probability.
Finally, we turn to the Lipschitz condition. \lemref{lipschitz_helper}
implies that
\begin{align*}
\left|\mbe\left[\norm{h_{n}\left(\theta_{1}\right)}_{2}\right]-\mbe\left[\norm{h_{n}\left(\theta_{2}\right)}_{2}\right]\right| & \le\sqrt{\mbe\left[\sup_{\theta\in\Omega_{\theta}}\norm{r_{n}\left(\theta\right)}_{2}^{2}\right]}\norm{\theta_{1}-\theta_{2}}_{2}.
\end{align*}
Define
\begin{align*}
\Lambda_{h} & =\sqrt{\mbe\left[\sup_{\theta\in\Omega_{\theta}}\norm{r_{n}\left(\theta\right)}_{2}^{2}\right]},
\end{align*}
so that we have shown that $\mbe\left[\norm{h_{n}\left(\theta\right)}_{2}\right]$
is Lipschitz in $\Omega_{\theta}$ with constant $\Lambda_{h}$, which
is finite by assumption.
We have now shown, essentially, that the expected versions of the
quantities we wish to control satisfy \coreassum with $N=1$.
We now need to show that the sample versions satisfy \coreassum
with high probability, which will follow from the fact that the sample
versions converge uniformly to their expectations by
Theorem 9.2 of \citet{keener:2011:theoretical}.
First, observe that \assuref{paper_smoothness} holds with probability
one by assumption. For the remaining assumptions, choose an $\epsilon>0$,
and define
\begin{align*}
\constg & :=\sqrt{Q_{g}^{2}+\epsilon}\\
\consth & :=\sqrt{Q_{h}^{2}+\epsilon}\\
\constop & :=2Q_{op}\\
\liph & :=\sqrt{D^{4}\Lambda_{h}^{2}+\epsilon}.
\end{align*}
By \citet{keener:2011:theoretical} Theorem 9.2,
\begin{align*}
\sup_{\theta\in\Omega_{\theta}}\left|\frac{1}{N}\sum_{n=1}^{N}\norm{g_{n}\left(\theta\right)}_{2}^{2}-\mbe\left[\norm{g_{n}\left(\theta\right)}_{2}^{2}\right]\right| & \xrightarrow[N\rightarrow\infty]{p}0.
\end{align*}
Because
\begin{align*}
\sup_{\theta\in\Omega_{\theta}}\left|\frac{1}{N}\sum_{n=1}^{N}\norm{g_{n}\left(\theta\right)}_{2}^{2}\right| & >Q_{g}^{2}+\epsilon\ge\sup_{\theta\in\Omega_{\theta}}\mbe\left[\norm{g_{n}\left(\theta\right)}_{2}^{2}\right]+\epsilon\Rightarrow\\
\sup_{\theta\in\Omega_{\theta}}\left|\frac{1}{N}\sum_{n=1}^{N}\norm{g_{n}\left(\theta\right)}_{2}^{2}-\mbe\left[\norm{g_{n}\left(\theta\right)}_{2}^{2}\right]\right| & >\epsilon,
\end{align*}
we have
\begin{align*}
& P\left(\sup_{\theta\in\Omega_{\theta}}\left|\frac{1}{N}\sum_{n=1}^{N}\norm{g_{n}\left(\theta\right)}_{2}^{2}\right|\ge Q_{g}^{2}+\epsilon\right)\le\\
 & \quad P\left(\sup_{\theta\in\Omega_{\theta}}\left|\frac{1}{N}\sum_{n=1}^{N}\norm{g_{n}\left(\theta\right)}_{2}^{2}-\mbe\left[\norm{g_{n}\left(\theta\right)}_{2}^{2}\right]\right|\ge\epsilon\right),
\end{align*}
so
\begin{align*}
P\left(\sup_{\theta\in\Omega_{\theta}}\left|\frac{1}{N}\sum_{n=1}^{N}\norm{g_{n}\left(\theta\right)}_{2}^{2}\right|\ge\constg^{2}\right) & \xrightarrow[N\rightarrow\infty]{}0.
\end{align*}
An analogous argument holds for $\frac{1}{N}\sum_{n=1}^{N}\norm{h_{n}\left(\theta\right)}_{2}^{2}$.
Consequently, $P\left(\textrm{Assumption \ref{assu:paper_bounded} holds}\right)\xrightarrow[N\rightarrow\infty]{}1$.
We now consider \assuref{paper_hessian}. Again, by \citet{keener:2011:theoretical} Theorem 9.2
applied to each element of the matrix $h_{n}\left(\theta\right)$,
using a union bound over each of the $D^{2}$ entries,
\begin{align*}
\sup_{\theta\in\Omega_{\theta}}\norm{\frac{1}{N}\sum_{n=1}^{N}h_{n}\left(\theta\right)-\mbe\left[h_{n}\left(\theta\right)\right]}_{1} & \xrightarrow[N\rightarrow\infty]{p}0.
\end{align*}
By the contrapositive of \proprefref{operator_norm_continuity}, because
$\norm{\mbe\left[h_{n}\left(\theta\right)\right]^{-1}}_{op}\le Q_{op}$,
\begin{align*}
\norm{\left(\frac{1}{N}\sum_{n=1}^{N}h_{n}\left(\theta\right)\right)^{-1}}_{op} & >2Q_{op}=\constop\Rightarrow\\
\norm{\frac{1}{N}\sum_{n=1}^{N}h_{n}\left(\theta\right)-\mbe\left[h_{n}\left(\theta\right)\right]}_{1} & >\frac{1}{2}Q_{op}^{-1}.
\end{align*}
Consequently,
\begin{align*}
& P\left(\norm{\left(\frac{1}{N}\sum_{n=1}^{N}h_{n}\left(\theta\right)\right)^{-1}}_{op}\ge\constop\right)\le\\
 & \quad P\left(\norm{\frac{1}{N}\sum_{n=1}^{N}h_{n}\left(\theta\right)-\mbe\left[h_{n}\left(\theta\right)\right]}_{1}\ge\frac{1}{2}Q_{op}^{-1}\right)\xrightarrow[N\rightarrow\infty]{}0,
\end{align*}
and $P\left(\textrm{Assumption \ref{assu:paper_hessian} holds}\right)\xrightarrow[N\rightarrow\infty]{}1.$
Finally, applying \lemref{lipschitz_helper} to $\frac{1}{\sqrt{N}}\norm{h\left(\theta_{2}\right)}_{2}$,
\begin{align*}
\left|\frac{1}{\sqrt{N}}\norm{h\left(\theta_{1}\right)}_{2}-\frac{1}{\sqrt{N}}\norm{h\left(\theta_{2}\right)}_{2}\right| & \le\sup_{\theta\in\Omega_{\theta}}\norm{\frac{\partial}{\partial\theta}\frac{1}{\sqrt{N}}\norm{h\left(\theta\right)}_{2}}_{2}\norm{\theta_{1}-\theta_{2}}_{2}\\
& \le\frac{D^{2}}{\sqrt{N}}\sup_{\theta\in\Omega_{\theta}}\norm{r\left(\theta\right)}_{2}\norm{\theta_{1}-\theta_{2}}_{2}\\
& =D^{2}\sqrt{\sup_{\theta\in\Omega_{\theta}}\frac{1}{N}\norm{r\left(\theta\right)}_{2}^{2}}\norm{\theta_{1}-\theta_{2}}_{2}.
\end{align*}
Consequently,
\begin{align*}
\left|\frac{1}{\sqrt{N}}\norm{h\left(\theta_{1}\right)}_{2}-\frac{1}{\sqrt{N}}\norm{h\left(\theta_{2}\right)}_{2}\right| & \ge\liph\norm{\theta_{1}-\theta_{2}}_{2}\Rightarrow\\
D^{2}\sqrt{\sup_{\theta\in\Omega_{\theta}}\frac{1}{N}\norm{r\left(\theta\right)}_{2}^{2}} & \ge\liph\Rightarrow\\
\sup_{\theta\in\Omega_{\theta}}\frac{1}{N}\norm{r\left(\theta\right)}_{2}^{2}-\sup_{\theta\in\Omega_{\theta}}\mbe\left[\norm{r_{n}\left(\theta\right)}_{2}^{2}\right] & \ge\frac{\liph^{2}}{D^{4}}-\sup_{\theta\in\Omega_{\theta}}\mbe\left[\norm{r_{n}\left(\theta\right)}_{2}^{2}\right]\Rightarrow\\
\sup_{\theta\in\Omega_{\theta}}\left|\frac{1}{N}\norm{r\left(\theta\right)}_{2}^{2}-\mbe\left[\norm{r_{n}\left(\theta\right)}_{2}^{2}\right]\right| & \ge\frac{\liph^{2}}{D^{4}}-\Lambda_{h}^{2}=\frac{\epsilon}{D^{4}}.
\end{align*}
However, again by \citet{keener:2011:theoretical} Theorem 9.2,
\begin{align*}
\sup_{\theta\in\Omega_{\theta}}\left|\frac{1}{N}\norm{r\left(\theta\right)}_{2}^{2}-\mbe\left[\norm{r_{n}\left(\theta\right)}_{2}^{2}\right]\right| & \xrightarrow[N\rightarrow\infty]{p}0,
\end{align*}
so $P\left(\textrm{Assumption \ref{assu:paper_lipschitz} holds}\right)\xrightarrow[N\rightarrow\infty]{}1$.
\end{proof}
\section*{Acknowledgement}
The authors would especially like to thank the University of Applied Sciences Leipzig, Germany
for providing the PhD scholarship.
We also thank the DCIM - Dresden Center of Intelligent Materials, Germany
for providing the material data and test results of the high-speed rotor.
\chapter{Introduction} %Title of the First Chapter
Using past experience to update one's behaviour is what differentiates intelligent beings from other creatures in our world. To artificially create such systems, which can learn from data acquired through experience, is the crowning goal of the fields of Artificial Intelligence (AI) and Machine Learning (ML) \citep{turing}.
Realising this goal will have a large impact on the world and society we live in, as it will liberate humans from tasks that require cognition, like driving cars, booking appointments, and interpreting and acting on medical records, to give a few examples. Though, arguably, the field still has a long way to go before fulfilling its full potential \citep{grace2018will}.
An elegant approach to formalise the concept of learning through acquired experience is that of \emph{Bayesian learning} \citep{murphy}. In this framework, one specifies beliefs about quantities of interest, and updates them after observing data. The beliefs are represented using probability distributions: the \emph{prior} describes what is known about the quantity before any data is observed, and the so-called \emph{likelihood} describes the relation between the observed data and the quantity of interest. The framework provides a recipe for obtaining an updated belief over the quantity of interest after data has been observed. This distribution is known as the \emph{posterior}. Bayesian learning makes decisions atop these beliefs, and uses the posterior to reason about the optimality of a decision or the cost associated to a certain action.
The performance of a decision-making system will depend on the speed at which it can learn and the optimality of the decisions made---quantified by the \emph{regret}. It can be shown that, under certain assumptions, Bayesian learning gives rise to the optimal regret \citep{Lattimore}. However, such excellence comes at a high computational cost, which in many scenarios renders Bayesian learning---in its purest form---impractical. Fortunately, the literature has proposed many approximate methods which have lightened the computational complexity of the Bayesian paradigm \citep{Neal1993Probabilistic,jordan1999introduction,minka2001expectation}.
Unquestionably, models for decision-making systems need to deal with \emph{uncertainty}. They need to be able to quantify what is known, and what is not known. The importance of quantifying uncertainty for decision-making systems becomes clear from the many sources it can stem from. For example, there can be multiple different settings of a model that explain the data, so one needs to be uncertain about the setting that actually generated it. One also needs to be uncertain about the model itself, as most probably the model at hand is a simplification of the real-world process. Moreover, the environment in which the system operates may also be inherently uncertain, in which case even an infinite amount of data (i.e. experience) would not make the system any smarter (e.g., trying to predict the outcome of rolling a fair dice).
Gaussian Processes (GPs) \citep{rasmussen2006} can be argued to provide the perfect compromise between computational complexity and Bayesian rigour. Their non-parametric nature makes them complex enough to model a wide range of problems, while their kernel formulation makes them applicable to many different domains: graphs, vectors, images, etc. Instead of representing probability distributions on weights, Gaussian processes can be used to represent uncertainty directly on the functions that the weights represent. The \emph{function-space} view of Gaussian processes makes them more amenable to mathematical analysis, which in turn leads to strong guarantees on the future performance of the system and overall robustness. This may be necessary for deploying these systems in critical applications. For these reasons, studying Gaussian processes is very worthwhile.
This report presents two pieces of research in the domain of approximate Bayesian inference for GP models conducted during the first year of my PhD degree. Namely, \cref{chapter:vish} introduces an interdomain inducing variable approach that speeds up inference and prediction in GPs by two orders of magnitude by making use of the spectral properties of the kernel. In \cref{chapter:dnn-as-point-estimate-for-dgps} we marry the strengths of deep neural networks and deep GPs by establishing an equivalence between the forward passes of both models. This results in models that can either be seen as neural networks with improved uncertainty prediction or deep GPs with increased prediction accuracy. The final part of this report, \cref{chapter:future-research}, elaborates on a future research agenda. Prior to all of this, we start by covering the theoretical background in \cref{chapter:theoretical-framework}.
The material presented in \cref{chapter:vish,chapter:dnn-as-point-estimate-for-dgps} is either published or is currently under review:
\begin{enumerate}
\item \fullcite{Dutordoir2020spherical}
\item \fullcite{dutordoir2021deep}
\item \fullcite{dutordoir2021gpflux}
\end{enumerate}
\documentclass{article}
\usepackage{amsmath, fancyhdr, float, graphicx, hyperref, listings}
\lstset{
columns=fullflexible,
numberstyle=\small,
frame=single,
breaklines=true
}
\title{An analysis of 7,020,950 NFT transactions on the Ethereum blockchain}
\author{
The Moonstream team\\
\href{mailto:[email protected]}{[email protected]}
}
\begin{document}
\maketitle
\begin{abstract}
We present the Ethereum NFTs dataset, a representation of the activity on the Ethereum non-fungible
token (NFT) market between April 1, 2021 and September 25, 2021, constructed purely from on-chain data.
This dataset consists of all 7,020,950 token mints and transfers across 727,102 accounts between block 12,150,245 and block 13,296,011.
We analyze this data with a view towards answering the following questions:
\begin{enumerate}
\item Who owns NFTs on the Ethereum blockchain?
\item Are NFTs for a small number of wealthy elite or are they for the masses?
\item How should one measure the utility of a non-fungible token?
\end{enumerate}
The distribution of the number of NFTs owned by Ethereum addresses is
Zipfian in nature. This indicates that the NFT market is indeed an open market, free for anyone to
participate in and with low barriers to entry.
Most NFTs have few owners relative to the number of tokens in their token supply.
The probability distribution that a contract induces on owners of its tokens reflects the utility of that contract. Based on this observation, we propose an entropy-based measure of utility for NFT contracts -- their \emph{ownership entropy}.
\end{abstract}
\section{Introduction}
A non-fungible token, or NFT, is a unique and non-interchangeable unit of data
stored on a digital ledger (blockchain)\cite{nft-definition}. It is non-fungible in the sense that it immutably and exclusively represents a data asset. This means that it can be used to represent virtually anything -- ideas, physical objects, intellectual property, etc. It will never change what it represents and the represented object or concept admits no \emph{other} representation on the blockchain.
The global market for NFTs has seen a massive boom between June and October of 2021. Visual artists and other content creators are digitizing their creations as NFTs to distribute their work to patrons. Game producers are tokenizing assets in computer and mobile games as NFTs to create shared worlds with other content creators.
Conventional digital representations of physical works can be replicated arbitrarily and indefinitely. For example, if you own a digital copy of a book, you could in principle make arbitrarily many clones of that copy and distribute them to anyone who asked for them.
In contrast, NFTs naturally reflect the scarcity of the objects they represent. This essential scarcity makes NFTs a perfect tool to globally conserve value when transferring ideas and assets from one digital reality to another. NFTs allow people to create common representations of scarce resources across multiple realities.
The recent boom in the NFT market \cite{reuters-nft-surge} has led to an increased variance in utility across NFTs. Similarly, there is a growing number of first-time NFT buyers. This paper analyzes these variances and derives statistics that can be used to classify NFTs and NFT purchasers.
\section{The Ethereum NFTs Dataset}
The majority of recent NFT activity has been centered around the Ethereum blockchain. This made Ethereum the natural starting point for a series of analyses, of which this paper is the first.
This section introduces the Ethereum NFTs dataset \cite{ethereum-nfts}. It describes the structure of NFT data on the Ethereum blockchain, elaborates on our methods of data collection, and articulates the structure of the dataset.
\subsection{Contracts, tokens, events, and ERC721}
On the Ethereum blockchain, non-fungible tokens are created using Ethereum smart contracts \cite{ethereum-smart-contracts}. The most famous examples of Ethereum NFT contracts are CryptoPunks \cite{cryptopunks} and CryptoKitties \cite{cryptokitties}.
Ethereum smart contracts expose public methods which may be executed by participants on the Ethereum blockchain. When a smart contract method is executed, it can change the state of the Ethereum blockchain by changing the state of addresses associated with the smart contract and also by emitting events which log the activity of that smart contract.
An NFT contract typically represents a plurality of non-fungible tokens. NFT contracts normally represent a thematically consistent set of tokens. This is reflected by the language of NFT marketplaces like OpenSea \cite{opensea} and Nifty Gateway \cite{nifty}, which call them \emph{collections}.
Anyone wishing to create a non-fungible token on the Ethereum blockchain is free to implement their tokens in any manner whatsoever subject to the requirement of non-fungibility. The most \emph{common} implementation follows the Ethereum ERC721 protocol \cite{erc721}.
There is a growing ecosystem of secondary marketplaces such as OpenSea \cite{opensea} that are immediately accessible to NFTs which follow the ERC721 standard. Tokens which do not follow the standard, such as CryptoPunks \cite{cryptopunks}, have to create ERC721 wrappers in order to participate in these marketplaces. This, combined with the existence of high-quality, battle-tested implementations of the ERC721 standard, means that ERC721-compliant tokens account for the overwhelming majority of Ethereum NFTs.
ERC721 contracts emit a $Transfer$ event into global Ethereum state whenever a token is created on that contract or an existing token on that contract is transferred from one address to another. This event has the following structure:
\begin{lstlisting}[caption={ERC721 Transfer event}]
event Transfer(address indexed _from, address indexed _to, uint256 indexed _tokenId);
\end{lstlisting}
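As a hedged illustration (this is not necessarily how the dataset itself was collected), such events could be fetched with a generic JSON-RPC client such as web3.py; the node URL below is a placeholder and the block range is truncated for brevity. ERC721 transfers can be told apart from ERC20 transfers (which share the same signature hash) by the number of topics in the log, since ERC721 indexes all three arguments.
\begin{lstlisting}[caption={Hypothetical sketch: fetching ERC721 Transfer logs with web3.py}]
# Hypothetical sketch; assumes web3.py >= 5 and access to an Ethereum JSON-RPC endpoint.
from web3 import Web3

w3 = Web3(Web3.HTTPProvider("https://<your-ethereum-node>"))  # placeholder URL

# keccak256 of the event signature identifies Transfer logs.
transfer_topic = w3.keccak(text="Transfer(address,address,uint256)").hex()

logs = w3.eth.get_logs({
    "fromBlock": 12150245,
    "toBlock": 12150345,          # a small block range, for illustration only
    "topics": [transfer_topic],
})

# ERC721 Transfer has three indexed arguments, so its logs carry four topics;
# ERC20 Transfer (same signature hash) only carries three.
erc721_logs = [log for log in logs if len(log["topics"]) == 4]
print(len(erc721_logs))
\end{lstlisting}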
We built the Ethereum NFTs dataset by scanning all blocks between block number $12,150,245$ and $13,296,011$. Our scan of these $1,145,767$ blocks yielded transfer activity for $7,020,950$ tokens from $9,292$ NFT contracts across $727,102$ addresses. These mints and transfers form the core of the dataset.
The Ethereum NFTs dataset is built purely from on-chain Ethereum data, collected by Moonstream \cite{moonstream}.
The Ethereum NFTs dataset is a single SQLite database consisting of two core relations -- $mints$ and $transfers$. The $mints$ relation enumerates all NFT mints (creations) that took place in the window of time for which we collected data. The $transfers$ relation enumerates all transfers of NFTs from one owner to another which took place in the same window of time.
Both relations have the same schema. This is the creation statement for the $mints$ table.
\begin{lstlisting}[caption={$mints$ relation}]
CREATE TABLE mints
(
event_id TEXT NOT NULL UNIQUE ON CONFLICT FAIL,
transaction_hash TEXT,
block_number INTEGER,
nft_address TEXT REFERENCES nfts(address),
token_id TEXT,
from_address TEXT,
to_address TEXT,
transaction_value INTEGER,
timestamp INTEGER
);
\end{lstlisting}
The $from\_address$ and $to\_address$ fields denote the address of the original owner and the address of the new owner for each transfer. They do \emph{not} denote the $from$ and $to$ parameters of the actual Ethereum transaction that initiated the mint or transfer.\footnote{The $to$ parameter would be the address of the NFT contract, which we list under $nft\_address$. We plan to include the transaction $from$ information in a future iteration of the dataset.}
The dataset also contains other relations, derived from these core relations, but included in the dataset for ease of analysis. These are:
\begin{enumerate}
\item{$nfts$ -- available metadata about the NFT contracts represented in the dataset}
\item{$current\_market\_values$ -- the current (estimated) market value of each NFT in WEI}
\item{$current\_owners$ -- the current owner of each NFT}
\item{$transfer\_statistics\_by\_address$ -- number of transfers in and out of every address that was involved in an NFT transfer in the window of time the dataset represents}
\end{enumerate}
Throughout this paper, we take the word \emph{current} to mean ``as of block $13,296,011$''.
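As a hedged illustration of how the dataset can be accessed (the file name below is a placeholder, and only the $mints$ schema shown above is guaranteed), one could explore the relations with Python's built-in $sqlite3$ module:
\begin{lstlisting}[caption={Hypothetical sketch: inspecting the dataset with Python}]
# Hypothetical sketch; assumes the Kaggle database was downloaded as nfts.sqlite.
import sqlite3

con = sqlite3.connect("nfts.sqlite")  # placeholder file name
cur = con.cursor()

# List the relations present in the database.
cur.execute("SELECT name FROM sqlite_master WHERE type = 'table'")
print([row[0] for row in cur.fetchall()])

# Count mints and transfers in the collection window.
for table in ("mints", "transfers"):
    cur.execute(f"SELECT COUNT(*) FROM {table}")
    print(table, cur.fetchone()[0])

con.close()
\end{lstlisting}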
\subsection{Caveats}
The Ethereum NFTs dataset is constructed purely from events on the Ethereum blockchain. It does not include any data from Layer 2 networks like Polygon. Nor does it include any data from centralized APIs like the OpenSea API. It does not account for events or data from any non-ERC721 smart contracts associated with these platforms on the Ethereum blockchain.
This means that two parties could exchange a positive amount of funds off-chain, conduct the corresponding transfer on-chain, and we would be unable to distinguish that transfer from a gift.
It is also possible for a single transaction to involve multiple NFT transfers. \cite{loot-middleware} is an example of such a transaction involving several NFT layers on top of a Loot token \cite{loot}.
If a transaction involves multiple NFT transfers and has a non-zero value, it is difficult to understand whether that value is related to the transfers and, if so, how it distributes over the transfers.
For that reason, in this first version of our dataset, the valuation numbers should only be treated as a rough estimate of the actual value for each NFT.
\subsection{Access}
The complete Ethereum NFTs dataset is available on Kaggle under a Creative Commons license \cite{ethereum-nfts}.
\section{Who is buying NFTs?}
It is difficult to understand how much of the hype surrounding NFTs is manufactured and how much of it reflects the situation on the market.
Is there a small number of people who each carry significant NFT holdings? Are they driving the hype and carrying the market with them?
Or is the market home to many different people, each of whom own a relatively small number of NFTs?
For each $n > 0$, let $A_n$ denote the number of addresses that assumed ownership of exactly $n$ NFTs between block $12,150,245$ (April 1, 2021) and block $13,296,011$ (September 25, 2021).
Figure \ref{tokens-owned-per-address-logarithmic} plots $A_n$ against $n$ on a logarithmic scale.
\begin{figure}
\resizebox{\textwidth}{!}{\includegraphics{tokens_owned_histogram_log.original.png}}
\caption{Addresses by number of tokens owned}\label{tokens-owned-per-address-logarithmic}
\end{figure}
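A sketch of how $A_n$ might be recomputed from the dataset is shown below. It is hypothetical: it approximates ``assumed ownership'' by counting appearances as $to\_address$ in the two core relations (repeated receipt of the same token is not deduplicated), and the file name is again a placeholder.
\begin{lstlisting}[caption={Hypothetical sketch: number of tokens assumed per address}]
# Hypothetical sketch; counts how many tokens each address received.
import sqlite3
from collections import Counter

con = sqlite3.connect("nfts.sqlite")  # placeholder file name
cur = con.cursor()

ownership_counts = Counter()
for table in ("mints", "transfers"):
    cur.execute(f"SELECT to_address, COUNT(*) FROM {table} GROUP BY to_address")
    for address, count in cur.fetchall():
        ownership_counts[address] += count
con.close()

# A_n: number of addresses that assumed ownership of exactly n NFTs.
A = Counter(ownership_counts.values())
print([(n, A[n]) for n in sorted(A)[:10]])
\end{lstlisting}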
Of course, the set of NFT owners in the full dataset includes the addresses of smart contracts which act as exchanges and clearinghouses for NFTs and work with thousands or even tens of thousands of NFTs at a time. It also includes the addresses of bots which may not be implemented as smart contracts but which automatically submit transactions based on their triggering logic.
Figure \ref{tokens-owned-per-address-logarithmic-low-scale} considers only those addresses which assumed ownership of at most 1500 NFTs between block $12,150,245$ (April 1, 2021) and block $13,296,011$ (September 25, 2021).
\begin{figure}
\resizebox{\textwidth}{!}{\includegraphics{tokens_owned_histogram_low_scale.png}}
\caption{Addresses with 1500 tokens or fewer by number of tokens owned}\label{tokens-owned-per-address-logarithmic-low-scale}
\end{figure}
Even this graph is better viewed on a logarithmic scale, as in Figure \ref{tokens-owned-per-address-logarithmic-log-low-scale}.
\begin{figure}
\resizebox{\textwidth}{!}{\includegraphics{tokens_owned_histogram_log_low_scale.png}}
\caption{Addresses with 1500 tokens or fewer by number of tokens owned (log scale)}\label{tokens-owned-per-address-logarithmic-log-low-scale}
\end{figure}
These statistics suggest one additional hypothesis - that the distribution of the number of NFTs per owner follows a Zipf distribution.
This hypothesis is clearly supported \cite{powers-zipf} by Figure \ref{zipf}, which plots the log-log graph of each number $n$ of tokens that an address could own against the number of addresses owning that number of tokens.
\begin{figure}
\resizebox{\textwidth}{!}{\includegraphics{zipf.png}}
\caption{Log of number of tokens owned vs. log of number of addresses owning that number of tokens}\label{zipf}
\end{figure}
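One way to check the Zipfian hypothesis numerically is to fit a straight line through the points $(\log n, \log A_n)$ and inspect its slope. The sketch below is hypothetical (placeholder file name, simple least-squares fit rather than a formal power-law estimator), but it conveys the idea:
\begin{lstlisting}[caption={Hypothetical sketch: estimating the Zipf exponent on the log-log scale}]
# Hypothetical sketch; fits log A_n against log n with ordinary least squares.
import sqlite3
import numpy as np

con = sqlite3.connect("nfts.sqlite")  # placeholder file name
query = """
    SELECT owned, COUNT(*) FROM (
        SELECT to_address, COUNT(*) AS owned
        FROM (SELECT to_address FROM mints UNION ALL SELECT to_address FROM transfers)
        GROUP BY to_address
    ) GROUP BY owned
"""
rows = con.execute(query).fetchall()
con.close()

n = np.array([r[0] for r in rows], dtype=float)
A_n = np.array([r[1] for r in rows], dtype=float)

# Slope of the least-squares line through (log n, log A_n) estimates the exponent.
slope, intercept = np.polyfit(np.log(n), np.log(A_n), 1)
print(f"estimated Zipf exponent: {-slope:.2f}")
\end{lstlisting}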
Table \ref{ownership-percentages} quantifies this relationship further. It shows, for example, that $83.29\%$ of the addresses which assumed ownership of an NFT between block $12,150,245$ (April 1, 2021) and block $13,296,011$ (September 25, 2021) did so for only a handful of tokens $n$, where $1 \leq n \leq 10$.
\begin{table}
\resizebox{\textwidth}{!}{\begin{tabular}{| c | c | c | c | c |}
\hline
\textbf{NFTs owned} $n$ & \textbf{Number of addresses} & \textbf{Proportion of addresses} & \textbf{Total tokens owned by addresses} & \textbf{Proportion of tokens owned by addresses} \\
\hline
$n \geq 1$ & $625,354$ & $1$ & $7,020,950$ & $1$ \\
\hline
$1 \leq n \leq 1000$ & $625,107$ & $0.9996$ & $6,112,780$ & $0.8707$ \\
\hline
$1 \leq n \leq 100$ & $615,658$ & $0.9845$ & $4,036,089$ & $0.5749$ \\
\hline
$1 \leq n \leq 10$ & $520,834$ & $0.8329$ & $1,335,177$ & $0.1902$ \\
\hline
$1 \leq n \leq 5$ & $456,399$ & $0.7298$ & $842,892$ & $0.1201$ \\
\hline
$1 \leq n \leq 2$ & $348,948$ & $0.558$ & $438,090$ & $0.0624$ \\
\hline
$n = 1$ & $259,806$ & $0.4155$ & $259,806$ & $0.037$ \\
\hline
\end{tabular}}
\caption{Number of addresses by NFTs owned}\label{ownership-percentages}
\end{table}
It is possible that there is a small number of people or organizations who are creating a distinct wallet for each NFT they purchase, but doing so at a scale that would skew our analysis would be technologically and operationally complex enough, and expensive enough, that it is virtually impossible.
What this data shows us is that the Ethereum NFT market is open in the sense that the vast majority of its participants are small-time purchasers who likely make their purchases manually. There are few barriers to entry for those who wish to participate in this market.
There is also a great inequality in the Ethereum NFT market in the sense that the top $16.71\%$ of NFT owners control $80.98\%$ of the NFTs. This latter statistic does require a little more nuance in its interpretation, however, as many of those owners are marketplaces and clearinghouses like OpenSea \cite{opensea}, Nifty Gateway \cite{nifty}, and other platforms of the same ilk. We plan to expand on this analysis in a future report.
\section{The utility of an NFT}
People buy NFTs for different reasons. Some buyers may purchase an NFT to support their favorite artists or communities. Others may prefer to purchase NFTs that bring them extrinsic utility. A good example of this kind of utility is the Ethereum Name Service (ENS) \cite{ens}, which allows anyone to create a human-friendly name (such as vitalik.eth) associated with the Ethereum addresses (and more). The associations are represented as NFTs on the ENS registry contract, and many services (e.g. Coinbase, Metamask, etc.) support resolution of ENS names as part of transfers and other blockchain operations.
The Ethereum blockchain is simultaneously home to NFTs like the Ethereum Name Service and governance NFTs for various decentralized protocols, and NFTs that only have subjective value like those containing links to digital art and other forms of media.
How do we distinguish between NFTs that represent intrinsic, subjective value and those which have clear extrinsic utility?
This is a question that the blockchain community and blockchain regulators have been concerned with for several years. Until now, there has been no objective, quantitative measure of the level of extrinsic utility of a token.
We propose that the extrinsic utility of a token should be measured at the level of its NFT collection (smart contract). For tokens that do represent extrinsic utility for a large pool of users, other tokens deployed as part of the same contract must also represent similar utility. Market forces demand this, as exaggerated scarcity diminishes utility \cite{menger}.
Let us consider a few different possible statistics that could act as measures of extrinsic utility for NFT contracts. We will start by considering statistics that perform poorly as measures of utility and address their faults to arrive at good candidates.
\subsection{Maximum token value}
We could attempt to use the maximum value of a token in an NFT contract as a measure of its utility. However, this statistic does not capture distributional information about other tokens in the same contract. We would not be able to use this statistic to understand if all the tokens in the same contract seemed to have similar utility. This makes it a poor statistic for the purposes of measuring external utility.
\subsection{Distribution of value over the tokens in a collection}
This statistic has an advantage over the maximum token value in that it encodes information about all the tokens in a collection.
It suffers from two problems:
\begin{enumerate}
\item{ It is not a scalar statistic. We would need to calculate several moments of the token value distribution over the collection in order to capture all the information it contains, and this could make it awkward to work with.}
\item{ It requires us to estimate the value of the tokens in a contract. The estimation of value from on-chain data is difficult because people are not required to exchange monetary value on the blockchain. It would be a simple matter for two parties to exchange money off-chain and then exchange their NFTs on chain.}
\end{enumerate}
This second problem is a practically insurmountable obstacle to the use of any statistic based on the distribution of values over the tokens in an NFT contract.
\subsection{Distribution of number of transfers over the tokens in a collection}
This statistic has an advantage over the previous statistics in that it doesn't require us to estimate the value of NFTs in a collection.
Like the previous candidate, this too is not a scalar statistic. It would require a great deal of care in analysis and interpretation to draw conclusions about the utility of an NFT contract from this distribution of transfers.
A more serious concern is that both the value of tokens and the number of times they are transferred is dependent on the particular form of their extrinsic utility. One can imagine use cases in which tokens derive utility through being transferred or through being volatile in value, and other use cases in which tokens derive utility through being held or through being stable in value.
Because the \emph{form} of utility could have such a drastic effect on this candidate and the previous one, neither is an ideal candidate for a measure of utility. Our measure of utility should be independent of the form of the utility. We cannot predict how people will derive utility from their NFTs in the future, but we would like to be aware of when they start deriving it.
\subsection{Distribution of ownership over the tokens in a collection}
Now we narrow in on an invariant. We discussed why it is sensible to associate extrinsic utility with an NFT contract - a full collection of NFTs - rather than with the individual tokens: extrinsic utility applies to large populations of users rather than to a small number of individuals, and excessive scarcity diminishes utility.
This means that, if an NFT collection has extrinsic utility, then it should have many distinct owners relative to its number of tokens.
Suppose that a few parties strike out to purchase most of the tokens. Then the tokens would gain monetary value, and would become good vehicles for investment. But this represents a gain in intrinsic utility and a reduction in extrinsic utility. So we see that the dynamics whereby extrinsic utility is traded off for intrinsic utility correspond to an increased concentration of ownership among a few addresses, as compared to a dispersion of ownership across many addresses.
The level of dispersion of ownership across the tokens in an NFT contract is invariant to the particular form that the external utility of the tokens takes. If the form involves many transfers, for example, it still doesn't significantly affect the dispersion at any single point in time.
This notion of dispersion of ownership is an invariant of the NFT contract under different forms of extrinsic utility which nonetheless captures how attractive NFTs in that contract are to the general Ethereum community.
The notion of information theoretic entropy formalizes this concept of dispersion. We propose a statistic called \emph{ownership entropy} as a measure of the external utility of the tokens in an NFT contract.
\subsection{Ownership entropy}
Let $\pi$ be a probability distribution on the sample space $\{1, 2, \ldots, n\}$ for $n \geq 1$. Denote $\pi = (\pi_1, \pi_2, \ldots, \pi_n)$, where $\pi_j$ is the probability associated with the event $j$.
Then the entropy of $\pi$ is defined as: $$H(\pi) = \sum_{j=1}^n -\pi_j \log(\pi_j).$$
(Here, $\log$ denotes the logarithm to base 2; a different base would only rescale the entropy by a constant factor.)
The entropy $H(\pi)$ is maximized for the distribution $\pi$ which assigns equal probability to all its outcomes. In fact, from Jensen's inequality,
$$H(\pi_1, \ldots, \pi_n) \leq \log(n),$$
with the maximum achieved if and only if $\pi_1 = \pi_2 = \ldots = \pi_n = \frac{1}{n}$.
$H(\pi)$ is an information theoretic measurement of how well distributed the probability mass of $\pi$ is over its sample set, and is maximized when the probability mass is evenly distributed. The units of entropy are bits (as in binary digits).
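As a small illustration (a hypothetical sketch; the helper function below is our own), the entropy of a distribution can be computed in a few lines, and the uniform distribution on four outcomes indeed attains the maximum of $\log(4) = 2$ bits:
\begin{lstlisting}[caption={Hypothetical sketch: computing the entropy of a distribution in bits}]
# Hypothetical sketch of H(pi) in bits.
import math

def entropy(pi):
    # Terms with zero probability contribute nothing to the sum.
    return -sum(p * math.log2(p) for p in pi if p > 0)

print(entropy([0.25, 0.25, 0.25, 0.25]))  # 2.0 bits: the maximum for n = 4
print(entropy([0.97, 0.01, 0.01, 0.01]))  # about 0.24 bits: mass concentrated on one outcome
\end{lstlisting}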
This makes it a natural candidate to measure the dispersion of ownership over the tokens of an NFT contract.
For an NFT contract $C$, let $T$ denote the set of tokens (represented by their token IDs) present in $C$. For each token $t \in T$, let $A_t$ denote the address owning that token. It is possible for $A_t$ to be any Ethereum address, including the $0$ address.
We can think of $C$ as a probability distribution over its tokens whereby we select each token $t$ with probability $\frac{1}{|T|}$. This induces a probability distribution $\pi_C$ on the set $\mathcal{A}$ of all Ethereum addresses whereby, for any address $A \in \mathcal{A}$, the probability of $\pi_C$ selecting that address is: $$\pi_{C,A} = \frac{|\{t \in T : A_t = A\}|}{|T|}.$$
We define the ownership entropy of $C$ to be the entropy $H(\pi_C)$ of this probability distribution that $C$ induces on $\mathcal{A}$.
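A hypothetical sketch of this computation against the dataset is given below; it assumes the $current\_owners$ relation exposes columns named $nft\_address$ and $owner$ (the released schema may differ), uses a placeholder file name, and reuses the $entropy$ helper from the earlier sketch:
\begin{lstlisting}[caption={Hypothetical sketch: ownership entropy per NFT contract}]
# Hypothetical sketch; column names for current_owners are assumed, not guaranteed.
import sqlite3
import math
from collections import Counter

def entropy(pi):
    return -sum(p * math.log2(p) for p in pi if p > 0)

con = sqlite3.connect("nfts.sqlite")  # placeholder file name
cur = con.cursor()
cur.execute("SELECT nft_address, owner FROM current_owners")

# For each contract, count how many of its tokens each address currently owns.
owners_by_contract = {}
for contract, owner in cur.fetchall():
    owners_by_contract.setdefault(contract, Counter())[owner] += 1
con.close()

ownership_entropy = {}
for contract, owner_counts in owners_by_contract.items():
    total = sum(owner_counts.values())
    pi = [count / total for count in owner_counts.values()]
    ownership_entropy[contract] = entropy(pi)

# Contracts with the most dispersed ownership come out on top.
top = sorted(ownership_entropy.items(), key=lambda kv: kv[1], reverse=True)[:5]
print(top)
\end{lstlisting}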
Figure \ref{ownership-entropy} plots the number of contracts on the Ethereum blockchain that saw mint or transfer activity between block number $12,150,245$ and $13,296,011$, by ownership entropy.
\begin{figure}
\resizebox{\textwidth}{!}{\includegraphics{ownership_entropy.original.png}}
\caption{NFT contracts by ownership entropy}\label{ownership-entropy}
\end{figure}
The tokens with highest ownership entropy are those that have clear utility, such as the
Ethereum Name Service \cite{ens} and the Uniswap v3 Position token \cite{uniswap-v3}. The tokens at the bottom are NFT releases of art collections which have failed to gain any traction.
Table \ref{tokens} shows a sample of token collections at varying levels of ownership entropy, with a link to each project as well as the number of tokens which were active in that collection between block $12,150,245$ (April 1, 2021) and block $13,296,011$ (September 25, 2021), and the number of distinct addresses which currently\footnote{As of block $13,296,011$.} hold ownership of these tokens.
\begin{table}
\resizebox{\textwidth}{!}{\begin{tabular}{| c | c | c | c | c |}
\hline
\textbf{NFT Collection} & \textbf{Ownership entropy} & \textbf{Level} & \textbf{Tokens traded} & \textbf{Owners registered} \\
\hline
\href{https://ens.domains/}{Ethereum Name Service} & 13.864019 & High utility & 145,303 & 64,717 \\
\href{https://rarible.com/}{Rarible Governance Token} & 13.831032 & High utility & 86,375 & 42,644 \\
\href{https://uniswap.org/}{Uniswap v3 Position NFT} & 13.742724 & High utility & 132,087 & 38,790 \\
\href{https://www.producthunt.com/posts/enigma-project}{The Enigma Project} & 6.011324 & Highly speculative & 381 & 108 \\
\href{https://rtfkt.com/spacedrip}{The RTFKT Capsule Space Drip} & 6.010405 & Highly speculative & 112 & 78 \\
\href{https://twitter.com/DommiesNFT}{Dommies} & 6.009679 & Highly speculative & 1,034 & 236 \\
\href{https://coinclarity.com/dapp/worldcuptoken/}{World Cup Token} & 2.004886 & Stalled & 11 & 5 \\
\href{https://www.eminem.com/news/shadycon-x-nifty-gateway}{SHADYCON (associated with Eminem)} & 2.002227 & Stalled & 101 & 23 \\
\hline
\end{tabular}}
\caption{A sample of tokens at varying levels of ownership spread}\label{tokens}
\end{table}
The cases of Ethereum Name Service and the Uniswap position NFT are particularly interesting because holders realize the value of those NFTs in very different ways - an address is much more likely to hold onto an ENS token and much more likely to trade their liquidity position on Uniswap v3. Despite the differences between these two contracts in the form of their utility, they both rise to the top when we consider their ownership entropies.
We also see that, at the lower ranges, ownership entropy serves to measure adoption for tokens which have no extrinsic utility (like the RTFKT Capsule Space Drip and Dommies versus World Cup Token).
This data also highlights the importance of considering ownership entropy as a time series, and of tracking the differences in ownership entropy over time to reflect changing market perceptions of NFTs. There is a big difference between World Cup Token, which launched years ago, and SHADYCON, which only launched recently. We can only draw a true comparison between these tokens after both have been allowed time to achieve stable dynamics on the blockchain.
\section{Conclusions}
Our analysis paints a picture of the Ethereum NFT market as an open and free market which exhibits the same kinds of wealth disparities as conventional markets.
It also provides early validation for the viability of ownership entropy as a means of quantifying the extrinsic utility of NFT contracts.
In future versions of this report, we plan to:
\begin{enumerate}
\item{Conduct analysis of the openness of the Ethereum market over time. Rather than only considering data in a single window of time (in the case of this report, April 1, 2021 to September 25, 2021), we will consider the time series of the same statistics generated at frequent intervals from 2016 until the time of publication of the report.}
\item{Expand the analysis of ownership entropy into an analysis of ownership information gain - the change in ownership entropy over time.}
\item{Enrich our dataset and our analyses with information about the addresses which funded NFT transfers.}
\item{Enrich our dataset and our analyses with side information about NFT valuations from centralized sources (like the OpenSea API).}
\item{ Provide further validation for ownership entropy as a measure of extrinsic utility, not only of ERC721 tokens, but also of ERC20 tokens.}
\end{enumerate}
\section{Collaboration}
The calculations presented here, as well as a more elaborate analysis, are available as a Kaggle notebook at:\\
\href{https://www.kaggle.com/simiotic/ethereum-nft-analysis}{https://www.kaggle.com/simiotic/ethereum-nft-analysis}\\
\noindent The Moonstream platform, which we used to gather this data, is free software released under the Apache 2.0 License \cite{apache-2}:\\
\href{https://github.com/bugout-dev/moonstream}{https://github.com/bugout-dev/moonstream}\\
\noindent The Ethereum NFTs dataset is available on Kaggle under a Creative Commons license (CC-BY-4.0) \cite{cc-by-4}:\\
\href{https://www.kaggle.com/simiotic/ethereum-nfts}{https://www.kaggle.com/simiotic/ethereum-nfts}\\
\noindent You can reach the Moonstream team at any time on Discord:\\
\href{https://discord.gg/K56VNUQGvA}{https://discord.gg/K56VNUQGvA}
\begin{thebibliography}{2}
\bibitem{nft-definition} Wikipedia.org. \href{https://en.wikipedia.org/wiki/Non-fungible_token}{Non-fungible token.}
\bibitem{reuters-nft-surge} Elizabeth Howcroft. \href{https://www.reuters.com/technology/nft-sales-surge-speculators-pile-sceptics-see-bubble-2021-08-25/}{NFT sales surge as speculators pile in, sceptics see bubble.} Reuters. August 25, 2021.
\bibitem{ethereum-nfts} Sophia Arakelyan, Andrey Dolgolev, Neeraj Kashyap, Nana Landau, Daria Navoloshnikova, Tim Pechersky, Yhtyyar Sahatov, Sergei Sumarokov. \href{https://www.kaggle.com/simiotic/ethereum-nfts}{Ethereum NFTs}. Kaggle, 2021. (doi:10.34740/KAGGLE/DSV/2698517)
\bibitem{ethereum-smart-contracts} Ethereum.org. \href{https://ethereum.org/en/developers/docs/smart-contracts/}{Introduction to Smart Contracts.}
\bibitem{cryptopunks} \href{https://www.larvalabs.com/cryptopunks}{CryptoPunks. https://www.larvalabs.com/cryptopunks}
\bibitem{cryptokitties} \href{https://www.cryptokitties.co/}{CryptoKitties. https://www.cryptokitties.co}
\bibitem{opensea} \href{https://opensea.io/}{OpenSea. https://opensea.io}
\bibitem{nifty} \href{https://niftygateway.com/}{Nifty Gateway. https://niftygateway.com}
\bibitem{erc721} William Entriken, Dieter Shirley, Jacob Evans, Nastassia Sachs. \href{https://github.com/ethereum/eips/issues/721}{EIP-721: A Non-Fungible Token Standard.} January 24, 2018.
\bibitem{moonstream} \href{https://moonstream.to}{Moonstream. https://moonstream.to}
\bibitem{loot-middleware} \href{https://etherscan.io/tx/0xa578aac6db19c464f69492747fa147985006281a57d77e46316fe09fb406deb2}{Transaction $0xa578aac6db19c464f69492747fa147985006281a57d77e46316fe09fb406deb2$. Etherscan.io}
\bibitem{loot} \href{https://www.lootproject.com/}{Loot Project. https://www.lootproject.com/}
\bibitem{powers-zipf} David M W Powers. \href{https://aclanthology.org/W98-1218/}{Applications and explanations of Zipf's law.} Joint conference on new methods in language processing and computational natural language learning, 1998. Association for Computing Linguistics. pp. 151-160.
\bibitem{ens} \href{https://ens.domains/}{Ethereum Name Service. https://ens.domains}
\bibitem{menger} \href{https://www.jstor.org/stable/2956146}{Karl Menger. On the Origin of Money. The Economic Journal Vol. 2, No. 6, June 1892.}
\bibitem{uniswap-v3} \href{https://uniswap.org/blog/uniswap-v3/}{The Uniswap Team. Introducing Uniswap V3}
\bibitem{apache-2} \href{https://www.apache.org/licenses/LICENSE-2.0}{Apache License, Version 2.0}
\bibitem{cc-by-4} \href{https://creativecommons.org/licenses/by/4.0/}{Creative Commons Attribution 4.0 International (CC BY 4.0).}
\end{thebibliography}
\end{document}
\documentclass[12pt]{article}
\usepackage{pylatex}
\usepackage{mpllatex}
\usepackage{geometry}
\usepackage{pgf}
\usepackage{amsmath}
\usepackage{listings}
\usepackage{hyperref}
\usepackage{breqn}
\usepackage{caption}
\usepackage{examples}
\geometry{papersize={210mm,297mm},hmargin=2cm,tmargin=1.0cm,bmargin=1.5cm}
\def\pyLaTeX{{\tt\small pyLaTeX}}
\def\mplLaTeX{{\tt\small mplLaTeX}}
\begin{document}
\section*{Using output from other sources}
This document performs no computations (i.e., it has no active code blocks) but instead uses selected parts of the output created by other documents. Thus this document can be compiled using {\tt\small pdflatex summary}. The basic structure of this document is as follows.
\begin{minipage}[t]{0.75\textwidth}
\begin{latex}
\documentclass[12pt]{article}
\usepackage{pylatex} % so that we can use \py{foo}
\usepackage{mpllatex} % so that we can use \mpl{bah}
\usepackage{amsmath}
... % other packages such as geometry, hyperref, breqn etc.
\begin{document}
...
\input{example-01.pytex} % all Python output from example-01.tex
...
\begin{align*}
&\py*{ans.301}\\ % the Python output
&\py*{ans.302}
\end{align*}
...
\input{mixed.pytex} % all Python output from mixed.tex
\input{mixed.mpltex} % all Maple output from mixed.tex
...
\begin{gather*}
\mpl{ans.102} % the Maple output
\py{ans.102} % the Python output
\end{gather*}
...
\end{document}
\end{latex}
\end{minipage}
\vspace{10pt}
Note that care must be taken to avoid name clashes across tags from different sources. If two or more sources define tags with the same name (e.g., {\tt\small foo.pytex} and {\tt\small bah.pytex} both define {\tt\small\verb|\pytag{ans.101}|}) then the last definition will be used. This problem does not arise when the shared tag name, e.g., {\tt\small rhs.101}, occurs in two different languages, such as one in Python and the other in Maple. This can be seen below where the first and last examples both refer to the name {\tt\small ans.102}.
\vspace{10pt}
Note also that the lines
\vspace{5pt}
\begin{latex}
\usepackage{pylatex} % so that we can use \py{foo}
\usepackage{mpllatex} % so that we can use \mpl{bah}
\end{latex}
\vspace{5pt}
are not essential -- they can be left out but only if the {\tt\small -I} option was supplied when compiling the source. For example, if the {\tt\small pylatex.sty} and {\tt\small mpllatex.sty} files were located in {\tt\small /users/foo/tex/} then the file {\tt\small bah.tex} (containing both Python and Maple code) could be compiled using
\vspace{5pt}
\begin{lstlisting}
pylatex.sh -xi bah -I/users/foo/tex/pylatex.sty
mpllatex.sh -xi bah -I/users/foo/tex/mpllatex.sty
\end{lstlisting}
\vspace{10pt}
This will produce {\tt\small bah.pytex} and {\tt\small bah.mpltex} each containing not only the selected output from {\tt\small bah.tex} but also the definitions of the \pyLaTeX\ and \mplLaTeX\ macros. The {\tt\small -x} option excludes processing of the output by LaTeX.
\section*{Example 1}
\input{example-01.pytex}
\begin{align*}
&\py*{ans.102}\\
&\py*{ans.302}\\
&\py*{ans.303}\\
&\py*{ans.305}\\
&\py*{ans.401}\\
&\py*{ans.402}\\
&\py*{ans.403}\\
&\py*{ans.404}
\end{align*}
\section*{Example 2}
\input{example-02.pytex}
\begin{align*}
\py {CalculusTable}
\end{align*}
\section*{Example 3}
\input{example-03.pytex}
\begin{align*}
\py{lhs.01} &= \py{rhs.01}\\
&= \py{rhs.02}\\
&= \py{rhs.03}\\
&= \py{rhs.04}\\
&\approx \py{rhs.05}
\end{align*}
\clearpage
\section*{Example 4}
\begin{minipage}{\textwidth}
\centering
\IfFileExists{example-04-fig.pdf}%
% {\scalebox{0.75}{\input{example-04-fig.pdf}}}{Failed to create plot.}
{\includegraphics[width=6.4in]{example-04-fig.pdf}}{Failed to create plot.}
\captionof{figure}{The first six Bessel functions.}
\end{minipage}
\section*{Example 5}
\input{example-05.pytex}
\begin{dgroup*}[spread={5pt}]
\begin{dmath*} f(x) = \Py*{ans.511} \end{dmath*}
\begin{dmath*} {}= \Py*{ans.512} \end{dmath*}
\begin{dmath*} {}= \Py*{ans.513} \end{dmath*}
\begin{dmath*} {}= \Py*{ans.514} \end{dmath*}
\begin{dmath*} {}= \Py*{ans.514} \end{dmath*}% LCB: do we need extra space for the tag?
\end{dgroup*}
\section*{Example 6}
\input{example-06.pytex}
\def\eps{\epsilon}
\def\RuleA{\vrule depth0pt width0pt height14pt}
\def\RuleB{\vrule depth8pt width0pt height14pt}
\def\RuleC{\vrule depth10pt width0pt height16pt}
\setlength{\tabcolsep}{0.025\textwidth}%
\vspace{20pt}
\begin{center}
\begin{tabular}{cccc}%
\noalign{\hrule height 1pt}
\multicolumn{4}{c}{\RuleC\rmfamily\bfseries%
Newton-Raphson iterations \quad%
$x_{n+1} = x_n - f_n/f'_n\ ,\quad f(x) = x-e^{-x}$}\\
\noalign{\hrule height 1pt}
\RuleB$ n$&$ x_n$&$ \eps_{n} = x_{n} - e^{-x_{n}}$&$\eps_{n}/\eps_{n-1}^2$\\
\noalign{\hrule height 0.5pt}
\py{table}
\noalign{\hrule height 1pt}
\end{tabular}
\end{center}
\section*{Example 7}
\input{example-07.pytex}
\bgroup\tt
\begin{tabular}{rl}
\py{info}
\end{tabular}
\egroup
\clearpage
\section*{Python and Maple}
\input{mixed.pytex}
\input{mixed.mpltex}
\subsection*{The Maple output}
The general solution of the differential equation is
\begin{equation*}
\Mpl{ans.102}
\end{equation*}
while the particular solution satisfying the boundary conditions is given by
\vspace{5pt}
\begin{align*}
y(x) &= \mpl{ans.104}
\end{align*}
\subsection*{The Python output}
\begin{minipage}{\textwidth}
\centering
\IfFileExists{mixed-fig.pdf}%
{\includegraphics[width=0.75\textwidth]{mixed-fig.pdf}}{Failed to create pdf plot.}
\captionof{figure}{The function and its derivative.}
\end{minipage}
\end{document}