%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%% LaTeX book template %%
%% Author: Amber Jain (http://amberj.devio.us/) %%
%% License: ISC license %%
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
\documentclass[a4paper,11pt]{book}
\usepackage[T1]{fontenc}
\usepackage[utf8]{inputenc}
\usepackage{lmodern}
\usepackage{subcaption}
\usepackage[normalem]{ulem}
\usepackage{enumitem,kantlipsum}
\usepackage{subfiles} % Best loaded last in the preamble
%%%%%%%%%%%question block
\usepackage[tikz]{bclogo}
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% Source: http://en.wikibooks.org/wiki/LaTeX/Hyperlinks %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
\usepackage{hyperref}
\usepackage{graphicx}
\usepackage[english]{babel}
\usepackage{graphicx,amssymb,amstext,amsmath}
% \usepackage{tikz}
\usepackage{cancel}
\usepackage{mathtools}
\DeclarePairedDelimiter\ceil{\lceil}{\rceil}
\DeclarePairedDelimiter\floor{\lfloor}{\rfloor}
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% 'dedication' environment: To add a dedication paragraph at the start of book %
% Source: http://www.tug.org/pipermail/texhax/2010-June/015184.html %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
\newenvironment{dedication}
{
\cleardoublepage
\thispagestyle{empty}
\vspace*{\stretch{1}}
\hfill\begin{minipage}[t]{0.66\textwidth}
\raggedright
}
{
\end{minipage}
\vspace*{\stretch{3}}
\clearpage
}
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% The setting for the exercise part: we can write the problems and the solutions at the same place, but can be displayed in the pdf at another place %
% Source: https://tex.stackexchange.com/questions/369265/math-book-how-to-write-exercise-and-answers %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
\usepackage{multicol}
\usepackage{multirow}
\usepackage{ifthen}
\newboolean{firstanswerofthechapter}
\usepackage[pdf]{graphviz} %needed package with pdf option
\usepackage{xcolor}
\colorlet{lightcyan}{cyan!40!white}
\usepackage{chngcntr}
\usepackage{stackengine}
\usepackage{tasks}
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% The setting for the chapter style %
% Source: https://texblog.org/2012/07/03/fancy-latex-chapter-styles/ %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
\usepackage[Sonny]{fncychap}
% \usepackage{titlesec}
%%%%%%%%%%%%%
%%%%%%%%%%%%%%%
% use underline in the lstlisting%
%%%%%%%%%%%%%%
%%%%%%%%%%%
\usepackage{upquote}
% \titleformat
% {\chapter} % command
% [display] % shape
% {\bfseries\Large}%\itshape} % format
% {Chapter No. \ \thechapter} % label
% {0.5ex} % sep
% {
% \rule{\textwidth}{1pt}
% \vspace{1ex}
% \centering
% } % before-code
% [
% \vspace{-0.5ex}%
% \rule{\textwidth}{0.3pt}
% ] % after-code
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% The setting for the examples style %
% Source: https://tex.stackexchange.com/questions/295589/how-to-enumerate-a-problem-set-in-a-book-accordingly-with-the-chapter-number %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
\usepackage{enumitem}
\newlist{examples}{enumerate}{1}
\setlist[examples]{label={\thechapter.\arabic*}}
% \BeforeBeginEnvironment{example}{\vspace{\baselineskip}}
% \AfterEndEnvironment{example}{\vspace{\baselineskip}}
% \BeforeBeginEnvironment{sourcecode}{\vspace{\baselineskip}}
% \AfterEndEnvironment{sourcecode}{\vspace{\baselineskip}}
\newlength{\longestlabel}
\settowidth{\longestlabel}{\bfseries viii.}
\settasks{counter-format={tsk[r].}, label-format={\bfseries}, label-width=\longestlabel,
item-indent=0pt, label-offset=2pt, column-sep={10pt}}
\usepackage[lastexercise,answerdelayed]{exercise}
\counterwithin{Exercise}{chapter}
\counterwithin{Answer}{chapter}
\renewcounter{Exercise}[chapter]
\newcommand{\QuestionNB}{\bfseries\arabic{Question}.\ }
\renewcommand{\ExerciseName}{EXERCISES}
\renewcommand{\ExerciseHeader}{\noindent\def\stackalignment{l}% code from https://tex.stackexchange.com/a/195118/101651
\stackunder[0pt]{\colorbox{cyan}{\textcolor{white}{\textbf{\LARGE\ExerciseHeaderNB\;\large\ExerciseName}}}}{\textcolor{lightcyan}{\rule{\linewidth}{2pt}}}\medskip}
\renewcommand{\AnswerName}{Exercises}
\renewcommand{\AnswerHeader}{\ifthenelse{\boolean{firstanswerofthechapter}}%
{\bigskip\noindent\textcolor{cyan}{\textbf{CHAPTER \thechapter}}\newline\newline%
\noindent\bfseries\emph{\textcolor{cyan}{\AnswerName\ \ExerciseHeaderNB, page %
\pageref{\AnswerRef}}}\smallskip}
{\noindent\bfseries\emph{\textcolor{cyan}{\AnswerName\ \ExerciseHeaderNB, page \pageref{\AnswerRef}}}\smallskip}}
\setlength{\QuestionIndent}{16pt}
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% design the code listing
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
\usepackage{listings}
\usepackage{float}
\usepackage{color}
\definecolor{codegreen}{rgb}{0,0.6,0}
\definecolor{codegray}{rgb}{0.5,0.5,0.5}
\definecolor{codepurple}{rgb}{0.58,0,0.82}
\definecolor{backcolour}{rgb}{0.95,0.95,0.92}
\lstdefinestyle{mystyle}{
backgroundcolor=\color{backcolour},
commentstyle=\color{codegreen},
keywordstyle=\color{magenta},
numberstyle=\tiny\color{codegray},
stringstyle=\color{codepurple},
basicstyle=\footnotesize,
breakatwhitespace=false,
breaklines=true,
captionpos=b,
keepspaces=true,
numbers=left,
numbersep=5pt,
showspaces=false,
showstringspaces=false,
showtabs=false,
tabsize=2
}
\lstset{style=mystyle}
%%%%%%%%%%%%%%%theorem, corollary and lemma
\newtheorem{theorem}{Theorem}[section]
\newtheorem{corollary}{Corollary}[theorem]
\newtheorem{lemma}[theorem]{Lemma}
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% package enumberate with different style %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
\usepackage{enumitem} %[label=(\alph*)], [label=(\Alph*)], [label=(\roman*)]
\usepackage{titlesec}
\usepackage[utf8]{inputenc}
%\setcounter{secnumdepth}{3} %subsubsection and paragraph
\newlist{inparaenum}{enumerate}{2}% allow two levels of nesting in an enumerate-like environment
\setlist[inparaenum]{nosep}% compact spacing for all nesting levels
\setlist[inparaenum,1]{label=\bfseries\arabic*.}% labels for top level
\setlist[inparaenum,2]{label=\arabic{inparaenumi}\emph{\alph*})}% labels for second level
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% better align the equation %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
\usepackage{notes}
\usepackage{amsmath}
\usepackage{subfiles}
\usepackage{subcaption}
\usepackage{dramatist}
% \usepackage{blindtext}
\setcounter{chapter}{-1}
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% Chapter quote at the start of chapter %
% Source: http://tex.stackexchange.com/a/53380 %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
\makeatletter
\renewcommand{\@chapapp}{}% Not necessary...
\newenvironment{chapquote}[2][2em]
{\setlength{\@tempdima}{#1}%
\def\chapquote@author{#2}%
\parshape 1 \@tempdima \dimexpr\textwidth-2\@tempdima\relax%
\itshape}
{\par\normalfont\hfill--\ \chapquote@author\hspace*{\@tempdima}\par\bigskip}
\makeatother
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% First page of book which contains 'stuff' like: %
% - Book title, subtitle %
% - Book author name %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% Book's title and subtitle
% \title{\Huge \textbf{Comprehensive Handbook for the Coding Interview } \footnote{This is a footnote.} \\ \huge Cracking LeetCode Problems Using Python \footnote{This is yet another footnote.}}
\title{\Huge \textbf{The Comprehensive Coding Interview Guide} \\ \huge Learning Data Structures and Algorithms with LeetCode }
\title{\Huge \textbf{The Comprehensive Manual to Algorithms in Python} \\ \huge And a Coding Interview Guidebook that Refers to LeetCode Questions}
\title{\Huge \textbf{RAPID Manual to Coding Interviews} \\ \huge Master \textbf{D}ata Structures, \textbf{A}lgorithms, \textbf{P}roblem-patterns with \textbf{R}ational Explanation and \textbf{I}nteractive Python Code}
\title{\Huge \textbf{The Big Coding Interview Book} \\ \huge Mastering Data Structures, Algorithms, Problem-patterns and Python along Cracking the Coding Interview }
\title{\Huge \textbf{Preparing for the Real-world Software Engineering} \\ \huge A Fun Ride to Master Data Structures, Algorithms, Problem-patterns and Python along Cracking the Coding Interview }
\title{\Huge \textbf{One Plus One Equals Four} \\ \huge Creates Passion and Confidence from Mastering Algorithmic Problem Solving and Problem Patterns of Real Interview Questions }
\title{\Huge \textbf{One Plus Two} \\ \huge Algorithmic Problem Solving, Python Modules, and Interview Problem Patterns }
\title{\Huge \textbf{Algorithmic Problem Solving Plus Two} \\ \huge Python Modules and Interview Problem Patterns }
\title{\Huge \textbf{Hands-on Algorithmic Problem Solving} \\ \huge Data Structures, Algorithms, Python Modules and Coding Interview Problem Patterns }
% \title{\Huge \textbf{The Book of Software Engineers} \\ \huge Mastering Data Structures, Algorithms, Problem-patterns and Python along Cracking the Coding Interview }
% \title{\Huge \textbf{The Comprehensive Coding Interview Guide} \footnote{This is a footnote.} \\ \huge Cracking LeetCode Problems Using Python \footnote{This is yet another footnote.}}
% \title{\Huge \textbf{Notebook of Data Structures and Algorithms for Coding Interview } \footnote{This is a footnote.} \\ \huge Cracking LeetCode Problems Using Python \footnote{This is yet another footnote.}}
% Author
\author{\textsc{Li Yin}\thanks{\url{https://liyinscience.com}}}
\begin{document}
\frontmatter
\maketitle
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% Add a dedication paragraph to dedicate your book to someone %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% \begin{dedication}
% To my loving mom, who taught me division with her three-years-long education. \\
% To my dear father, for his everlasting support. \\
% To my lovely dog, Apple, who supported me by giving me lots of kisses. \\
% To myself, cause writing of this book is a hell load of work.
% %Dedicated to my parents, my friend Yao Zhang, and mostly to myself.
% \end{dedication}
%\usepackage{float} % make images stay where they are
%%%%% get quote%%%%%%%%%%
% \makeatletter
% \renewcommand{\@chapapp}{}% Not necessary...
% \newenvironment{chapquote}[2][2em]
% {\setlength{\@tempdima}{#1}%
% \def\chapquote@author{#2}%
% \parshape 1 \@tempdima \dimexpr\textwidth-2\@tempdima\relax%
% \itshape}
% {\par\normalfont\hfill--\ \chapquote@author\hspace*{\@tempdima}\par\bigskip}
% \makeatother
%\usepackage{amsmath}
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% Auto-generated table of contents, list of figures and list of tables %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
\tableofcontents
\listoffigures
\listoftables
\mainmatter
%%%%%%%%%%%%%%%%
% Preface %
%%%%%%%%%%%%%%%%
\chapter{Preface}
\subfile{chapters/preface}
\subfile{chapters/chapter_reading_of_this_book}
%%%%%%%%%%%%%%%%
% Part I: Introduction %
%%%%%%%%%%%%%%%%
\part{Introduction}
\label{part_introduction}
%chapter 1
% chapter 2
\chapter{The Global Picture of Algorithmic Problem Solving}
\subfile{chapters/chapter_2_introduction_algo}
\label{chapter_introduction_algorithm}
\chapter{Coding Interviews and Resources}
\subfile{chapters/chapter_1_introductionCodingInterview}
\label{part_introduction_coding_interview}
%%%%%%%%%%%%%%%%
% Part Two: Warm-up %
%%%%%%%%%%%%%%%%
\part{Warm Up: Abstract Data Structures and Tools}
\label{part_abstract_data_structure_and_tools}
We warm up our ``algorithmic problem solving''-themed journey by getting to know the abstract data structures that represent data, the fundamental problem-solving strategies--searching and combinatorics--and the math tools--recurrence relations and useful math functions. We dedicate a standalone chapter to the math tools because of their important role in both algorithm design and analysis, as we shall see in the following chapters.
%%%%%%%%%Combinatoris and usefull function%%%%%%%%%%%%
% chapter 5 Complexity analysis
\subfile{chapters/chapter_abstract_data_strctures}
\label{chapter_abstract_data_strctures}
\subfile{chapters/chapter_combinatorics}
\label{chapter_combinatorics}
\subfile{chapters/chapter_recurrence_relation}
\label{chapter_recurrence_relation}
%%%%%%%%%%%%%%%%
% Part Three: Warm-up %
%%%%%%%%%%%%%%%%
\part{Get Started: Programming and Python Data Structures}
\label{part_program_and_python}
After the warm up, we prepare ourselves with hands-on skills--basic programming with Python 3, including the two function types--iteration and recursion--and connecting the dots between the abstract data structures and Python 3's built-in data types and commonly used modules. \includegraphics[width=0.3\columnwidth]{fig/bst_duplicate_counter.png}
Python is an object-oriented programming language whose reference implementation, CPython, is written in \texttt{C}, and its built-in types map well onto the abstract data structures we discussed. How to use the Python data types can be learned from the official Python tutorial: \url{https://docs.python.org/3/tutorial/}. However, to grasp the efficiency of these data structures, we need to examine the C source code (\url{https://github.com/python/cpython}), which relates easily to the abstract data structures.
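As a small, hedged illustration of why the underlying implementation matters (the sizes and timings below are only illustrative), the following snippet compares membership tests on a \texttt{list}, which is a dynamic array scanned linearly, and a \texttt{set}, which is a hash table with near-constant-time lookup:
\begin{lstlisting}[language=Python]
import timeit

n = 100000
data_list = list(range(n))   # dynamic array: membership test is O(n)
data_set = set(data_list)    # hash table: membership test is O(1) on average

# Time 100 membership tests of an element that is absent from both.
print(timeit.timeit(lambda: n in data_list, number=100))
print(timeit.timeit(lambda: n in data_set, number=100))
\end{lstlisting}
On a typical machine the list test is orders of magnitude slower, which follows directly from how the two types are laid out in the CPython source.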
\subfile{chapters/chapter_3_iteration_recursion}
\label{chapter_iteration_recursion}
\subfile{chapters/chapter_15_bit-manipulation}
\label{chapter_bit}
\subfile{chapters/chapter_code_data_structure}
\label{chapter_code_data_strucutres}
% \subfile{chapters/chapter_code_graph_and_tree}
% \label{chapter_code_graph_and_tree}
% \subfile{chapters/chapter_searching_strategies}
% \label{chapter_searching_strategies}
%%%%%%%%%%%%%%%%
% Part Two: Principle %
%%%%%%%%%%%%%%%%
\part{Core Principle: Algorithm Design and Analysis}
\label{part_core_principles}
This part embodies the principles of algorithm design and analysis--the central part of this book.
Before we start, I want to emphasize that the \textbf{tree} and \textbf{graph} data structures, especially the tree, are great visualization tools to assist us with algorithm design and analysis. A tree is a recursive structure: it can be used to visualize almost any recursion-based algorithm design, and even to compute the complexity, in which case it is specifically called a \textit{recursion tree}.
In the next three chapters, we introduce the principles of algorithm analysis (Chapter~\ref{chapter_algorithm_analysis}) and the fundamental algorithm design principles--divide and conquer (Chapter~\ref{chapter_divide_conquer}) and reduce and conquer (Chapter~\ref{chapter_decrease_and_conquer}). In Algorithm Analysis, we familiarize ourselves with the common concepts and techniques used to analyze the performance of algorithms--running time and space complexity. Divide and conquer is a widely used principle in algorithm design; in this book, we dedicate a whole chapter to its sibling design principle--reduce and conquer--which is essentially a superset of the optimization design principles--dynamic programming and greedy algorithms--that are further detailed in Chapter~\ref{chapter_dynamic-programming} and Chapter~\ref{chapter_greedy}.
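As a brief, self-contained preview of how the analysis and divide-and-conquer chapters fit together (this is the standard merge-sort recurrence, used here only as an example), suppose a problem of size $n$ is split into two halves and the two results are combined in linear time:
\begin{equation*}
T(n) = 2\,T\left(\frac{n}{2}\right) + cn, \qquad T(1) = c.
\end{equation*}
Unrolling the recursion tree gives $\log_2 n$ levels, each of total cost $cn$, so $T(n) = cn\log_2 n + cn = O(n \log n)$.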
\subfile{chapters/chapter_5_algorithm_analysis}
\label{chapter_complexity_analysis}
\subfile{chapters/chapter_searching_strategies}
\label{chapter_searching_strategies}
\subfile{chapters/chapter_combinatorial_search}
\label{chapter_search_problem_combinatorics}
%chapter 4 divide and conquer
\subfile{chapters/chapter_reduce_and_conquer}
\label{chapter_reduce_and_conquer}
% \subfile{chapter/chapter_searching_strategies}
% \label{chapter_searching_strategies}
\subfile{chapters/chapter_decrease_and_conquer}
\label{chapter_decrease_and_conquer}
\chapter{Sorting and Selection Algorithms}
\label{chapter_sorting}
\subfile{chapters/chapter_14_sorting}
\label{chapter_dynamic-programming}
\subfile{chapters/chapter_12_dynamicprogramming}
% chapter 13
\chapter{Greedy Algorithms}
\subfile{chapters/chapter_13_greedy_algo}
\label{chapter_greedy}
\subfile{chapters/chapter_hands_on_problem_solving}
\label{chapter_hands_on_problem_solving}
%%%%%%%%%%%%%%%%
% Part 4: Advanced Search %
%%%%%%%%%%%%%%%%
\part{Classical Algorithms}
\label{part_classical_algorithms}
In this part, we focus on application by solving a few families of classical problems, ranging from advanced search algorithms on linear data structures and advanced graph algorithms to typical string pattern matching. By studying and analyzing a representative algorithm for each problem, in which the fundamental algorithm design and analysis principles are leveraged, we further reinforce our algorithmic problem-solving skills.
% \label{part_complete_searching}
% \subfile{chapters/part_complete_search_introduction}
\chapter{Advanced Search on Linear Data Structures}
\subfile{chapters/chapter_advanced_linear_search}
\label{chapter_advanced_linear_search}
\chapter{Advanced Graph Algorithms}
\label{chapter_advanced_searching}
\subfile{chapters/chapter_advanced_graph_algorithm}
\chapter{Advanced Data Structures}
\label{chapter_advanced_data_structures}
\subfile{chapters/chapter_advanced_data_structures}
\chapter{String Pattern Matching Algorithms}
\label{topic_string_processing}
\subfile{chapters/question_2_string_matching}
% \part{Heuristic Search}
% \chapter{Heuristic Search}
% \label{chapter_heuristic_search}
% \subfile{chapters/chapter_heuristic_search}
%%%%%%%%%%%%%%%%
% Part 6: Math and Bit Manipulation %
%%%%%%%%%%%%%%%%
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%%%%%%%%%%%%% Sorting
% %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% \part{Combinatorics}
% \label{part_combinatorial_problems}
% % chapter 14
% \part{Solutions for Exercises}
% \subfile{chapters/solutions}
% \label{part_solutions}
% \part{Advanced Algorithms and Data Structures}
% \label{part_advanced_topics}
% In this part, we would include more detailed topics such as advanced graph algorithms, string process, dynamic programming, backtracking.
% chapter tree
% \chapter{Advanced Tree Algorithms}
% \label{chapter_advanced_tree_algorithms}
% \subfile{chapters/chapter_12_tree_algorithm}
% chapter 11
% % chapter 15
% \chapter{Bit Manipulation}
% \label{chapter_bit}
% \subfile{chapters/chapter_15_bit-manipulation}
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%%%%%%%%%%%%% Math and Probability Problems
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
\part{Math and Geometry}
\label{part_math_bit_manipulation}
% chapter 15
% chapter 16
\chapter{Math and Probability Problems}
\subfile{chapters/chapter_16_math}
\label{chapter_math_probability}
\part{Problem-Patterns}
\label{part_question}
\chapter{Array Questions (15\%)}
\subfile{chapters/question_3_array_question}
\label{array_problem}
\chapter{Linked List, Stack, Queue, and Heap Questions (12\%)} %(44+34+9+31)
\label{other_linear_datastrcutre_problem}
\subfile{chapters/question_4_linked_list_question}
\chapter{String Questions (15\%)}
\label{chapter_string_problem}
\subfile{chapters/question_5_pattern-matching}
\chapter{Tree Questions (10\%)}
\label{chapter_tree_problem}
\subfile{chapters/chapter_13_tree_algorithm}
\chapter{Graph Questions (15\%)}
\label{chapter_graph_problem}
\subfile{chapters/question_7_specific_algorithms_for_graph}
% chapter 1
\chapter{Dynamic Programming Questions (15\%)}
\subfile{chapters/question_1_dynamic_programming}
\label{dp_problem}
\part{Appendix}
\chapter{Cool Python Guide}
\subfile{chapters/chapter_introduction_to_python}
\label{appendix_python}
% \suffile{chapters/chapter_introduction_to_python}
% \label{chapter_introduction_to_python}
\nocite{*}
\bibliographystyle{IEEEbib}
{\bibliography{refer}}
\end{document}
| {
"alphanum_fraction": 0.6727951738,
"avg_line_length": 37.9056261343,
"ext": "tex",
"hexsha": "96e4f22a99ad7e6904b32f3336545fdf1fa94240",
"lang": "TeX",
"max_forks_count": null,
"max_forks_repo_forks_event_max_datetime": null,
"max_forks_repo_forks_event_min_datetime": null,
"max_forks_repo_head_hexsha": "131199fea0b082d92c0f272a495c7a56a3242b71",
"max_forks_repo_licenses": [
"Apache-2.0"
],
"max_forks_repo_name": "stungkit/Algorithms-and-Coding-Interviews",
"max_forks_repo_path": "Easy-Book/main.tex",
"max_issues_count": null,
"max_issues_repo_head_hexsha": "131199fea0b082d92c0f272a495c7a56a3242b71",
"max_issues_repo_issues_event_max_datetime": null,
"max_issues_repo_issues_event_min_datetime": null,
"max_issues_repo_licenses": [
"Apache-2.0"
],
"max_issues_repo_name": "stungkit/Algorithms-and-Coding-Interviews",
"max_issues_repo_path": "Easy-Book/main.tex",
"max_line_length": 825,
"max_stars_count": null,
"max_stars_repo_head_hexsha": "131199fea0b082d92c0f272a495c7a56a3242b71",
"max_stars_repo_licenses": [
"Apache-2.0"
],
"max_stars_repo_name": "stungkit/Algorithms-and-Coding-Interviews",
"max_stars_repo_path": "Easy-Book/main.tex",
"max_stars_repo_stars_event_max_datetime": null,
"max_stars_repo_stars_event_min_datetime": null,
"num_tokens": 5015,
"size": 20886
} |
\documentclass[aspectratio=169]{beamer}
\usetheme{metropolis} % Use metropolis theme
\usepackage[utf8]{inputenc}
\usepackage{graphicx}
\usepackage{eso-pic}
\usepackage{graphics}
\usepackage{tikz}
\usepackage[export]{adjustbox}
\usepackage{multicol}
\usepackage{listings}
\usepackage{helvet}
\usepackage{booktabs}
\usepackage{threeparttable}
\usepackage{marvosym}
\usepackage{hyperref}
\usepackage{soul} % For strike-through
\usepackage{tcolorbox} % For color box
\title{Introduction to Python\\for Stata Users}
\date{}
\author{Luis Eduardo San Martin} % Name of author(s) of session here
\institute{Development Impact Evaluation (DIME) \newline The World Bank }
\setbeamercolor{background canvas}{bg=white} % Sets background color
% The below command places the World Bank logo and DIME logo to the right corner
\titlegraphic{%
\begin{picture}(0,0)
\put(330,-180){\makebox(0,0)[rt]{\includegraphics[width=3cm]{img/WB_logo}}}
\end{picture}%
\begin{picture}(0,0)
\put(390,-180){\makebox(0,0)[rt]{\includegraphics[width=1.5cm]{img/i2i}}}
\end{picture}%
}
%%% Section page with picture of Light bulb
\makeatletter
\defbeamertemplate*{section page}{mytheme}[1][]{
\centering
\begin{minipage}{22em}
\raggedright
\usebeamercolor[fg]{section title}
\usebeamerfont{section title}
\par
\ifx\insertsubsectionhead\@empty\else%
\usebeamercolor[fg]{subsection title}%
\usebeamerfont{subsection title}%
\fi
\ifstrempty{#1}{}{%
\includegraphics[width=100mm, height=60mm]{#1}%
}
\\
\insertsectionhead\\[-1ex]
\insertsubsectionhead
\usebeamertemplate*{progress bar in section page}
\end{minipage}
\par
\vspace{\baselineskip}
}
\makeatother
%%% Define a command to include picture in section,
%%% make section, and revert to old template
\newcommand{\sectionpic}[2]{
\setbeamertemplate{section page}[mytheme][#2]
\section{#1}
\setbeamertemplate{section page}[mytheme]
}
%%% The command below allows for the text that contains Stata code
\definecolor{mauve}{rgb}{0.58,0.0,0.82} % define the 'mauve' color used by stringstyle below
\lstset{ %
backgroundcolor=\color{white},
basicstyle=\tiny,
breakatwhitespace=false,
breaklines=true,
captionpos=b,
commentstyle=\color{green},
escapeinside={\%*}{*)},
extendedchars=true,
frame=single,
numbers=left,
numbersep=5pt,
numberstyle=\tiny\color{gray},
rulecolor=\color{black},
showspaces=false,
showstringspaces=false,
showtabs=false,
stringstyle=\color{mauve},
tabsize=2,
title=\lstname,
morekeywords={not,\},\{,preconditions,effects },
deletekeywords={time}
}
%% The command below places the light bulb image in the top right corner of the title slide
\begin{document}
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% Title slide
{
\usebackgroundtemplate{\includegraphics[height=55mm,right]{img/top_right_corner.pdf}}
\maketitle
}
\begin{frame}
\frametitle{Overview} % Table of contents slide, comment this block out to remove it
\tableofcontents % Throughout your presentation, if you choose to use \section{} and \subsection{} commands, these will automatically be printed on this slide as an overview of your presentation
\end{frame}
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% Section title slide
\sectionpic{Introduction}{img/section_slide}
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% Regular slides
\begin{frame}{Introduction}
\begin{itemize}
\item This session will introduce you to the basics of Python
\item In the end we will apply this to a web scraping exercise
\item After this session, you'll be able to write and review \textbf{basic} Python code
\item This session does not include how to use datasets in Python
-- instead it will focus on the fundamental building blocks of everything in Python:
data types
\end{itemize}
\end{frame}
\begin{frame}{Introduction - Python for Stata users}
\begin{itemize}
\item There are many great Python courses available for free on the internet -- so why is DIME Analytics making yet another one?
\item This session makes two assumptions not common among the courses already available:
\begin{itemize}
\item We assume that you will use Python for research and not computer science
\item We assume that you are coming from a Stata background
\end{itemize}
\item Many concepts will be explained by referencing concepts in Stata
\end{itemize}
\end{frame}
\begin{frame}{Introduction - Why Python if I already use Stata?}
\begin{itemize}
\item Versatility: you can solve almost any programming task with Python:
\begin{itemize}
\item Web scraping, text analysis, web applications to retrieve data, machine learning
\end{itemize}
\item Much bigger user base
\item Python is open source and free to use!
\item Since it's open source it is easier to run everywhere -- for example on big data servers
\end{itemize}
However, a big part of the user base does not do research or data science,
and libraries for some less frequently used statistical operations
have not yet been developed
\end{frame}
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% Section title slide
\sectionpic{Getting started}{img/section_slide}
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% Regular slides
\begin{frame}{Getting started}
\begin{itemize}
\item We'll use Google Colab for this session: https://colab.research.google.com
\item Colab is similar to a Google doc for coding, and it runs Python by default
\end{itemize}
\end{frame}
\begin{frame}{Getting started - Colab}
\begin{itemize}
\item Go to https://colab.research.google.com
\item Click on \texttt{NEW NOTEBOOK} if you're already logged in, or go to \texttt{File > New notebook} if you're not
\end{itemize}
\begin{multicols}{2}
\begin{figure}
\centering
\includegraphics[width=0.9\linewidth]{img/new_nb_logged_in.png}
\caption{Do this if you're already logged in}
\end{figure}
\begin{figure}
\centering
\includegraphics[width=0.6\linewidth]{img/new_nb_not_logged_in.png}
\caption{Do this if you're not -- you'll be prompted to log in}
\end{figure}
\end{multicols}
\end{frame}
\begin{frame}{Getting started - Colab}
You should end up with something like this in your browser:
\begin{figure}
\centering
\includegraphics[width=0.9\linewidth]{img/colab.png}
\end{figure}
\end{frame}
\begin{frame}{Getting started - Colab}
\begin{itemize}
\item Colab organizes code in blocks -- each block is like its own script
\item To run the code in a block,
click the $\blacktriangleright$ symbol or press \texttt{Ctrl} + \texttt{Enter}
\end{itemize}
\begin{figure}
\centering
\includegraphics[width=0.6\linewidth]{img/block_of_code.png}
\end{figure}
\end{frame}
\begin{frame}{Getting started - Colab}
\begin{itemize}
\item Click on \texttt{+ Code} to add new blocks of code
\end{itemize}
\begin{figure}
\centering
\includegraphics[width=0.6\linewidth]{img/add_code.png}
\end{figure}
\textbf{Important:} Code blocks are a feature of notebook interfaces such as Colab. Most other Python environments don't have this feature
\end{frame}
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% Section title slide
\sectionpic{Python variables}{img/section_slide}
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% Regular slides
\begin{frame}{Python variables}
\begin{itemize}
\item In Stata, variables are columns of a dataframe
\item In Python, variables are everything that we define with a name to be referenced -- more similar to Stata's locals or globals (macros)
\item Nonetheless, while macros in Stata are ``nice to have'' and useful, variables in Python are the building block of everything and you cannot write code without them
\item Variables are also broader than locals, globals or columns in Stata; for example, functions and datasets can be variables in Python
\end{itemize}
\end{frame}
\begin{frame}{Python variables}
Just like in Stata, in Python we use the \texttt{=} operator to create variables
\begin{figure}
\centering
\includegraphics[width=0.6\linewidth]{img/assignation.png}
\end{figure}
\end{frame}
\begin{frame}{Python variables}
This also works when we're trying to replace an existing variable
\begin{figure}
\centering
\includegraphics[width=0.6\linewidth]{img/replace.png}
\end{figure}
\end{frame}
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% Section title slide
\sectionpic{Python basic data types}{img/section_slide}
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% Regular slides
\begin{frame}{Python basic data types}
\begin{itemize}
\item Every Python variable has a data type
\end{itemize}
\begin{figure}
\centering
\includegraphics[width=0.6\linewidth]{img/data_type.png}
\end{figure}
\begin{itemize}
\item Today we will cover the most basic data types:
int/float (numbers), strings, booleans and lists
\item Variables in Python do more than just store data.
They provide operations related to their data type,
for example: adding and removing items from a list, making a string upper case, etc.
\end{itemize}
\end{frame}
\begin{frame}{Python - more on data types}
\begin{itemize}
\item Python has thousands of other data types
\item This is because users can build their own data types based on the built-in types
-- you will frequently use such data types \scriptsize(and we'll use some of them later today) \normalsize
\item For example, a dataset in Python is a variable of the \texttt{pandas DataFrame} type, a custom data type implemented by the Python community
\item All of these custom data types store data
and provide built-in functionality specially implemented for the intended context
\end{itemize}
\end{frame}
\begin{frame}{Python basic data types - \texttt{int}}
The \texttt{x} and \texttt{y} variables we just defined have the data type \texttt{int}
\begin{figure}
\centering
\includegraphics[width=0.6\linewidth]{img/type_int.png}
\end{figure}
\texttt{int} variables are integer numbers. We can do mathematical operations with them
\begin{figure}
\centering
\includegraphics[width=0.6\linewidth]{img/math_integers.png}
\end{figure}
\end{frame}
\begin{frame}{Python basic data types - \texttt{float}}
\texttt{float} variables, on the other hand, represent real numbers
-- we can do mathematical operations with floats as well
\begin{figure}
\centering
\includegraphics[width=0.6\linewidth]{img/type_float.png}
\end{figure}
\begin{itemize}
\item Python is what's called ``\textit{dynamically typed}'',
which means that you do not need to indicate what data type you want
\item It detects when a variable is an integer, floating point (decimal number),
text, etc. as long as it is a built-in data type.
\end{itemize}
\end{frame}
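\begin{frame}[fragile]{Python basic data types - numbers: a minimal code sketch}
A minimal sketch of the number operations shown above (the variable names are only illustrative):
\begin{lstlisting}[language=Python]
x = 10          # int
z = 2.5         # float
print(x + 4, x * 4)       # 14 40
print(x / 4)              # 2.5 -- division always returns a float
print(type(x), type(z))   # <class 'int'> <class 'float'>
\end{lstlisting}
\end{frame}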
\begin{frame}{Python basic data types - \texttt{str}}
\texttt{str} variables are strings with text
\begin{figure}
\centering
\includegraphics[width=0.6\linewidth]{img/string_type.png}
\end{figure}
\textbf{Note:} A variable can be used across code blocks
-- this is common in all notebook-styled Python interfaces, like Colab
\end{frame}
\begin{frame}{Python basic data types - \texttt{str}}
Python allows two types of ``mathematical'' operations with \texttt{str}: \texttt{+} and \texttt{*}
\begin{figure}
\centering
\includegraphics[width=0.6\linewidth]{img/string_operations.png}
\end{figure}
\end{frame}
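\begin{frame}[fragile]{Python basic data types - \texttt{str}: a minimal code sketch}
A minimal sketch of the two string operations described above (the variable names are only illustrative):
\begin{lstlisting}[language=Python]
first = "Hello"
second = "world"
print(first + " " + second)   # 'Hello world' -- + concatenates
print(first * 3)              # 'HelloHelloHello' -- * repeats
\end{lstlisting}
\end{frame}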
\begin{frame}{Python basic data types - Lists}
\begin{itemize}
\item A list is a variable that groups other variables
\item Lists can have different data types in them at the same time. They can even include other lists!
\item Lists are defined by enclosing their values in brackets and separating them with commas
\end{itemize}
\begin{figure}
\centering
\includegraphics[width=0.8\linewidth]{img/list_type.png}
\end{figure}
\end{frame}
\begin{frame}{Python basic data types - Lists}
We can index lists
\begin{figure}
\centering
\includegraphics[width=0.6\linewidth]{img/list_index.png}
\end{figure}
\textbf{Important:} Python starts indexing at zero, not at one
\end{frame}
\begin{frame}{Python basic data types - Lists}
We can subset lists
\begin{figure}
\centering
\includegraphics[width=0.6\linewidth]{img/list_subset.png}
\end{figure}
\textbf{Important:} When subsetting a list with \texttt{[a:b]}, Python will include the element at position \texttt{a} \textbf{but will exclude the one at position \texttt{b}}
\newline \scriptsize hence \texttt{my\_list[1:4]} returns the elements at positions 1, 2, 3 \normalsize
\end{frame}
\begin{frame}{Python basic data types - Lists}
We can also use negative indices: they represent the elements of a list starting from the end
\begin{figure}
\centering
\includegraphics[width=0.6\linewidth]{img/list_subset_negative.png}
\end{figure}
\end{frame}
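\begin{frame}[fragile]{Python basic data types - Lists: a minimal code sketch}
A minimal sketch of indexing, subsetting and negative indices as described above (the list contents are only illustrative):
\begin{lstlisting}[language=Python]
my_list = [1, "two", 3.0, [4, 5], True]
print(my_list[0])     # 1 -- indexing starts at zero
print(my_list[1:4])   # ['two', 3.0, [4, 5]] -- position 4 is excluded
print(my_list[-1])    # True -- negative indices count from the end
\end{lstlisting}
\end{frame}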
\begin{frame}{Python basic data types - Lists}
To add new elements to existing lists, we use \texttt{.append()}
\begin{figure}
\centering
\includegraphics[width=0.6\linewidth]{img/list_append.png}
\end{figure}
Note that this will modify our list variable in-place -- it's not necessary to define the result as a new variable with \texttt{=} when we use \texttt{.append()}
\end{frame}
\begin{frame}{Python basic data types - Lists}
We can use the \texttt{+} and \texttt{*} operators with lists
\begin{figure}
\centering
\includegraphics[width=0.6\linewidth]{img/list_operations.png}
\end{figure}
\end{frame}
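\begin{frame}[fragile]{Python basic data types - Lists: append and operators, a code sketch}
A minimal sketch of \texttt{.append()} and of the \texttt{+} and \texttt{*} operators described above (values are only illustrative):
\begin{lstlisting}[language=Python]
my_list = [1, 2, 3]
my_list.append(4)         # modifies the list in place
print(my_list)            # [1, 2, 3, 4]
print(my_list + [5, 6])   # [1, 2, 3, 4, 5, 6] -- returns a new list
print([0] * 3)            # [0, 0, 0]
\end{lstlisting}
\end{frame}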
\begin{frame}{Python basic data types - Booleans}
Booleans (\texttt{bool}) are variables representing boolean values -- either \texttt{True} or \texttt{False}
\begin{figure}
\centering
\includegraphics[width=0.6\linewidth]{img/bool.png}
\end{figure}
\end{frame}
\begin{frame}{Python basic data types - Booleans}
\begin{itemize}
\item We can create booleans by direct assignment or with boolean expressions
\item When using direct assignment, Python recognizes booleans when they are written without quotes and with the first character in uppercase and the rest in lowercase
\end{itemize}
\begin{figure}
\centering
\includegraphics[width=0.5\linewidth]{img/bool_assignation.png}
\end{figure}
\end{frame}
\begin{frame}{Python basic data types - Booleans}
Some operators for boolean expressions are \texttt{==}, \texttt{>}, \texttt{>=}, \texttt{<}, \texttt{<=}, and \texttt{in} (to check if an element is part of a list)
\begin{figure}
\centering
\includegraphics[width=0.6\linewidth]{img/bool_in.png}
\end{figure}
\end{frame}
\begin{frame}{Python basic data types - Booleans}
We can do logical operations with booleans using \texttt{and}, \texttt{or}
\begin{multicols}{2}
\begin{figure}
\centering
\includegraphics[width=\linewidth]{img/and.png}
\end{figure}
\begin{figure}
\centering
\includegraphics[width=\linewidth]{img/or.png}
\end{figure}
\end{multicols}
\end{frame}
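\begin{frame}[fragile]{Python basic data types - Booleans: a minimal code sketch}
A minimal sketch of boolean expressions and logical operators as described above (values are only illustrative):
\begin{lstlisting}[language=Python]
my_list = [1, 2, 3]
a = 5 > 3          # True
b = 2 in my_list   # True
c = 10 <= 4        # False
print(a and b)     # True
print(a and c)     # False
print(a or c)      # True
\end{lstlisting}
\end{frame}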
\begin{frame}{Python basic data types}
\begin{itemize}
\item Until now, we've reviewed what Python variables and basic data types are
\item Importantly, these are the building blocks of everything you do in Python
\item It is simply impossible to perform any task if you do not know how to work with the basic data types first
\end{itemize}
\end{frame}
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% Section title slide
\sectionpic{Python basic syntax}{img/section_slide}
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% Regular slides
\begin{frame}{Basic syntax - Attributes}
\begin{itemize}
\item Attributes are very often used when programming in Python
\item They do one of two things:
\begin{enumerate}
\item Attributes transform a variable in-place
\begin{itemize}
\item For example: \texttt{.append()}, an attribute of list variables
\end{itemize}
\end{enumerate}
\end{itemize}
\begin{figure}
\centering
\includegraphics[width=0.6\linewidth]{img/attributes_append.png}
\end{figure}
\end{frame}
\begin{frame}{Basic syntax - Attributes}
2. Other attributes, by contrast, return a transformation of a variable without modifying the original
\begin{itemize}
\item For example: \texttt{.lower()} and \texttt{.upper()}, attributes of string variables
\end{itemize}
\begin{figure}
\centering
\includegraphics[width=0.6\linewidth]{img/string_lower_upper.png}
\end{figure}
\end{frame}
\begin{frame}{Basic syntax - Attributes}
\begin{itemize}
\item Each data type has specific attributes. They relate to the built-in functionalities each data type has
\item The syntax of attributes is \textit{almost} always: \texttt{VARIABLE\_NAME.ATTRIBUTE\_NAME(INPUTS\_IF\_ANY)}
\end{itemize}
\end{frame}
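\begin{frame}[fragile]{Basic syntax - Attributes: a minimal code sketch}
A minimal sketch contrasting an attribute that returns a transformed copy with one that modifies the variable in place (names are only illustrative):
\begin{lstlisting}[language=Python]
word = "Stata"
print(word.upper())   # 'STATA' -- returns a new string
print(word)           # 'Stata' -- the original is unchanged
numbers = [1, 2]
numbers.append(3)     # modifies the list in place
print(numbers)        # [1, 2, 3]
\end{lstlisting}
\end{frame}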
\begin{frame}{Basic syntax - Looping}
\begin{itemize}
\item Many data types in Python belong to a group called iterables -- variables you can loop through
\item Lists are the most commonly used iterable: if we put a list in a loop, Python will loop through every one of its elements
\item \texttt{int} and \texttt{float} are examples of non-iterable data types
\end{itemize}
\end{frame}
\begin{frame}{Basic syntax - Looping}
\begin{figure}
\centering
\includegraphics[width=0.6\linewidth]{img/list_loop.png}
\end{figure}
\end{frame}
\begin{frame}{Basic syntax - Looping}
\textbf{Important:}
Python knows what is inside the loop and where it ends by its indentation -- this works similarly to the \texttt{\{ \}} symbols you use to open and close a loop in Stata
\begin{multicols}{2}
\begin{figure}
\centering
\includegraphics[width=0.86\linewidth]{img/list_loop.png}
\end{figure}
\begin{figure}
\centering
\includegraphics[width=0.55\linewidth]{img/loop_stata.png}
\end{figure}
\end{multicols}
\end{frame}
\begin{frame}{Basic syntax - Looping}
\textbf{Important:}
\begin{multicols}{2}
\begin{itemize}
\item Indentation can have two or four spaces depending on your Python interface. In any case, you can also press the \texttt{tab} key to create indented space
\item If you ever run the script of a colleague who uses different indentation, Python will automatically know the correct one. All that matters is that indentation is consistent within the same script
\end{itemize}
\begin{figure}
\centering
\includegraphics[width=\linewidth]{img/list_loop.png}
\end{figure}
\end{multicols}
\end{frame}
\begin{frame}{Basic syntax - Looping}
Strings are also iterables: Python loops through every character in them
\begin{figure}
\centering
\includegraphics[width=0.55\linewidth]{img/string_loop.png}
\end{figure}
\end{frame}
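\begin{frame}[fragile]{Basic syntax - Looping: a minimal code sketch}
A minimal sketch of looping over a list and over a string, with the loop body marked by indentation (names are only illustrative):
\begin{lstlisting}[language=Python]
countries = ["Peru", "Kenya", "India"]
for country in countries:   # the indented line below is the loop body
    print(country)
for letter in "Stata":      # strings are iterables too
    print(letter)
\end{lstlisting}
\end{frame}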
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% Section title slide
\sectionpic{Annex}{img/section_slide}
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% Regular slides
\begin{frame}{Python basic data types - Tuples}
Tuples, like lists, group other variables. They are defined in parentheses, with their elements separated by commas.
\begin{figure}
\centering
\includegraphics[width=0.55\linewidth]{img/tuple.png}
\end{figure}
\end{frame}
\begin{frame}{Python basic data types - Tuples}
Tuples are very similar to lists in that both support indexing and subsetting
\begin{figure}
\centering
\includegraphics[width=0.55\linewidth]{img/tuple_index_subset.png}
\end{figure}
\end{frame}
\begin{frame}{Python basic data types - Tuples}
The crucial difference between them is that tuples are immutable: once defined, we can't add new elements to them or replace the existing ones
\begin{figure}
\centering
\includegraphics[width=0.55\linewidth]{img/tuple_error.png}
\end{figure}
\end{frame}
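\begin{frame}[fragile]{Python basic data types - Tuples: a minimal code sketch}
A minimal sketch of tuple indexing, subsetting and immutability as described above (values are only illustrative):
\begin{lstlisting}[language=Python]
my_tuple = (1, "two", 3.0)
print(my_tuple[0])     # 1
print(my_tuple[0:2])   # (1, 'two')
try:
    my_tuple[0] = 99   # tuples are immutable, so this raises an error
except TypeError as error:
    print("error:", error)
\end{lstlisting}
\end{frame}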
\begin{frame}{Basic syntax - Conditional expressions}
\textbf{Conditional expressions:}
\texttt{if}, \texttt{elif}, and \texttt{else} are used to define conditional operations. They also use indented space
\begin{multicols}{3}
\begin{figure}
\centering
\includegraphics[width=\linewidth]{img/if.png}
\end{figure}
\begin{figure}
\centering
\includegraphics[width=\linewidth]{img/elif.png}
\end{figure}
\begin{figure}
\centering
\includegraphics[width=\linewidth]{img/else.png}
\end{figure}
\end{multicols}
\end{frame}
\begin{frame}{Basic syntax - Conditional expressions}
\begin{itemize}
\item Instead of a boolean expression we can use a boolean value with \texttt{if} or \texttt{elif}
\item \texttt{if} doesn't necessarily need to be used with \texttt{elif} or with \texttt{else}; we can use it alone
\end{itemize}
\begin{multicols}{2}
\begin{figure}
\centering
\includegraphics[width=\linewidth]{img/boolean_variables.png}
\end{figure}
\begin{figure}
\centering
\includegraphics[width=\linewidth]{img/if_alone.png}
\end{figure}
\end{multicols}
\end{frame}
\begin{frame}{Basic syntax - Conditional expressions}
We can also use \texttt{if} and \texttt{elif} without \texttt{else}
\begin{multicols}{2}
\begin{figure}
\centering
\includegraphics[width=\linewidth]{img/boolean_variables.png}
\end{figure}
\begin{figure}
\centering
\includegraphics[width=\linewidth]{img/if_elif.png}
\end{figure}
\end{multicols}
\end{frame}
\begin{frame}{Basic syntax - Conditional expressions}
And we can use \texttt{if} and \texttt{else} without \texttt{elif}
\begin{multicols}{2}
\begin{figure}
\centering
\includegraphics[width=\linewidth]{img/boolean_variables.png}
\end{figure}
\begin{figure}
\centering
\includegraphics[width=\linewidth]{img/if_else.png}
\end{figure}
\end{multicols}
\end{frame}
\begin{frame}{Basic syntax - Conditional expressions}
\begin{itemize}
\item If a boolean expression returned \texttt{True} for conditions in both \texttt{if} and \texttt{elif}, only the operations under \texttt{if} would be executed
\item If more than one boolean expression under several \texttt{elif} conditions were to return \texttt{True}, only the operations under the first \texttt{elif} condition evaluated to \texttt{True} would be executed
\end{itemize}
\begin{multicols}{2}
\begin{figure}
\centering
\includegraphics[width=\linewidth]{img/if_and_elif_true.png}
\end{figure}
\begin{figure}
\centering
\includegraphics[width=\linewidth]{img/elif_and_elif_true.png}
\end{figure}
\end{multicols}
\end{frame}
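\begin{frame}[fragile]{Basic syntax - Conditional expressions: a minimal code sketch}
A minimal sketch of \texttt{if}, \texttt{elif} and \texttt{else}, where only the first condition that evaluates to \texttt{True} runs (values are only illustrative):
\begin{lstlisting}[language=Python]
x = 7
if x > 10:
    print("big")
elif x > 5:
    print("medium")   # this branch runs, since 7 > 5
else:
    print("small")
\end{lstlisting}
\end{frame}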
\end{document}
| {
"alphanum_fraction": 0.7219202899,
"avg_line_length": 28.1992337165,
"ext": "tex",
"hexsha": "dde7db306b29624a3770c55d74d42e88f0c0792f",
"lang": "TeX",
"max_forks_count": 2,
"max_forks_repo_forks_event_max_datetime": "2021-06-04T01:04:21.000Z",
"max_forks_repo_forks_event_min_datetime": "2021-05-11T22:45:05.000Z",
"max_forks_repo_head_hexsha": "31ed34d7053eedbf5b73bc4af22c2f819ca607a9",
"max_forks_repo_licenses": [
"MIT"
],
"max_forks_repo_name": "buscandoaverroes/dime-python-training",
"max_forks_repo_path": "I - Introduction/slides/intro-to-python-CE-part1.tex",
"max_issues_count": 7,
"max_issues_repo_head_hexsha": "31ed34d7053eedbf5b73bc4af22c2f819ca607a9",
"max_issues_repo_issues_event_max_datetime": "2022-01-28T16:23:13.000Z",
"max_issues_repo_issues_event_min_datetime": "2021-03-02T21:58:25.000Z",
"max_issues_repo_licenses": [
"MIT"
],
"max_issues_repo_name": "buscandoaverroes/dime-python-training",
"max_issues_repo_path": "I - Introduction/slides/intro-to-python-CE-part1.tex",
"max_line_length": 217,
"max_stars_count": 14,
"max_stars_repo_head_hexsha": "31ed34d7053eedbf5b73bc4af22c2f819ca607a9",
"max_stars_repo_licenses": [
"MIT"
],
"max_stars_repo_name": "buscandoaverroes/dime-python-training",
"max_stars_repo_path": "I - Introduction/slides/intro-to-python-CE-part1.tex",
"max_stars_repo_stars_event_max_datetime": "2022-03-16T10:44:02.000Z",
"max_stars_repo_stars_event_min_datetime": "2021-03-04T14:44:20.000Z",
"num_tokens": 6396,
"size": 22080
} |
\documentclass{article}
\makeatletter
\newcommand*{\rom}[1]{\expandafter\@slowromancap\romannumeral #1@}
\makeatother
\usepackage[utf8x]{inputenc}
\setlength{\parindent}{0em}
\textwidth=6.5in
\oddsidemargin=0pc
\evensidemargin=0pc
\topmargin=-2pc
\headsep=0.05in
\headheight=0pc
\textheight=9.0in
\setlength{\parskip}{1em}
%hopefully I can draw a tree with this
\usepackage[shortlabels]{enumitem}
\usepackage{array}
\usepackage{wrapfig}
\usepackage{multirow}
\usepackage{tabu}
\usepackage{comment}
\usepackage{amsmath}
\usepackage{algorithm}
\usepackage{algpseudocode}
\usepackage[table]{xcolor}
\usepackage{verbatim}
%this I used to draw graph
\usepackage{tikz}
\usetikzlibrary{positioning}
\usepackage{changes}
\usepackage{adjustbox}
\usepackage{float}
\usepackage{caption}
\newcommand\x{\times}
\newcommand\y{\cellcolor{olive!10}}
\usepackage{color,soul}
\newcommand{\hlc}[2][yellow]{ {\sethlcolor{#1} \hl{#2}} }
\title{Data Structures and Algorithms \rom{2}\\Homework 5}
\author{Jue,Guo}
\date{April 2019}
\begin{document}
\maketitle
\renewcommand{\thesection}{\Roman{section}}
Reference: Algorithm Design By Jon Kleinberg, Eva Tardos
\section{Efficient Algorithm For Both?}
Since I got quite confused in class, I would like to use this homework as a chance to review, and see it as an opportunity to understand the material in my own way and, hopefully, in the correct way.
So, I think that to solve question 1 we need to refer to the idea of polynomial-time reduction. The key technique here is to compare the difficulty of different problems. Often, we say ``Problem $X$ is at least as hard as Problem $Y$''. We will formalize this using the idea of a ``reduction''. Basically, we are saying that $X$ is powerful enough to solve $Y$.
To be precise, we add the assumption that $X$ can be solved in polynomial time using our model of computation, like the black box $H$ we talked about in class, which will ``magically'' return the correct answer.
Since we can magically solve problem $X$ in polynomial time, we ask ourselves: can we use this magical box--which, given an instance of $X$, returns the right answer in polynomial time--to solve $Y$? How is it related to $Y$?
In short, we are asking: can arbitrary instances of problem $Y$ be solved using a polynomial number of standard computational steps, plus a polynomial number of calls to a black box that solves problem $X$?
And here, I think, the notation used in the reference and in \textit{Introduction to Algorithms} helps me understand it better: $Y \leq_{P} X$.
This means that if we answer yes to this question, we are saying that $Y$ is polynomial-time reducible to $X$, or that $X$ is at least as hard as $Y$.
One way of understanding it that makes the wording easier: when we ask about reductions to a problem $X$, it is as if we turn our computational model into an embedded PC whose job is to solve instances of $X$ in polynomial time. Some people like to phrase this analogy as saying that this piece of hardware can solve $X$ in a single step, which I think is pretty much the same thing. In the end, we are asking for the upper limit of this wondrous piece of hardware.
Suppose, $Y \leq_{P} X$ and there exists a polynomial time algorithm to solve X. We now replace that embedded PC with the actual polynomial time algorithm for X. Consider what happens to our algorithm for problem Y that involved a polynomial number of steps plus a polynomial number of calls to the embedded PC. It now becomes an algorithm that involves a polynomial number of steps, plus a polynomial number of calls to a subroutine that runs in polynomial time; in other words, it has become a polynomial-time algorithm.
Then we have the following fact:
\begin{equation}
\text{Suppose $Y \leq_{P}X$. If X can be solved in polynomial time, then Y can be solved in polynomial time.}
\end{equation}
After we got the basics out of the way, I would like to paraphrase questions (a) and (b):\\
(a).\textit{Given a graph \(G,\) and a number \(k,\) does \(G\) contain an independent set of size at least \(k ?\)} \\
(b).\textit{Given a graph \(G\) and a number \(l,\) does \(G\) contain a vertex cover of size at most \(l ?\)} \\
\begin{center}
\begin{tikzpicture}[
roundnode/.style={circle, draw=black!60, fill=white!5, very thick, minimum size=7mm},
]
%Nodes
\node[roundnode](node4){4};
\node[roundnode](node3)[left=of node4] {3};
\node[roundnode](node5)[right=of node4] {5};
%lower level nodes
\node[roundnode](node7)[below=of node4,xshift=10mm] {7};
\node[roundnode](node6)[below=of node4,xshift=-10mm] {6};
%make the upper the nodes
\node[roundnode](node1)[above=of node4,xshift=-10mm] {1};
\node[roundnode](node2)[above=of node4,xshift=10mm] {2};
%Lines
\draw[-] (node2) -- (node4) -- (node7);
\draw[-] (node3) -- (node2) -- (node5);
\draw[-] (node3) -- (node7) -- (node5);
\draw[-] (node3) -- (node1) -- (node2);
\draw[-] (node3) -- (node6) -- (node7);
\end{tikzpicture}
\end{center}
Recall the definition of an independent set: in a graph $G=(V, E)$, we say a set of nodes $S\subseteq V$ is independent if no two nodes in $S$ are joined by an edge. We know it is easy to find a small independent set--a single node is trivially an independent set. In the graph above, we notice that $\{3,4,5\}$ is an independent set of size 3, but can we get an independent set bigger than 3? Indeed we can: the set of nodes $\{1,4,5,6\}$ is a larger independent set.
Note: there is essentially no difference between the \textit{optimization version} of the problem (find the largest independent set) and the \textit{decision version} (does $G$ have an independent set of size at least some integer $k$?). If we come up with an algorithm that solves the optimization version, then we automatically solve the decision version--a no-brainer statement. Something less obvious but important: if we can solve the decision version for every integer $k$, then we can also find the size of a maximum independent set. In plain English, for the question ``what is the size of the maximum independent set?'' we write a polynomial-time algorithm that returns 4 in our case; for the question ``is the size of the maximum independent set at least 4?'' we call essentially the same subroutine and add a check (a Boolean comparison, doable in linear time) that returns true/false. All of this is just to show the simple equivalence between the decision and optimization versions.
In the ``Note'', we clarified the relationship between the \textit{decision} and \textit{optimization} versions. Now we move on to problem (b) and the smallest vertex cover problem, which, in plain English, asks: can you find a minimum set of vertices that covers all the edges of a given graph? As before, it is easy to find a large vertex cover--just include all the vertices--but hard to find the smallest one.
Now, we know they are hard problems. But how do we show their relative difficulty? We need to show that Vertex Cover $\leq_{P}$ Independent Set and also Independent Set $\leq_{P}$ Vertex Cover.
$//$ This can be seen as the $f$, the transformation between the two problems\\
To do this, we need a relationship between Independent Set and Vertex Cover. This relationship can easily be found in our textbook, but we need to prove it in order to agree with it.
\begin{equation}
\begin{array}{l}{\text {Let } G=(V, E) \text { be a graph. Then } S \text { is an independent set if and only if}} \\ {\text {its complement } V-S \text { is a vertex cover. }}\end{array}
\end{equation}
\textbf{Proof.} Suppose that $S$ is an independent set. Consider an edge $e=(u,v)$. Since $S$ is independent, $u$ and $v$ cannot both be in $S$, so one of them must be in $V/S$. Following this idea, for every edge we look at, at least one end is in $V/S$, so $V/S$ is a vertex cover.
Conversely, if $V/S$ is a vertex cover, consider any nodes $u$ and $v$ in $S$. If $u$ and $v$ were joined by an edge $e$, then neither end of $e$ would fall into $V/S$, which contradicts the assumption that $V/S$ is a vertex cover. So no two nodes in $S$ are joined by an edge, and $S$ is an independent set.
$//$ The relationship here can be seen as the ``h'', showing that a
solution translates both ways. \\
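As a quick sanity check of (2) on the example graph above: take the independent set $S=\{1,4,5,6\}$. Its complement is $V-S=\{2,3,7\}$, and every edge of the graph--$(1,2)$, $(1,3)$, $(2,3)$, $(2,4)$, $(2,5)$, $(3,6)$, $(3,7)$, $(4,7)$, $(5,7)$, $(6,7)$--has at least one endpoint in $\{2,3,7\}$, so $V-S$ is indeed a vertex cover, of size $7-4=3$.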
Therefore, our reductions are clear from (2) (we want to show that the problem can be reduced in either direction):
\begin{enumerate}
\item Independent Set \(\leq_{P}\) Vertex Cover.\\
\textbf{Proof.} If we have an embedded PC to solve Vertex Cover, then we can decide whether G has an independent set of size at least $k$ by asking the embedded PC whether G has a vertex cover of size at most $n − k$.
\item Vertex Cover \(\leq_{P}\) Independent Set.\\
\textbf{Proof.} If we have an embedded PC to solve Independent Set, then we can decide whether G has a vertex cover of size at most $l$ by asking the embedded PC whether G has an independent set of size at least $n − l$.
\end{enumerate}
From Homework 3, we know that there is a polynomial-time algorithm for Maximum Independent Set. So we see that there is an efficient algorithm for both of them.
\section{Exact 4-SAT is NP-complete}
Recall from class the steps of proving that a problem X is NP-complete.
\begin{enumerate}
\item Prove X $\in$ NP;
\item Pick an known NP-complete problem Y and show that it reduces to X;
\begin{enumerate}
\item define f to convert instance of Y to instance of X;
\item define h to convert X solution to Y solution;
\item solution to Y $\Rightarrow$ solution to X
\item solution to X $\Rightarrow$ solution to Y
\end{enumerate}
\end{enumerate}
In class, we proved that 3SAT is NP-complete. Some people find it easy to see, but that is not the case for me, so I will walk through it again.
SATISFIABILITY (or SAT) is the problem of determining whether there exists an interpretation that satisfies a given Boolean formula. In other words, it asks whether the variables of a given Boolean formula can consistently be replaced by the values True or False in such a way that the formula evaluates to True. If this is the case, the formula is called satisfiable.
SAT is NP-complete--because Cook and Levin said so (the Cook--Levin theorem).
Because SAT is NP-complete, we can use it to prove the NP-completeness of 3SAT.
\begin{enumerate}
\item 3SAT $\in$ NP, because to determine whether a boolean expression E in CNF is satisfiable, nondeterministically guess values for all the variables and then evaluate the expression. If E turns out to be true, then accept. This can be carried out in nondeterministic polynomial time. Thus 3SAT is in NP.
\item We know that SAT is an NP-complete problem; we need to show that it can be reduced to 3SAT, thereby proving that 3SAT is also NP-complete.
\begin{enumerate}
\item A SAT instance has $m$ clauses; consider an arbitrary clause
$(a_1\vee a_2 \vee a_3 \vee \dots \vee a_k)$
\begin{enumerate}
\item the function f converts this clause to (a small worked instance follows this list)
$$(a_1\vee a_2 \vee y_2)(\Bar{y_2}\vee a_3\vee y_3)\cdots(\Bar{y_{k-3}} \vee a_{k-2} \vee y_{k-2})(\Bar{y_{k-2}}\vee a_{k-1} \vee a_k)$$
\end{enumerate}
\item The function h takes a 3SAT truth assignment and ignores the auxiliary variables.
\begin{enumerate}
\item Suppose the SAT instance has a solution. Then our arbitrary clause must be satisfied, so some $a_i$ is True.
\begin{enumerate}[-]
\item Consider the clause $(\Bar{y_{i-1}}\vee a_i \vee y_i)$ that contains $a_i$.
Let's set $y_{i-1}$ = T and $y_i$ = F; looking at the CNF above, we set the auxiliary variables to the left of this clause to true and the ones to the right to false. Doing this for every original clause, we obtain a 3SAT solution.
\end{enumerate}
\item Suppose the 3SAT instance has a solution. We claim $a_i$ = T for some $i$. Suppose not: all $a_i =$ F. Then the 3SAT clauses depend only on the $y_i$'s; the first clause forces $y_2 =$ T, the next then forces $y_3 =$ T, and so on, until the last clause cannot be satisfied--a contradiction. So $a_i =$ T for some $i$, and therefore there is a solution for SAT.
\end{enumerate}
\end{enumerate}
\end{enumerate}
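To make the transformation concrete, here is a small worked instance (chosen only for illustration): for $k=5$, the clause $(a_1\vee a_2\vee a_3\vee a_4\vee a_5)$ becomes
$$(a_1\vee a_2 \vee y_2)(\Bar{y_2}\vee a_3\vee y_3)(\Bar{y_3}\vee a_4 \vee a_5),$$
with auxiliary variables $y_2, y_3$. If, say, $a_4$ is the true literal, setting $y_2 =$ T and $y_3 =$ T satisfies all three 3SAT clauses; conversely, if all $a_i$ are false, the first clause forces $y_2 =$ T, the second forces $y_3 =$ T, and the third is then violated.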
After walking through the steps and the idea presented in class, we now know that 3SAT is NP-complete. So we can move on to prove that EXACT 4-SAT is NP-complete using the same idea.
\begin{enumerate}
\item First, EXACT 4-SAT is in NP: we can write a non-deterministic polynomial-time algorithm which takes an EXACT 4-SAT instance and a proposed truth assignment as
input. This algorithm evaluates the EXACT 4-SAT instance with the truth assignment. If the instance evaluates to true, the algorithm outputs yes;
otherwise, the algorithm outputs no. This runs in polynomial time.
\item We know that 3SAT is an NP-complete problem; we need to show that it can be reduced to EXACT 4-SAT, thereby proving that EXACT 4-SAT is also NP-complete.
\begin{enumerate}
\item A 3SAT instance has $m$ clauses; consider an arbitrary clause $(a_1 \vee a_2 \vee a_3)$
\begin{enumerate}
\item the function f converts this clause to
$$(a_1 \vee a_2 \vee a_3 \vee y) \wedge(a_1 \vee a_2 \vee a_3 \vee \Bar{y})$$
where $y$ is a fresh auxiliary variable (a small worked instance follows this list).
\end{enumerate}
\item The function h takes a 4SAT truth assignment and ignores the auxiliary variables.
\begin{enumerate}
\item Suppose the 3SAT instance has a solution. Then our arbitrary clause must be satisfied, so some literal is True. This immediately gives a solution to our EXACT 4SAT instance: since $a_1$, $a_2$, or $a_3$ is true and the literals are joined by ``or'', the auxiliary variable is irrelevant in this case.
\item Suppose the EXACT 4SAT instance has a solution. We claim that some literal $a_i$ is True. Suppose not: if all three literals are false, the pair of clauses reduces to $y \wedge \Bar{y}$, which is False no matter how $y$ is set, so the instance would not be satisfiable. Hence some $a_i$ is True, and ignoring the auxiliary variable gives a solution to the 3SAT instance.
\end{enumerate}
\end{enumerate}
\end{enumerate}
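Since the reduction above is completely mechanical, a small code sketch may help. The following Python fragment is my own illustration, not part of the assignment; it assumes each clause is a tuple of nonzero integers, with a negative integer standing for a negated literal, and it allocates a fresh auxiliary variable for every clause.
\begin{verbatim}
# f: turn each 3SAT clause into two EXACT 4SAT clauses using a fresh y.
def f(three_sat_clauses, num_vars):
    out, y = [], num_vars + 1
    for (a1, a2, a3) in three_sat_clauses:
        out.append((a1, a2, a3, y))    # (a1 v a2 v a3 v y)
        out.append((a1, a2, a3, -y))   # (a1 v a2 v a3 v not y)
        y += 1
    return out

# h: a satisfying EXACT 4SAT assignment restricted to the original
# variables is a satisfying 3SAT assignment, so drop the auxiliaries.
def h(assignment, num_vars):
    return {v: val for v, val in assignment.items() if v <= num_vars}
\end{verbatim}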
In conclusion, EXACT 4SAT is NP-complete.
\newpage
\section{Graph Coloring and Course Scheduling}
We know from the problem statement that $k$-coloring is NP-complete. The question: given a fixed number of time slots, can the classes be scheduled with no conflicts? The time slots can be seen as our ``$k$''.\\
\begin{center}
%this is figure 1
\begin{minipage}[b]{.3\linewidth}
\begin{tikzpicture}[
roundnode/.style={circle, draw=black!60, fill=white!5, very thick, minimum size=7mm},
]
%Nodes
\node[roundnode](nodeT){T};
\node[roundnode](nodeF)[left=of nodeT] {F};
%make the upper the nodes
\node[roundnode](nodeBase)[above=of nodeT,xshift=-10mm] {B};
%Lines
\draw[-] (nodeT) -- (nodeF) -- (nodeBase)--(nodeT);
\end{tikzpicture}
\captionof{figure}{3-coloring (example)}
\end{minipage}
%Figure 2
\begin{minipage}[b]{.3\linewidth}
\begin{tikzpicture}[
roundnode/.style={circle, draw=black!60, fill=white!5, very thick, minimum size=7mm},
]
%Nodes
\node[roundnode](nodeT){Class};
\node[roundnode](nodeF)[left=of nodeT] {$\overline{\text{Class}}$};
%make the upper the nodes
\node[roundnode](nodeBase)[above=of nodeT,xshift=-10mm] {B};
%Lines
\draw[-] (nodeT) -- (nodeF) -- (nodeBase)--(nodeT);
\end{tikzpicture}
\captionof{figure}{course schedule graph}
\end{minipage}
\end{center}
\begin{enumerate}
\item We can easily see that Course Scheduling is in NP: given a proposed assignment of classes to time slots, we simply check whether there is a conflict or not, and so verify whether the assignment is right or wrong. This check runs in polynomial time.
\item We know from the problem that $k$-coloring is an NP-complete problem; we will prove that course scheduling is NP-complete by reducing the $k$-coloring problem to the course scheduling problem.
\begin{enumerate}
\item The "f" from my understanding by talking more in-depth with professor Ballard, well from my perspective, probably not so for him, is converting our know problem,k-coloring,to something resemble our asked problem,course scheduling. In this case I think it is pretty obvious. We can imagine a graph where the vertex is the classes with conflicts and they are connected by the edges. And in order to avoid a time conflict we need to assign different colors, or time slot, to the classes. And no two classes can have the same time slot, meaning, two vertex connect by the same edge can not have the same color.
\item Now,we need to find the "h".
\begin{enumerate}
\item Suppose that we have a solution for our $k$-coloring instance, meaning that no two vertices joined by the same edge share the same color. Then we also have a solution to the course scheduling instance, since no two conflicting classes are given the same time slot (color).
\item The same argument also runs from course scheduling to $k$-coloring: no two conflicting classes have the same time slot $\rightarrow$ no two vertices connected by the same edge have the same color.
\end{enumerate}
\end{enumerate}
\end{enumerate}
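As promised above, here is a small sketch of the correspondence. It is my own illustration rather than anything required by the problem, and it assumes a $k$-coloring instance is given as a vertex list, an edge list, and the number $k$.
\begin{verbatim}
# f: a k-coloring instance becomes a course-scheduling instance.
def f(vertices, edges, k):
    classes = list(vertices)           # each vertex is a class
    conflicts = list(edges)            # each edge is a scheduling conflict
    num_slots = k                      # each color is a time slot
    return classes, conflicts, num_slots

# h: a conflict-free schedule, read back as a coloring (slot = color).
def h(schedule):                       # schedule: class -> time slot
    return dict(schedule)

def is_conflict_free(schedule, conflicts):
    return all(schedule[u] != schedule[v] for (u, v) in conflicts)
\end{verbatim}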
In conclusion, course scheduling is NP-complete.
\end{document}; | {
"alphanum_fraction": 0.7325830654,
"avg_line_length": 67.7177419355,
"ext": "tex",
"hexsha": "d8c3518c1b57f6ed1c6c783f96ffbb137f2d82ac",
"lang": "TeX",
"max_forks_count": null,
"max_forks_repo_forks_event_max_datetime": null,
"max_forks_repo_forks_event_min_datetime": null,
"max_forks_repo_head_hexsha": "cd1ae5128c9206a220fb3d2c5457aa1fee9816fe",
"max_forks_repo_licenses": [
"MIT"
],
"max_forks_repo_name": "BobGuo-1995/More-tex-code",
"max_forks_repo_path": "CSC222/HW5.tex",
"max_issues_count": null,
"max_issues_repo_head_hexsha": "cd1ae5128c9206a220fb3d2c5457aa1fee9816fe",
"max_issues_repo_issues_event_max_datetime": null,
"max_issues_repo_issues_event_min_datetime": null,
"max_issues_repo_licenses": [
"MIT"
],
"max_issues_repo_name": "BobGuo-1995/More-tex-code",
"max_issues_repo_path": "CSC222/HW5.tex",
"max_line_length": 986,
"max_stars_count": null,
"max_stars_repo_head_hexsha": "cd1ae5128c9206a220fb3d2c5457aa1fee9816fe",
"max_stars_repo_licenses": [
"MIT"
],
"max_stars_repo_name": "BobGuo-1995/More-tex-code",
"max_stars_repo_path": "CSC222/HW5.tex",
"max_stars_repo_stars_event_max_datetime": null,
"max_stars_repo_stars_event_min_datetime": null,
"num_tokens": 4554,
"size": 16794
} |
%-------------------------------------------
%YOUR THESIS
%-------------------------------------------
\documentclass[12pt,titlepage, dvipsnames]{article}
\input{01_settings.tex}
\begin{document}
%-------------------------------------------
%TITLEPAGE
%-------------------------------------------
\begin{titlepage}
\centering
\includegraphics[width=8cm]{images/yourlogo.png}\par\vspace{1cm}
\linespread{1}\Large{\scshape Your Institute\par}
\vspace{1.5cm}
{\scshape\Large\bfseries Bachelor Thesis\par}
{\huge\bfseries The Little Prince Template \par
\Large\bfseries *insert subtitle here*\par}
\vspace{1.5cm}
\linespread{0.75}\Large{Your Name\par Your Number\par Street\par ZIP Code}
\vfill
\linespread{0.75}\large{Submitted to:\par
Prof. Test}
\vfill
% Bottom of the page
{\large 23.02.2042\par}
\end{titlepage}
%-------------------------------------------
%ABSTRACT
%-------------------------------------------
\pagenumbering{roman}
\section*{Abstract}
\lipsum[1] %write you abstract here
\phantomsection %command is necessary for hyperref to jump to the correct page
\addcontentsline{toc}{section}{Abstract}
%-------------------------------------------
%TOC, FIGURES, TABELES, ABBREVIATIONS
%-------------------------------------------
\newpage
\phantomsection
\renewcommand{\contentsname}{Table of Contents}
\addcontentsline{toc}{section}{Table of Contents}
\tableofcontents\thispagestyle{fancy}
\newpage
\listoffigures\thispagestyle{fancy}
\phantomsection
\addcontentsline{toc}{section}{List of Figures}
\newpage
\listoftables\thispagestyle{fancy}
\phantomsection
\addcontentsline{toc}{section}{List of Tables}
\newpage
\input{02_abbreviations.tex}
\printglossary[title = List of Abbreviations]\thispagestyle{fancy}
\phantomsection
\addcontentsline{toc}{section}{List of Abbreviations}
\newpage
\setcounter{savepage}{\arabic{page}}
\pagenumbering{arabic}
%-------------------------------------------
%MAIN BODY
%-------------------------------------------
\input{main_textbody/01_introduction.tex}
\input{main_textbody/02_chapter1.tex}
\input{main_textbody/03_chapter2.tex}
%-------------------------------------------
%REFERENCES, APPENDIX, DECLARATION
%-------------------------------------------
\input{05_references.tex}
\input{06_appendix.tex}
\input{07_declaration.tex}
\end{document}
| {
"alphanum_fraction": 0.5860484545,
"avg_line_length": 26.6,
"ext": "tex",
"hexsha": "ca5fceae0ef106721ee6bf87290a006db86a4567",
"lang": "TeX",
"max_forks_count": 9,
"max_forks_repo_forks_event_max_datetime": "2021-12-26T05:26:05.000Z",
"max_forks_repo_forks_event_min_datetime": "2020-02-28T13:24:52.000Z",
"max_forks_repo_head_hexsha": "bfa23aa9bfef7eef8b13f7ad85b5f27f9d588314",
"max_forks_repo_licenses": [
"MIT"
],
"max_forks_repo_name": "severinlandolt/latex-thesis-template",
"max_forks_repo_path": "01_thesis-template/00_your_thesis_rootfile.tex",
"max_issues_count": 1,
"max_issues_repo_head_hexsha": "bfa23aa9bfef7eef8b13f7ad85b5f27f9d588314",
"max_issues_repo_issues_event_max_datetime": "2021-03-28T16:20:07.000Z",
"max_issues_repo_issues_event_min_datetime": "2021-03-28T16:20:07.000Z",
"max_issues_repo_licenses": [
"MIT"
],
"max_issues_repo_name": "severinlandolt/latex-thesis-template",
"max_issues_repo_path": "01_thesis-template/00_your_thesis_rootfile.tex",
"max_line_length": 79,
"max_stars_count": 57,
"max_stars_repo_head_hexsha": "bfa23aa9bfef7eef8b13f7ad85b5f27f9d588314",
"max_stars_repo_licenses": [
"MIT"
],
"max_stars_repo_name": "severinlandolt/latex-thesis-template",
"max_stars_repo_path": "01_thesis-template/00_your_thesis_rootfile.tex",
"max_stars_repo_stars_event_max_datetime": "2022-03-14T13:17:27.000Z",
"max_stars_repo_stars_event_min_datetime": "2020-03-11T07:59:06.000Z",
"num_tokens": 623,
"size": 2394
} |
\chapter{Proofs for Chapter 2}
By Slutsky's theorem and the results of Lemmas 2 and 3 above, we have
| {
"alphanum_fraction": 0.7525773196,
"avg_line_length": 32.3333333333,
"ext": "tex",
"hexsha": "26c53d29f8599a8fecc743d9689aefef1c1c4f69",
"lang": "TeX",
"max_forks_count": 1,
"max_forks_repo_forks_event_max_datetime": "2022-03-18T02:52:06.000Z",
"max_forks_repo_forks_event_min_datetime": "2022-03-18T02:52:06.000Z",
"max_forks_repo_head_hexsha": "c0f3adaebce39e4b35d51b4e4e0517a73cb22421",
"max_forks_repo_licenses": [
"MIT"
],
"max_forks_repo_name": "monchewharry/Phd_thesis",
"max_forks_repo_path": "appendix/app2.tex",
"max_issues_count": null,
"max_issues_repo_head_hexsha": "c0f3adaebce39e4b35d51b4e4e0517a73cb22421",
"max_issues_repo_issues_event_max_datetime": null,
"max_issues_repo_issues_event_min_datetime": null,
"max_issues_repo_licenses": [
"MIT"
],
"max_issues_repo_name": "monchewharry/Phd_thesis",
"max_issues_repo_path": "appendix/app2.tex",
"max_line_length": 65,
"max_stars_count": null,
"max_stars_repo_head_hexsha": "c0f3adaebce39e4b35d51b4e4e0517a73cb22421",
"max_stars_repo_licenses": [
"MIT"
],
"max_stars_repo_name": "monchewharry/Phd_thesis",
"max_stars_repo_path": "appendix/app2.tex",
"max_stars_repo_stars_event_max_datetime": null,
"max_stars_repo_stars_event_min_datetime": null,
"num_tokens": 30,
"size": 97
} |
\chapter{Selection Statements}
Oftentimes, programs will need to perform different actions in different
scenarios. For instance, a program might need to test input to see if
it is erroneous, or check which menu option a user selected. Many
algorithms also rely on checking conditions to achieve their results.
If we continue the programs-as-recipes metaphor, we can consider recipes
that call for different baking temperatures depending on whether the pan
you are using is metal or glass, or even depending on whether you live in
a high altitude area.
In this chapter, we will learn how to write programs to test for conditions
and do different things based on whether they have occurred or not.
\section{If statements}
Tetra uses the keyword \texttt{if} to check if a condition has occurred.
For instance, if we want the user to enter a positive number, we can use
an \texttt{if} statement for this purpose:
\def \codelabel {ch3if1}
\def \codecaption{Using if to check input}
\input{chapter3/if1.ttr.tex}
The output of this program can be seen below:
\input{chapter3/if1.ttr.out}
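For comparison, here is a rough analogue of the same check written in Python; this is an illustration of the idea only, not Tetra syntax.
\begin{verbatim}
# Read a number and complain if it is not positive.
number = int(input("Enter a positive number: "))
if number <= 0:
    print("That is not a positive number!")
\end{verbatim}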
\section{Conditions}
\section{Boolean Variables}
\section{Boolean Expressions}
\section{Else Statements}
\section{Elif Statements}
\section{Summary}
\section{Exercises}
| {
"alphanum_fraction": 0.7836990596,
"avg_line_length": 24.5384615385,
"ext": "tex",
"hexsha": "3eb0ed2a7b7dd3c838ced323d8ed7ca0b09338ad",
"lang": "TeX",
"max_forks_count": 2,
"max_forks_repo_forks_event_max_datetime": "2015-04-16T22:29:48.000Z",
"max_forks_repo_forks_event_min_datetime": "2015-02-24T16:40:48.000Z",
"max_forks_repo_head_hexsha": "10f5e6e2bf52340b5162850d32c480400d6d7521",
"max_forks_repo_licenses": [
"MIT"
],
"max_forks_repo_name": "IanFinlayson/tetra",
"max_forks_repo_path": "book/chapter3/chapter3.tex",
"max_issues_count": 6,
"max_issues_repo_head_hexsha": "10f5e6e2bf52340b5162850d32c480400d6d7521",
"max_issues_repo_issues_event_max_datetime": "2019-01-28T21:34:10.000Z",
"max_issues_repo_issues_event_min_datetime": "2017-02-21T13:23:06.000Z",
"max_issues_repo_licenses": [
"MIT"
],
"max_issues_repo_name": "IanFinlayson/tetra",
"max_issues_repo_path": "book/chapter3/chapter3.tex",
"max_line_length": 75,
"max_stars_count": 5,
"max_stars_repo_head_hexsha": "10f5e6e2bf52340b5162850d32c480400d6d7521",
"max_stars_repo_licenses": [
"MIT"
],
"max_stars_repo_name": "IanFinlayson/tetra",
"max_stars_repo_path": "book/chapter3/chapter3.tex",
"max_stars_repo_stars_event_max_datetime": "2021-06-20T04:25:40.000Z",
"max_stars_repo_stars_event_min_datetime": "2017-01-21T01:53:30.000Z",
"num_tokens": 297,
"size": 1276
} |
% 20131014-135722
% B8.0/W11.0
\section{B8.0/W11.0 - 110 krpm}
\label[secinapp]{sec:bwp-exp-details-B8.0/W11.0}
This test was performed on October 14\th{}, 2013, at 13:57:22,
just before the breakdown of the compression unit.
\begin{table}[htbp]
\footnotesize
\begin{center}
\input{bwp-results/bwp-table-PThs-break.ins}
\end{center}
\caption{B8.0/W11.0 -- Thermodynamic points of the heat pump cycle}
\label{tab:B8.0/W11.0-PThs}
\end{table}
\begin{figure}[htbp]
\centering
\includegraphics[width=0.7\textwidth]{bwp-energy-sankey-bwp-break}
\caption{B8.0/W11.0 -- Sankey diagram for heat pump energy balance (internal frontier)}
\label{fig:bwp-B8.0/W11.0-sankey-energy}
\end{figure}
\begin{figure}[htbp]
\centering
\includegraphics[width=0.7\textwidth]{bwp-energy-sankey-cp-break}
\caption{B8.0/W11.0 -- Sankey diagram for the compressor unit energy balance}
\label{fig:bwp-B8.0/W11.0-sankey-cp}
\end{figure}
\begin{figure}[htbp]
\centering
\includegraphics[width=0.7\textwidth]{bwp-energy-sankey-motor-break}
\caption{B8.0/W11.0 -- Sankey diagram for the motor energy balance}
\label{fig:bwp-B8.0/W11.0-sankey-motor}
\end{figure}
\begin{table}[htbp]
\footnotesize
\begin{center}
\input{bwp-results/bwp-table-performances-break.ins}
\end{center}
\caption{B8.0/W11.0 -- Performance indicators}
\end{table}
\begin{figure}[htbp]
\centering
\subfloat[Absolute pressures at the bearings cavity]
{\label{fig:bwp-bearings-P}\includegraphics[width=0.45\textwidth]{bwp-bearings-P}}
\hspace{1em}
\subfloat[Temperatures at the bearings cavity]
{\label{fig:bwp-bearings-T}\includegraphics[width=0.45\textwidth]{bwp-bearings-T}}
\caption[Pressures and temperatures at the bearings cavity when the
breakdown happened]{Absolute pressures and temperatures at the
bearings cavity when the breakdown happened}
\label{fig:bwp-bearings-P-T}
\end{figure}
\begin{figure}[htbp]
\centering
\subfloat[Absolute pressures at the compression unit]
{\label{fig:bwp-cp-P}\includegraphics[width=0.45\textwidth]{bwp-cp-P}}
\hspace{1em}
\subfloat[Temperatures at the compression unit]
{\label{fig:bwp-cp-T}\includegraphics[width=0.45\textwidth]{bwp-cp-T}}
\caption[Pressures and temperatures at compression unit
inlets/outlets when the breakdown happened]{Absolute pressures and
temperatures at compression unit inlets/outlets when the breakdown
happened}
\label{fig:bwp-cp-P-T}
\end{figure}
\begin{figure}[htbp]
\centering
\subfloat[Power consumption]
{\label{fig:bwp-power}\includegraphics[width=0.45\textwidth]{bwp-power}}
\hspace{1em}
\subfloat[Speed]
{\label{fig:bwp-speed}\includegraphics[width=0.45\textwidth]{bwp-speed}}
\caption{Power consumption and rotational speed records when the breakdown happened}
\label{fig:bwp-power-speed}
\end{figure}
\FloatBarrier
| {
"alphanum_fraction": 0.7369709689,
"avg_line_length": 32.1235955056,
"ext": "tex",
"hexsha": "73727b2a4bd4fef525c486a123f7cef3a3f851d8",
"lang": "TeX",
"max_forks_count": null,
"max_forks_repo_forks_event_max_datetime": null,
"max_forks_repo_forks_event_min_datetime": null,
"max_forks_repo_head_hexsha": "d3cea3a238c7cccc309400b5686b6ef8ad1d72af",
"max_forks_repo_licenses": [
"CC-BY-4.0"
],
"max_forks_repo_name": "speredenn/epfl-leni-oilfree-radial-cp-hp",
"max_forks_repo_path": "tex/bwp-b-p080-w-p110.tex",
"max_issues_count": null,
"max_issues_repo_head_hexsha": "d3cea3a238c7cccc309400b5686b6ef8ad1d72af",
"max_issues_repo_issues_event_max_datetime": null,
"max_issues_repo_issues_event_min_datetime": null,
"max_issues_repo_licenses": [
"CC-BY-4.0"
],
"max_issues_repo_name": "speredenn/epfl-leni-oilfree-radial-cp-hp",
"max_issues_repo_path": "tex/bwp-b-p080-w-p110.tex",
"max_line_length": 89,
"max_stars_count": 1,
"max_stars_repo_head_hexsha": "d3cea3a238c7cccc309400b5686b6ef8ad1d72af",
"max_stars_repo_licenses": [
"CC-BY-4.0"
],
"max_stars_repo_name": "speredenn/epfl-leni-oilfree-radial-cp-hp",
"max_stars_repo_path": "tex/bwp-b-p080-w-p110.tex",
"max_stars_repo_stars_event_max_datetime": "2017-09-01T13:30:55.000Z",
"max_stars_repo_stars_event_min_datetime": "2017-09-01T13:30:55.000Z",
"num_tokens": 942,
"size": 2859
} |
\section{Work Experie\textcolor{mycolor}{nce}}
\subsection{iDalko - Premium Atlassian Partner}
Security product engineer at {\href{https://www.idalko.com}{iDalko}}.{\textcolor{mygrey}{\hspace*{\fill}2020 - present}}
\subsection{Escape Rooms}
Software/Hardware Development at {\textcolor{mycolor}{Big Moose Projects}}\footnote{This company is no longer active}.{\textcolor{mygrey}{\hspace*{\fill}August - September, 2017}}
\subsection{Teaching}
Teaching programming to children at {\href{https://www.codefever.be/nl}{Codefever}}\footnote{This website is only available in Dutch}.{\textcolor{mygrey}{\hspace*{\fill}2017 - 2019}}
| {
"alphanum_fraction": 0.7476780186,
"avg_line_length": 80.75,
"ext": "tex",
"hexsha": "ce676e415ceeaa61c218608c5f3fbe37920a4993",
"lang": "TeX",
"max_forks_count": null,
"max_forks_repo_forks_event_max_datetime": null,
"max_forks_repo_forks_event_min_datetime": null,
"max_forks_repo_head_hexsha": "8c25302d6c7f276f7afdccbaf886949621385979",
"max_forks_repo_licenses": [
"MIT"
],
"max_forks_repo_name": "F0xedb/Resume",
"max_forks_repo_path": "work_experience.tex",
"max_issues_count": 1,
"max_issues_repo_head_hexsha": "8c25302d6c7f276f7afdccbaf886949621385979",
"max_issues_repo_issues_event_max_datetime": "2019-11-11T13:02:24.000Z",
"max_issues_repo_issues_event_min_datetime": "2019-11-11T13:02:22.000Z",
"max_issues_repo_licenses": [
"MIT"
],
"max_issues_repo_name": "F0xedb/Resume",
"max_issues_repo_path": "work_experience.tex",
"max_line_length": 187,
"max_stars_count": null,
"max_stars_repo_head_hexsha": "8c25302d6c7f276f7afdccbaf886949621385979",
"max_stars_repo_licenses": [
"MIT"
],
"max_stars_repo_name": "F0xedb/Resume",
"max_stars_repo_path": "work_experience.tex",
"max_stars_repo_stars_event_max_datetime": null,
"max_stars_repo_stars_event_min_datetime": null,
"num_tokens": 191,
"size": 646
} |
\section{Learning Methods}
\label{chap:prior:sec:cl}
In this section, we first briefly review some learning methodologies for neural networks in Section \ref{chap:prior:sec:cl:overview} to formalize what learning methods seek to achieve. Then, in Section \ref{chap:prior:sec:cl:cl} we provide a more in-depth description of curriculum learning and cover some of the ways it has been used in NLP.
\subsection{Brief Overview of learning methodologies}
\label{chap:prior:sec:cl:overview}
Neural networks are usually trained by randomly selecting batches of data from a training corpus. This method has proven to be remarkably robust, as it allows the model to learn the data distribution gradually. While effective at eventually learning the data distribution, random sampling is unable to build any natural hierarchy or structure quickly. Methods like curriculum learning \cite{Bengio2009CurriculumL}, reinforcement learning \cite{Sutton1998ReinforcementLA}, and active learning \cite{Cohn1994ActiveLW} are alternatives which try to improve model training and accuracy by sampling training examples in a non-random way. Most of these methods seek to control what kind of information a model has access to at each step in training in order to find better gradients than a random sample would. In domains like Generative Adversarial Networks (GANs) \cite{Goodfellow2014GenerativeAN}, training models that generate large images has proven difficult. A GAN is a machine learning framework in which two neural networks, a generator and a discriminator, compete in a zero-sum game. The discriminator seeks to classify samples as real or synthetic (created by the generator), while the generator seeks to create artificial samples that are close to samples from the training dataset. What is unique about GANs is that the generator's goal is not to reproduce some particular real sample but to produce a sample which fools the discriminator. This methodology has proven incredibly effective at producing realistic photos and other complex synthetic data in an unsupervised way. Exploiting structure in the size of images, researchers have found tremendous improvements by slowly increasing the target output size as training progresses \cite{Karras2017ProgressiveGO}. Initially, the generator produces 2x2 pixel images. Once it and the discriminator converge, the target output size is increased to 4x4. This method of scaling continues until 4096x4096 pixel images are synthesized. By training on increasingly complex outputs, the final model can learn a better representation with higher sample efficiency. This work on progressive learning in GANs inspired this dissertation, as LMs, much like GANs, are conceptually simple but can prove difficult to train at scale.
\subsection{Curriculum Learning}
\label{chap:prior:sec:cl:cl}
While the common usage of CL in computer science begins in 2009 \cite{Bengio2009CurriculumL}, the concept of CL is much older. At its core, CL is the idea of presenting examples in a specific order so as to guide the agent to learn more quickly than if it had seen samples in random order. Early experiments with RNNs focused on learning grammar \cite{Elman1993LearningAD} suggested that learning of complex grammatical structure improves when the initial examples the model learns from are more straightforward. CL guides the optimization process to converge faster, steers the learner toward better local minima, and can be thought of as a method of re-weighting the data distribution over the course of training. Unlike CL, regular random batch sampling gives an equal contribution to each data point, with no notion of how common the data point is or whether it can be used to build a foundational understanding.\\
In their 2009 paper \cite{Bengio2009CurriculumL}, Bengio et al. suggest that CL approaches to training may act similarly to unsupervised pretraining. The authors explore the effect of CL in three experiments: using a perceptron to learn an equation, shape recognition, and language modeling. In their language modeling task, they modify the training corpus to make it increasingly difficult. The language model is trained using samples with a window size of 5 tokens drawn from the 631m-token Wikipedia corpus. Initially, they remove samples which contain any word that is not in the N most common words (starting with N = 5,000). After each pass over the corpus, N is increased by 5,000, which means the training corpus gradually gets larger, more difficult, and more representative of the full corpus distribution. After 1 billion updates, the CL method has a loss of 2.78 vs. the non-CL loss of 2.83. The two main issues the authors discuss with CL are the computational cost of assembling each batch and the fact that the amount of data the model can learn from in early epochs is small. \\
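To make the procedure concrete, the following fragment sketches the vocabulary-threshold curriculum just described. It is my own illustration rather than the authors' code, and it assumes the corpus is given as a list of token windows and that the mapping from each word to its frequency rank (1 = most common) is available.
\begin{verbatim}
# Sketch of the vocabulary-threshold curriculum (illustrative only).
# corpus: list of token windows; rank: word -> frequency rank (1 = most common)
def curriculum_passes(corpus, rank, step=5000, max_rank=50000):
    n = step
    while n <= max_rank:
        allowed = [w for w in corpus
                   if all(rank.get(t, max_rank + 1) <= n for t in w)]
        yield n, allowed   # train one pass on `allowed`, then raise the cap
        n += step
\end{verbatim}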
Since this original paper, CL methods have proven effective in many NLP domains, especially in NMT. Wang et al., 2019 \cite{Wang2019LearningAM} expand on the idea of CL as a method of data selection and data augmentation. Their implementation focuses on selecting data relevant to all tasks and disregarding data that may only be applicable to a specific domain, and it can bring a 2.5 BLEU point improvement over a non-curriculum implementation. Platanios et al., 2019 \cite{Platanios2019CompetencebasedCL} introduce the notion of competence-based CL, which is the basis for much of our experimentation. The authors' main contribution is a CL method, called competence curriculum, which only controls how long the curriculum lasts before regular training occurs. The authors' approach has two steps: assign a difficulty value to each sample in the training data, and train the model with increasingly more data as its competence improves. In the first stage, a heuristic is applied to rank the training data from easiest to hardest. Using a cumulative density function (CDF), each sample is then given a value from 0 to 1, which corresponds to how difficult the example is. Then, starting with some initial competence $\lambda_0$, the model trains by sampling each training batch from the portion of the training data whose difficulty is lower than the model's current competence. After each batch is sampled, the model's competence is increased by a preset $\lambda_{increment}$ until it is training on the full dataset. A more detailed description as it applies to this dissertation can be found in Chapter \ref{chap:method}. In their experiments on NMT, the authors explore the effect of competence-based CL using Transformers and BiLSTMs with two difficulty methods (sentence length and word rarity) and two competence step functions (root and linear), and they find that all of their CL implementations outperform their non-CL counterparts. Using the competence curriculum method, Platanios et al., 2019 are able to reduce training time by up to 70\% and improve BLEU performance by 2.2 points on the WMT dataset compared to non-curriculum methods. | {
"alphanum_fraction": 0.8166378814,
"avg_line_length": 631.6363636364,
"ext": "tex",
"hexsha": "434273efc419e15b1ec434b206e90de2a7ef81d6",
"lang": "TeX",
"max_forks_count": null,
"max_forks_repo_forks_event_max_datetime": null,
"max_forks_repo_forks_event_min_datetime": null,
"max_forks_repo_head_hexsha": "6d13c7cfff8ce80c2c31d0c33696eed58294ff50",
"max_forks_repo_licenses": [
"MIT"
],
"max_forks_repo_name": "spacemanidol/UWTHESIS",
"max_forks_repo_path": "Thesis/learningmethods.tex",
"max_issues_count": null,
"max_issues_repo_head_hexsha": "6d13c7cfff8ce80c2c31d0c33696eed58294ff50",
"max_issues_repo_issues_event_max_datetime": null,
"max_issues_repo_issues_event_min_datetime": null,
"max_issues_repo_licenses": [
"MIT"
],
"max_issues_repo_name": "spacemanidol/UWTHESIS",
"max_issues_repo_path": "Thesis/learningmethods.tex",
"max_line_length": 2240,
"max_stars_count": null,
"max_stars_repo_head_hexsha": "6d13c7cfff8ce80c2c31d0c33696eed58294ff50",
"max_stars_repo_licenses": [
"MIT"
],
"max_stars_repo_name": "spacemanidol/UWTHESIS",
"max_stars_repo_path": "Thesis/learningmethods.tex",
"max_stars_repo_stars_event_max_datetime": null,
"max_stars_repo_stars_event_min_datetime": null,
"num_tokens": 1456,
"size": 6948
} |
\chapter{Generating functions}
| {
"alphanum_fraction": 0.7878787879,
"avg_line_length": 8.25,
"ext": "tex",
"hexsha": "f0f667e825f47341c5b2e33c48fa76665f58b58c",
"lang": "TeX",
"max_forks_count": null,
"max_forks_repo_forks_event_max_datetime": null,
"max_forks_repo_forks_event_min_datetime": null,
"max_forks_repo_head_hexsha": "266bfc6865bb8f6b1530499dde3aa6206bb09b93",
"max_forks_repo_licenses": [
"MIT"
],
"max_forks_repo_name": "adamdboult/nodeHomePage",
"max_forks_repo_path": "src/pug/theory/algebra/generating/00-00-Chapter_name.tex",
"max_issues_count": 6,
"max_issues_repo_head_hexsha": "266bfc6865bb8f6b1530499dde3aa6206bb09b93",
"max_issues_repo_issues_event_max_datetime": "2022-01-01T22:16:09.000Z",
"max_issues_repo_issues_event_min_datetime": "2021-03-03T12:36:56.000Z",
"max_issues_repo_licenses": [
"MIT"
],
"max_issues_repo_name": "adamdboult/nodeHomePage",
"max_issues_repo_path": "src/pug/theory/algebra/generating/00-00-Chapter_name.tex",
"max_line_length": 30,
"max_stars_count": null,
"max_stars_repo_head_hexsha": "266bfc6865bb8f6b1530499dde3aa6206bb09b93",
"max_stars_repo_licenses": [
"MIT"
],
"max_stars_repo_name": "adamdboult/nodeHomePage",
"max_stars_repo_path": "src/pug/theory/algebra/generating/00-00-Chapter_name.tex",
"max_stars_repo_stars_event_max_datetime": null,
"max_stars_repo_stars_event_min_datetime": null,
"num_tokens": 7,
"size": 33
} |
\documentclass[public]{beamer}
\usepackage{ifthen}
\usepackage{econtexShortcuts}
\usepackage{verbatim}
\usetheme{Madrid}
\definecolor{orange}{HTML}{FF7F00}
\hypersetup{colorlinks,linkcolor=,urlcolor=orange}
%\usepackage{econtexSetup}
% Redefine footer from
% http://tex.stackexchange.com/questions/83048/change-the-contents-of-footline-in-a-beamer-presentation
\setbeamertemplate{footline}
{
\leavevmode%
\hbox{%
\begin{beamercolorbox}[wd=.333333\paperwidth,ht=2.25ex,dp=1ex,center]{author in head/foot}%
\usebeamerfont{author in head/foot}\insertsection
\end{beamercolorbox}%
\begin{beamercolorbox}[wd=.333333\paperwidth,ht=2.25ex,dp=1ex,center]{title in head/foot}%
\usebeamerfont{title in head/foot}\insertsubsection
\end{beamercolorbox}%
\begin{beamercolorbox}[wd=.333333\paperwidth,ht=2.25ex,dp=1ex,right]{date in head/foot}%
\usebeamerfont{date in head/foot}\insertshortdate{}\hspace*{2em}
\insertframenumber{} / \inserttotalframenumber\hspace*{2ex}
\end{beamercolorbox}}%
\vskip0pt%
}
\makeatother
\providecommand{\ARK}{\href{http://github.com/econ-ark}{github.com/econ-ark}}
\providecommand{\CDC}{\texttt{CDC}}
\providecommand{\NMP}{\texttt{NMP}}
\providecommand{\MNW}{\texttt{MNW}}
\providecommand{\DCL}{\texttt{DCL}}
\providecommand{\JXY}{\texttt{JXY}}
\providecommand{\AMK}{\texttt{AMK}}
\beamerdefaultoverlayspecification{<+->}
\usepackage{natbib}
\begin{document}
\title[\ARK]{Introducing the Computational Economics \\ ``Algorithmic Repository and toolKit'' \\
\href{http://github.com/econ-ark}{github.com/econ-ark}}
\author[Chris Carroll]{Presentation by Chris Carroll at Conference on \\
{Computation in Economics and Finance (CEF), Bordeaux}}
\date{June, 2016}
\begin{frame}
\titlepage
\end{frame}
\section{Who, What, Where, When, Why, How}
\subsection{What}
\begin{frame}
\frametitle{What Is It?}
State-of-the-art set of tools for:
\begin{enumerate} \pause
\item Simulating behavior of populations of agents
\item Solving dynamic stochastic optimization problems
\begin{itemize}
\item Particularly adapted for Bellman problems with `kinks' and quirks
\end{itemize}
\end{enumerate}
\end{frame}
\begin{frame}
\frametitle{What Is It Good For?}
\begin{itemize}
\item Heterogeneous Agent Macro Models
\begin{itemize}
\item Original name: {\bf H}eterogeneous {\bf A}gent {\bf R}esources and tool{\bf K}it
\item HARK!
\end{itemize}
\item Structural Micro Models (e.g., labor, health)
\item IO models with optimizing consumers and firms
\item {\bf N}ot {\bf O}nly {\bf A}bout {\bf H}eterogeneous {\bf s}tuff ...
\item ... {\bf A}lgorithmic {\bf R}esources and tool{\bf K}it
\item :-)
\begin{itemize}
\item Unlike Noah's, our ARK can hold more than two of each kind!
\item Ultimate goal: Get examples on the ARK of all types of animal (model)
\end{itemize}
\end{itemize}
\end{frame}
\subsection{Where}
\begin{frame}
\frametitle{Where Is It?}
\begin{center}
{\ARK} is the project's home
\end{center}
\begin{enumerate}
\item Get a GitHub account. Then, options to access are
\begin{itemize}
\item Install GitHub Desktop App
\item Install `git' command-line tool (if you're hard-core)
\end{itemize}
\item \texttt{{\ARK}/HARK} is a ``public repo''
\begin{itemize}
\item Contains all existing code
\end{itemize}
\item \texttt{{\ARK}/HARK/Documentation/NARK.pdf}
\begin{itemize}
\item Describes variable naming conventions for easy workflow:
\item LaTeX object definitions correspond to HARK definitions
\end{itemize}
\item Similar structure will be used for future contributions
\begin{itemize}
\item {\bf A}gent {\bf A}rchive {\bf R}epository {\bf D}eposit {\bf V}ehicle for ARK?
\end{itemize}
\item Instructions for cloning are in the README.txt
\item You get the whole codebase under the Apache license
\begin{itemize}
\item Basically, no limitations on use
\item But, please credit us, and participate in discussions
\end{itemize}
\end{enumerate}
\end{frame}
\subsection{Who}
\begin{frame}
\frametitle{Who Has Produced It?}
\begin{footnotesize}
\begin{center}
\begin{tabular}{lll}
Name & TLA & Affiliation % & Contact
\\ \hline \hline {\it Christopher D Carroll} & \texttt{{\CDC}} & JHU, CFPB % & \href{mailto:[email protected]}{[email protected]}
\\ {\it David C Low} & \texttt{{\DCL}} & CFPB % & \href{mailto:[email protected]}{[email protected]}
\\ {\it Nathan M Palmer} & \texttt{{\NMP}} & OFR % & \href{malito:[email protected]}{[email protected]}
\\ {\it Matthew N White} & \texttt{{\MNW}} & UDel, CFPB % & \href{mailto:[email protected]}{[email protected]}
\\ \hline {\it Alex Kaufman} & \texttt{{\AMK}} & CFPB $\rightarrow$ ? (Alcatraz?) %& \href{mailto:[email protected]}{No Fixed Address; Expected: Alcatraz}
\\ {\it Jiaxiong Yao} & \texttt{JXY} & JHU $\rightarrow$ IMF %& \href{mailto:[email protected]}{No Fixed Address; Expected: Alcatraz}
\end{tabular}
\end{center}
Nothing herein may be interpreted as reflecting the opinions of
\begin{center}
\begin{tabular}{rcl}
CFPB & - & United States Consumer Financial Protection Bureau
\\ JHU & - & Johns Hopkins University
\\ IMF & - & International Monetary Fund
\\ OFR & - & Office of Financial Research, U.S.\ Treasury
\\ UDel & - & University of Delaware
\end{tabular}
\end{center}
\end{footnotesize}
\end{frame}
\begin{frame}
\frametitle{Major credit goes to CFPB - a 21st Century Regulator!}
\begin{itemize}
\item Hired {\CDC} as Chief Economist with this as a key priority
\item Hired {\NMP} as intern to get started
\item Hired {\MNW} as Visiting Scholar to work on it
\item Hired {\DCL} as new economist last year
\item Hired {\AMK} as RA
\end{itemize}
\end{frame}
\begin{frame}
\frametitle{Organization Going Forward}
Standard Github tools, esp:
\begin{itemize}
\item Issue Tracker: If You See Something, Say Something
\end{itemize}
\begin{center}
{\bf Topic Czars}
\begin{itemize}
\item Gatekeeper for Contributions
\item Responsible for Setting Out Tests A Module Should Pass
\begin{itemize}
\item e.g.\ Special Cases With Analytical Solutions
\item Metrics for ``closeness'' to ``true'' solution
\end{itemize}
\end{itemize}
\pause
\begin{tabular}{lll}
Name & Topic & Affiliation
\\ \hline Serguei Maliar & Interpolation & Stanford %& \href{mailto:[email protected]}{[email protected]}
\\ Lilia Maliar & Interpolation & Stanford % & \href{}{mailto:[email protected]}
\\ \multicolumn{3}{c}{{\it We're Seeking Volunteers for Czars}}
\end{tabular}
\end{center}
\pause
\begin{tabular}{rcl}
\hline \href{mailto:[email protected]}{[email protected]} & - & General Purpose Questions
\\ \href{mailto:[email protected]}{[email protected]} & - & Volunteer to be a Czar
\\ \href{mailto:[email protected]}{[email protected]} & - & Ideas for Improvement
\end{tabular}
\end{frame}
\subsection{When}
\begin{frame}
\frametitle{Timeline}
\begin{center}
\begin{tabular}{rll}
When & What & Lessons
\\ \hline 2006-2013 & \href{http://econ.jhu.edu/people/ccarroll/SolvingMicroDSOPs}{SolvingMicroDSOPs} & Surprisingly popular
\\ 2014-12 & \href{}{IMF-CFPB Workshop} & Lots of enthusiasm
\\ 2015-12 & \href{}{CFPB-IMF Workshop} & Not HARK, ARK!
\\ & & Testing, Replication, Feedback
\\ 2016-06 & Hello! & None yet ...
\end{tabular}
\end{center}
\begin{itemize}
\item The version at \href{github.com/econ-ark}{http://github.com/econ-ark} is our ``public beta''
\item So far as we know, everything works
\item First non-beta: Built-in tests for {\it everything}
\begin{itemize}
\item Aim: This year
\end{itemize}
\end{itemize}
\end{frame}
\subsection{Why?}
\begin{frame}
\frametitle{Why Are Policy Institutions So Interested?}
Participation: CFPB, OFR, IMF
Interest From: FRB, ECB, BLS
\begin{itemize}
\item Policymaking = Applied Theory. Options:
\begin{enumerate}
\item Informal, intuitive, ``wetware'' theory
\item Formal, structural, ``software'' theory
\end{enumerate}
\end{itemize}
\end{frame}
\begin{frame}
\frametitle{LATE is Antediluvian}
`Local Average Treatment Effects' results are
\begin{itemize}
\item {\bf N}ot {\bf E}ven {\bf V}ery {\bf E}mpirically {\bf R}elevant ...
\item UNLESS used to estimate `structural' parameters
\item Because the important question is
\begin{itemize}
\item What does world look like {\it non-locally} ...
\item ... = {\it after} the policy change
\item and maybe not even just ``on average''
\begin{itemize}
\item because distributional/targeted impact may be whole point
\end{itemize}
\end{itemize}
\end{itemize}
\end{frame}
\begin{frame}
\frametitle{Welfare Analysis With Heterogeneity}
Sensible cost-benefit analysis requires:
\begin{itemize}
\item Estimates of distribution of heterogeneous outcomes
\item Utility or other weighting of those outcomes
\item $\rightarrow$ Structure
\end{itemize}
\end{frame}
\subsection{How}
\begin{frame}
\frametitle{{\it The Invention of Science} by David Wootton}
Wrong explanations for the Scientific Revolution: \pause
\begin{itemize}
\item Invention of `the experiment'
\item Invention of the printing press
\item ...
\end{itemize}
\medskip\medskip
\pause
Right explanation:
\begin{itemize}
\item Creation of community of scholars
\item ... whose methods and results were `open source'
\item ... who criticized and improved and debugged each other
\end{itemize}
\medskip
\pause
Alchemy $\rightarrow$ Chemistry
\medskip
\pause
17th and 18th century version of \href{http://github.com}{github.com}!
\end{frame}
\begin{frame}
\frametitle{Economists are People Too ...}
\begin{itemize}
\item We are {\it way} behind many scientific fields in `open source' code
\item Surveys/Experiments: Economics students are more `selfish.'
\item Options: \pause
\begin{enumerate}
\item `selfish' people study economics
\item Studying economics makes you selfish!
\item Economics students are just more honest
\end{enumerate}
\end{itemize}
\pause I prefer (3)!
\end{frame}
\begin{frame}
\frametitle{Lessons Learned from Other Fields About What Works}
\begin{itemize}
\item Not taking the dewy-eyed view: ``Build it and they will come''
\item Empirical fact: Many other open source communities have succeeded
\item Economists can't be {\it that} different ...
\end{itemize}
\end{frame}
\begin{frame}
\frametitle{In Addition to Usual Github Tools}
\begin{itemize}
\item Czars for specific topics
\item Bounties for Best Solution of Specific Problems
\item Time-Stamped Public Mechanism for Staking a Claim to New Idea
\item Stack-Exchange-Like Q\&A Forum
\item Mechanism for Easy Creation of Grad Student Problem Sets
\item Tool for Grad Student Replication Exercises
\item Eventually, a Journal?
\item ... Your Ideas? \href{mailto:[email protected]}{[email protected]}
\end{itemize}
\end{frame}
\begin{frame}
\frametitle{Join our (Scientific) Revolution!}
\providecommand{\subscribe}{\href{mailto:[email protected]}{[email protected]}}
\providecommand{\letmehelpwith}{\href{mailto:[email protected]}{[email protected]}}
Options:
\begin{itemize}
\item \subscribe
\begin{itemize}
\item Add me to the newsletter/mailing list
\end{itemize}
\item {\it Read the docs and slides} and absorb what exists now. Options:
\begin{enumerate}
\item Add an `issue' that you want to tackle on \ARK
\item \letmehelpwith
\begin{enumerate}
\item Define some area that you'd like to contribute to
\item email us at this address outlining what you propose to do
\item We'll reply with some suggestions
\end{enumerate}
\end{enumerate}
\end{itemize}
\end{frame}
\end{document}
| {
"alphanum_fraction": 0.7398818829,
"avg_line_length": 28.570719603,
"ext": "tex",
"hexsha": "034d5936140b9f8e6fe530f8f6a838b700a053fc",
"lang": "TeX",
"max_forks_count": 6,
"max_forks_repo_forks_event_max_datetime": "2020-04-24T16:07:23.000Z",
"max_forks_repo_forks_event_min_datetime": "2017-09-23T01:43:07.000Z",
"max_forks_repo_head_hexsha": "682b29d978ee7d0a24af568e953bed5327550ea8",
"max_forks_repo_licenses": [
"Apache-2.0"
],
"max_forks_repo_name": "econ-ark/PARK",
"max_forks_repo_path": "source/Intro-To-Econ-ARK/ForEconomists/Intro-To-ARK_2016-06_CEF.tex",
"max_issues_count": null,
"max_issues_repo_head_hexsha": "682b29d978ee7d0a24af568e953bed5327550ea8",
"max_issues_repo_issues_event_max_datetime": null,
"max_issues_repo_issues_event_min_datetime": null,
"max_issues_repo_licenses": [
"Apache-2.0"
],
"max_issues_repo_name": "econ-ark/PARK",
"max_issues_repo_path": "source/Intro-To-Econ-ARK/ForEconomists/Intro-To-ARK_2016-06_CEF.tex",
"max_line_length": 164,
"max_stars_count": 8,
"max_stars_repo_head_hexsha": "682b29d978ee7d0a24af568e953bed5327550ea8",
"max_stars_repo_licenses": [
"Apache-2.0"
],
"max_stars_repo_name": "econ-ark/PARK",
"max_stars_repo_path": "source/Intro-To-Econ-ARK/ForEconomists/Intro-To-ARK_2016-06_CEF.tex",
"max_stars_repo_stars_event_max_datetime": "2021-11-06T12:00:58.000Z",
"max_stars_repo_stars_event_min_datetime": "2017-11-04T07:06:09.000Z",
"num_tokens": 3557,
"size": 11514
} |
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% HIGZ User Guide -- LaTeX Source %
% %
% Chapter: The miscellaneous functions %
% %
% Editor: Olivier Couet / CN-AS %
% Last Mod.: 9 July 1993 oc %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
\chapter{Miscellaneous functions}
\index{miscellaneous functions}
User routines whose functionality is often needed (e.g.\ displaying a message),
but which cannot easily be classified in any of the previous chapters,
are described in this chapter.
\section{Display a message on the screen}
\index{message on the screen}
\Shubr{IGMESS}{(N,CHMESS,CHTIT,CHOPT)}
\Action
This routine allows one to display a message. The \X11{} version of \HIGZ{} displays
the message in a separate window.
\Pdesc
\begin{DLtt}{1234567}
\item[N] Number of lines in the message.
\item[CHMESS(N)] Message to be displayed.
\item[CHTIT] Window title.
\item[CHOPT] Options.
\begin{DLtt}{12345}
\item['P'] Print the array \Rarg{CHMESS} and open the message window
if necessary.
\item['C'] Close the message window.
\item['T'] Print the array \Rarg{CHMESS} on standard output.
\item['D'] Delete the message window.
\end{DLtt}
\end{DLtt}
\section{Display a colour map}
\index{display!colour map}
\Shubr{IGCOLM}{(X1,X2,Y1,Y2,IC1,IC2,ZMIN,ZMAX,CHOPT)}
\Action
This routine allows one to display a colour map on the screen from the
colour index \Rarg{IC1} to the colour index \Rarg{IC2}.
\Pdesc
\begin{DLtt}{1234567}
\item[X1] X coordinate of 1st corner of the rectangle in \WC.
\item[X2] X coordinate of 2nd corner of the rectangle in \WC.
\item[Y1] Y coordinate of 1st corner of the rectangle in \WC.
\item[Y2] Y coordinate of 2nd corner of the rectangle in \WC.
\item[IC1] First colour index.
\item[IC2] Last colour index.
\item[ZMIN] Minimum Z value.
\item[ZMAX] Maximum Z value.
\item[CHOPT] Options.
\begin{DLtt}{12345}
\item['C'] Draw the levels with \Em{C}olours.
\item['B'] Draw the levels with \Em{B}oxes.
\item['A'] Draw the \Em{A}xis.
\item['H'] Draw the map \Em{H}orizontally (default is vertically).
\item['G'] Logarithmic scale is used to draw the axis.
\item['P'] \Rarg{IC1} is the dimension of the \Lit{INTEGER} array
\Rarg{IC2} in which a list of colour indices is given.
\item['L'] The current palette is used. \Rarg{IC1} and \Rarg{IC2} are
not used.
\end{DLtt}
\end{DLtt}
\newpage
\section{Conversion between Colour systems}
\index{colour!systems!HLS}
\index{colour!systems!RGB}
\subsection{RGB to HLS}
\Shubr{IGRTOH}{(CR,CB,CG,CH*,CL*,CS*)}
\Action
This routine converts an RGB colour into an HLS colour.
\Pdesc
\begin{DLtt}{1234567}
\item[CR] Red value \Lit{0.\(\leq\)CR\(\leq\)1.}
\item[CG] Green value \Lit{0.\(\leq\)CG\(\leq\)1.}
\item[CB] Blue value \Lit{0.\(\leq\)CB\(\leq\)1.}
\item[CH] Hue value \Lit{0.\(\leq\)CH\(\leq\)360.}
\item[CL] Light value \Lit{0.\(\leq\)CL\(\leq\)1.}
\item[CS] Saturation value \Lit{0.\(\leq\)CS\(\leq\)1.}
\end{DLtt}
\subsection{HLS to RGB}
\Shubr{IGHTOR}{(CH,CL,CS,CR*,CB*,CG*)}
\Action
This routine converts an HLS colour into an RGB colour.
\Pdesc
\begin{DLtt}{1234567}
\item[CH] Hue value \Lit{0.\(\leq\)CH\(\leq\)360.}
\item[CL] Light value \Lit{0.\(\leq\)CL\(\leq\)1.}
\item[CS] Saturation value \Lit{0.\(\leq\)CS\(\leq\)1.}
\item[CR] Red value \Lit{0.\(\leq\)CR\(\leq\)1.}
\item[CG] Green value \Lit{0.\(\leq\)CG\(\leq\)1.}
\item[CB] Blue value \Lit{0.\(\leq\)CB\(\leq\)1.}
\end{DLtt}
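For readers who want to experiment with the same colour-space conversions outside of \HIGZ{}, the following Python fragment (an illustration only, not part of the \HIGZ{} interface) performs the analogous transforms with the standard \texttt{colorsys} module; note that \texttt{colorsys} expects the hue in the range 0--1 rather than 0--360, so the value must be rescaled.
\begin{verbatim}
import colorsys

# HLS -> RGB, with the hue given in degrees as for IGHTOR
ch, cl, cs = 120.0, 0.5, 1.0
cr, cg, cb = colorsys.hls_to_rgb(ch / 360.0, cl, cs)

# RGB -> HLS, converting the hue back to degrees as for IGRTOH
h, l, s = colorsys.rgb_to_hls(cr, cg, cb)
ch_again = h * 360.0
\end{verbatim}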
\newpage
\section{Conversion between character string and numbers}
\index{character!conversion to number}
\index{number!conversion to character}
Often it is necessary to convert a \FORTRAN{} character string into
a number (integer or real) or vice versa. For example, routine \Rind{IGMENU}
returns some parameters as character strings and it is often necessary to convert
these into numbers.
Also, to print graphically the result of a computation with
\Rind{ITX} it is necessary to convert a number into a character string.
The routines described in this paragraph allow these kinds of conversions.
\subsection{Character to integer}
\Shubr{IZCTOI}{(CHVAL,IVAL*)}
\Action
Converts the character string {\tt CHVAL} into the integer {\tt IVAL}.
\Pdesc
\begin{DLtt}{1234567}
\item[CHVAL] Character string.
\item[IVAL] Integer.
\end{DLtt}
\subsection{Character to real}
\Shubr{IZCTOR}{(CHVAL,RVAL*)}
\Action
Converts the character string {\tt CHVAL} into the real {\tt RVAL}.
\Pdesc
\begin{DLtt}{1234567}
\item[CHVAL] Character string.
\item[RVAL] Real.
\end{DLtt}
\subsection{Integer to character}
\Shubr{IZITOC}{(IVAL,CHVAL*)}
\Action
Converts the integer {\tt IVAL} into character string {\tt CHVAL}.
\Pdesc
\begin{DLtt}{1234567}
\item[IVAL] Integer.
\item[CHVAL] Character string.
\end{DLtt}
\subsection{Real to character}
\Shubr{IZRTOC}{(RVAL,CHVAL*)}
\Action
Converts the real {\tt RVAL} into character string {\tt CHVAL}.
\Pdesc
\begin{DLtt}{1234567}
\item[RVAL] Real.
\item[CHVAL] Character string.
\end{DLtt}
| {
"alphanum_fraction": 0.6345551667,
"avg_line_length": 35.0258064516,
"ext": "tex",
"hexsha": "e5b019dc55c51c466aa711d8c4791292b90edaec",
"lang": "TeX",
"max_forks_count": null,
"max_forks_repo_forks_event_max_datetime": null,
"max_forks_repo_forks_event_min_datetime": null,
"max_forks_repo_head_hexsha": "76048db0ca60708a16661e8494e1fcaa76a83db7",
"max_forks_repo_licenses": [
"CC-BY-4.0"
],
"max_forks_repo_name": "berghaus/cernlib-docs",
"max_forks_repo_path": "higzhplot/higzmisc.tex",
"max_issues_count": null,
"max_issues_repo_head_hexsha": "76048db0ca60708a16661e8494e1fcaa76a83db7",
"max_issues_repo_issues_event_max_datetime": null,
"max_issues_repo_issues_event_min_datetime": null,
"max_issues_repo_licenses": [
"CC-BY-4.0"
],
"max_issues_repo_name": "berghaus/cernlib-docs",
"max_issues_repo_path": "higzhplot/higzmisc.tex",
"max_line_length": 81,
"max_stars_count": 1,
"max_stars_repo_head_hexsha": "76048db0ca60708a16661e8494e1fcaa76a83db7",
"max_stars_repo_licenses": [
"CC-BY-4.0"
],
"max_stars_repo_name": "berghaus/cernlib-docs",
"max_stars_repo_path": "higzhplot/higzmisc.tex",
"max_stars_repo_stars_event_max_datetime": "2019-07-24T12:30:01.000Z",
"max_stars_repo_stars_event_min_datetime": "2019-07-24T12:30:01.000Z",
"num_tokens": 1630,
"size": 5429
} |
\subsection{Processing as a Complex Number}
The transfer function that a virtual microphone applies to the signal acts in both the time and amplitude domains. This can be represented as a complex number in the form:
\begin{equation}\label{vMicFunction}
m(S) = \Delta{}v + \Delta{}t
\end{equation}
where $\Delta v$ is the change in amplitude and $\Delta t$ is the change in time.
\subsection{$\Delta{}v$ Calculation}
\input{deltavcalc.tex}
\subsection{$\Delta{}t$ Calculation}
\input{deltatcalc.tex}
\subsection{$M(S)$ Array processing}
It then follows that the complex-number formulation of the function $m(S)$ can be written out:
\begin{equation}
m(S) = S\left[\left(1 - p_m\right) + p\cos(\theta_{mS})\right] + \hat{t}\frac{d_{\vec{v}}}{343 + \Delta{}c}
\end{equation}
This comprises two components: the adjusted amplitude of $S$ and the time-domain shift of $S$.
The virtual microphone function (\ref{vMicFunction}) can be applied for each element in $M$ to find the effect of the total microphone array:
\begin{equation}
M(S) = \{m_1(S) ... m_n(S)\}
\end{equation}
The elements of $M(S)$ can be apportioned to the mixdown channels following:
\begin{equation}\label{output}
\begin{bmatrix} L(M(S)_n) \\ R(M(S)_n) \end{bmatrix} = gM(S)_n \begin{bmatrix} k_L \\ k_R \end{bmatrix}
\end{equation}
where $g$ is a scalar constant and $k_L$ and $k_R$ are related proportionality constants that determine the level of the element $M(S)_n$ within the encoded left and right channels.
\subsubsection{Output implementation}
For purposes of this implementation, $g$ is a scalar value applied per pair (or to the center microphone). The proportionality constants $k$ for the flanks follow a standard sine-cosine panning function. The center microphone uses $k = 1$.
The $g$ scalar is exposed to the user as a value $\alpha$, where $\alpha$ is a level in decibels and $\alpha \in [-20,0]$.
Converting $\alpha_m$ to the scalar $g_m$ can be done as follows:
\begin{equation}\label{dbscalarconvert}
g_m = 10^{\frac{\alpha_m}{20}}
\end{equation}
For the apportionment of the signal to the mixdown channels, the user inputs a value $\beta$, where $\beta \in [0, 1]$. The value of $\beta$ indicates the separation of the virtual microphone pair into their corresponding mixdown channels: lower values of $\beta$ mean that both microphones in the pair increasingly come out of both mixdown channels, whereas higher values mean more separation into their corresponding channels. The relationship with $k$ can be written as:
\begin{equation}\label{pairsPanning}
\begin{bmatrix} k_1 \\ k_2 \end{bmatrix} = \begin{bmatrix} \sin{k(\beta)} \\ \cos{k(\beta)} \end{bmatrix} \; | \; k(\beta) = \beta\frac{\pi}{4} + \frac{\pi}{4}
\end{equation}
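As a quick illustration of equations (\ref{dbscalarconvert}) and (\ref{pairsPanning}), the following Python sketch (the function names are mine, not part of the processor) converts a user-facing $\alpha$ in decibels to the scalar $g$ and computes the pair of panning gains for a separation value $\beta$.
\begin{verbatim}
import math

def db_to_scalar(alpha_db):          # g = 10^(alpha/20), alpha in [-20, 0] dB
    return 10.0 ** (alpha_db / 20.0)

def pan_gains(beta):                 # k(beta) = beta*pi/4 + pi/4, beta in [0, 1]
    k = beta * math.pi / 4.0 + math.pi / 4.0
    return math.sin(k), math.cos(k)  # (k1, k2)

# beta = 0 sends equal energy to both channels (both gains ~0.707);
# beta = 1 gives full separation into the corresponding channel (1.0, 0.0).
g = db_to_scalar(-6.0)               # ~0.501
\end{verbatim}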
Following this, for any virtual microphone pair in $M$, the placement of the encoded sounds within the output mixdown follows:
\begin{equation}\label{centerPanning}
\begin{bmatrix} y(L) \\ y(R) \end{bmatrix} = \begin{bmatrix} \sin{k(\beta)} & \cos{k(\beta)} \\ \cos{k(\beta)} & \sin{k(\beta)} \end{bmatrix} \cdot g\begin{bmatrix} m(S)_L \\ m(S)_R \end{bmatrix}
\end{equation}
The center microphone term $m(S)_{c}$ is simply:
\begin{equation}
\begin{bmatrix} y(L) \\ y(R) \end{bmatrix} = gm(S)_{c}J_{2,1}
\end{equation}
Thus, the final, encoded output of the processor can be represented as:
\begin{equation}\label{outputsum}
y(M, S) = \sum\limits_{i=1}^{|M|} y(M(S)_i)
\end{equation}
When taken together with the relationship to the UI in (\ref{pairsPanning}) and (\ref{centerPanning}), (\ref{outputsum}) can be expanded to:
\begin{equation}
y(M, S) = \begin{bmatrix} y(L)_\text{mains} \\ y(R)_\text{mains} \end{bmatrix} + \begin{bmatrix} y(L)_\text{flanks} \\ y(R)_\text{flanks} \end{bmatrix} + \begin{bmatrix} y(L)_\text{center} \\ y(R)_\text{center} \end{bmatrix}
\end{equation}
| {
"alphanum_fraction": 0.7157068063,
"avg_line_length": 45.4761904762,
"ext": "tex",
"hexsha": "2818a0a100c1114a340d1bb692e099847b4406af",
"lang": "TeX",
"max_forks_count": null,
"max_forks_repo_forks_event_max_datetime": null,
"max_forks_repo_forks_event_min_datetime": null,
"max_forks_repo_head_hexsha": "a78161b4b99b50e481c133ba8a5049d548561954",
"max_forks_repo_licenses": [
"BSD-3-Clause"
],
"max_forks_repo_name": "jmclark85/StereoPairsEmulator",
"max_forks_repo_path": "Technical Documentation/processingascomplexnumber.tex",
"max_issues_count": 1,
"max_issues_repo_head_hexsha": "a78161b4b99b50e481c133ba8a5049d548561954",
"max_issues_repo_issues_event_max_datetime": "2020-08-26T18:24:14.000Z",
"max_issues_repo_issues_event_min_datetime": "2020-08-26T18:24:14.000Z",
"max_issues_repo_licenses": [
"BSD-3-Clause"
],
"max_issues_repo_name": "jmclark85/StereoPairsEmulator",
"max_issues_repo_path": "Technical Documentation/processingascomplexnumber.tex",
"max_line_length": 495,
"max_stars_count": null,
"max_stars_repo_head_hexsha": "a78161b4b99b50e481c133ba8a5049d548561954",
"max_stars_repo_licenses": [
"BSD-3-Clause"
],
"max_stars_repo_name": "jmclark85/StereoPairsEmulator",
"max_stars_repo_path": "Technical Documentation/processingascomplexnumber.tex",
"max_stars_repo_stars_event_max_datetime": null,
"max_stars_repo_stars_event_min_datetime": null,
"num_tokens": 1168,
"size": 3820
} |
\documentclass{IEEEtran}
\title{TODO}
\author{TODO}
\begin{document}
\maketitle
\begin{abstract}
TODO
\end{abstract}
\section{Introduction}
\label{sec:introduction}
\subsection*{Context}
TODO~\cite{Hackenberg2014}~\cite{Legat2014}~\cite{Teufl2015}
\subsection*{Problem}
TODO
\subsection*{Solution}
TODO
\subsection*{Outline}
This article is structured as follows:
In Section~\ref{sec:differentiation} we discuss related work on the problem defined previously.
In Section~\ref{sec:contribution} we present our original solution to the problem at hand.
Then, in Section~\ref{sec:evaluation} we evaluate our solution with respect to different criteria.
Finally, in Section~\ref{sec:conclusion} we summarize our learnings and provide an outlook on future work.
\section{Related work}
\label{sec:differentiation}
TODO
\section{Our solution}
\label{sec:contribution}
TODO
\section{Critical evaluation}
\label{sec:evaluation}
TODO
\section{Conclusion}
\label{sec:conclusion}
\subsection*{Summary}
TODO
\subsection*{Outlook}
TODO
\bibliographystyle{plain}
\bibliography{main}
\end{document} | {
"alphanum_fraction": 0.6873508353,
"avg_line_length": 22.8545454545,
"ext": "tex",
"hexsha": "10aeb1c80cebd47fc7e2af4b71e0f70c3b0c265d",
"lang": "TeX",
"max_forks_count": null,
"max_forks_repo_forks_event_max_datetime": null,
"max_forks_repo_forks_event_min_datetime": null,
"max_forks_repo_head_hexsha": "eeabd28a2f741263c5de2caef31ae9973d4089d2",
"max_forks_repo_licenses": [
"MIT"
],
"max_forks_repo_name": "ghackenberg/productboard",
"max_forks_repo_path": "manuscripts/2022-06-template/main.tex",
"max_issues_count": 10,
"max_issues_repo_head_hexsha": "eeabd28a2f741263c5de2caef31ae9973d4089d2",
"max_issues_repo_issues_event_max_datetime": "2022-03-28T12:52:31.000Z",
"max_issues_repo_issues_event_min_datetime": "2021-12-18T13:34:43.000Z",
"max_issues_repo_licenses": [
"MIT"
],
"max_issues_repo_name": "ghackenberg/productboard",
"max_issues_repo_path": "manuscripts/2022-06-template/main.tex",
"max_line_length": 110,
"max_stars_count": 2,
"max_stars_repo_head_hexsha": "eeabd28a2f741263c5de2caef31ae9973d4089d2",
"max_stars_repo_licenses": [
"MIT"
],
"max_stars_repo_name": "ghackenberg/productboard",
"max_stars_repo_path": "manuscripts/2022-06-template/main.tex",
"max_stars_repo_stars_event_max_datetime": "2022-01-05T07:28:01.000Z",
"max_stars_repo_stars_event_min_datetime": "2021-11-10T19:48:41.000Z",
"num_tokens": 341,
"size": 1257
} |
\documentclass[a4paper,11pt]{article}
\usepackage[utf8]{inputenc}
\usepackage{graphicx}
\usepackage{enumerate}
\usepackage{geometry}
\usepackage{fancyhdr}
\usepackage{minted}
\usepackage{xcolor}
\usepackage{soul}
\usepackage{listings}
\geometry{total={210mm,297mm},
left=25mm,right=25mm,%
bindingoffset=0mm, top=20mm,bottom=20mm}
\renewcommand*\sfdefault{phv}
\renewcommand\familydefault{\sfdefault}
\newcommand*{\TitleFont}{%
\usefont{\encodingdefault}{\rmdefault}{b}{n}%
\fontsize{16}{20}%
\selectfont}
\linespread{1.3}
% my own titles
\makeatletter
\renewcommand{\maketitle}{
\begin{center}
\vspace{2ex}
{\huge \textsc{\@title}}
\vspace{1ex}
\\
\rule{\linewidth}{0.5pt}\\
\@author \hfill \@date
\vspace{4ex}
\end{center}
}
\makeatother
\definecolor{bg}{rgb}{0.95,0.95,0.95}
\lstset{
basicstyle=\footnotesize\ttfamily,
extendedchars=true,
numbersep=5mm,
% basicstyle=\ttfamily\small,
breaklines=true,
tabsize=4,
frame=single,
numbers=left,
captionpos=t,
framesep=3mm,
escapeinside={\%*}{*)}
}
\newminted{python}{
linenos,
fontsize=\footnotesize\ttfamily,
frame=single,
}
% custom footers and headers
\pagestyle{fancy}
\lhead{}
\chead{}
\rhead{}
\lfoot{Assignment 4 : DNS(2) }
\cfoot{}
\rfoot{Page \thepage}
\renewcommand{\headrulewidth}{0pt}
\renewcommand{\footrulewidth}{0pt}
%%----------%%%----------%%%----------%%%----------%%%
\renewcommand{\thesubsubsection}{\thesubsection.\alph{subsubsection}}
\newmintedfile{html}{
linenos,
breaklines,
python3,
numbersep=8pt,
frame=single,
framesep=3mm}
\begin{document}
% \newminted{all}{linenos, frame=single}
% \usemintedstyle{monokai}
\usemintedstyle{manni}
% \usemintedstyle{xcode}
% \usemintedstyle{vs}
% \usemintedstyle{autumn}
% \usemintedstyle{colorful}
% \usemintedstyle{trac}
\title{ \TitleFont Assignment 4 : DNS(2) }
\author{Emil Sharifulllin, Innopolis University}
\date{\today}
\maketitle
\section{Reverse Zone Files}
\subsection{Why is that useful?}
Sometimes this is required for diagnostic purposes; more frequently these days it is used for security purposes, to trace a hacker or spammer. \hl{For example, many e-mail servers on the Internet are configured to reject incoming e-mail from any IP address that does not have reverse DNS. The original use of rDNS was network troubleshooting via tools such as traceroute and ping.}
\subsection{Reverse zone configuration}
\subsubsection{Set up your own reverse zone for your IPv4 subnet. Please use a subnet from
10.192.X.0/24, where X is your table number.}
To enable the reverse zone we need to change two files:
\begin{lstlisting}[caption=/etc/named.conf]
...
zone "10.192.10.in-addr.arpa" {
type master;
file "10.192.10.rev";
notify no;
};
\end{lstlisting}
\begin{lstlisting}[caption=10.192.10.rev]
$TTL 86400
@ IN SOA st10.os3.su. admin.st10.os3.su. (
2016082900 ; serial
360000 ; refresh every 100 hours
3600 ; retry after 1 hour
3600000 ; expire after 1000 hours
3600 ; negative cache is 1 hour
)
IN NS ns.st10.os3.su.
0 IN PTR st10.os3.su.
\end{lstlisting}
\subsubsection{Show that a reverse lookup works.}
To check the reverse zone I ran the following command:
\begin{lstlisting}
dig -x 10.192.10.0
; <<>> DiG 9.10.4-P2 <<>> -x 10.192.10.0
;; global options: +cmd
;; Got answer:
;; ->>HEADER<<- opcode: QUERY, status: NOERROR, id: 19187
;; flags: qr aa rd ra; QUERY: 1, ANSWER: 1, AUTHORITY: 1, ADDITIONAL: 2
;; OPT PSEUDOSECTION:
; EDNS: version: 0, flags:; udp: 4096
;; QUESTION SECTION:
;0.10.192.10.in-addr.arpa. IN PTR
;; ANSWER SECTION:
0.10.192.10.in-addr.arpa. 86400 IN PTR st10.os3.su.
;; AUTHORITY SECTION:
10.192.10.in-addr.arpa. 86400 IN NS ns.st10.os3.su.
;; ADDITIONAL SECTION:
ns.st10.os3.su. 86400 IN A 188.130.155.43
;; Query time: 0 msec
;; SERVER: 127.0.1.1#53(127.0.1.1)
;; WHEN: Mon Sep 12 16:07:16 MSK 2016
;; MSG SIZE rcvd: 111
\end{lstlisting}
\section{Delegating Your Own Zone}
\addtocounter{subsection}{3}
\subsection{}
\subsubsection{How did you setup the subdomains in your zone file?}
Initially I have the zone st10.os3.su. Working in a pair with student \#11, I need to delegate the zone st11.st10.os3.su. to him, and I need to host the zone st10.st11.os3.su. myself.
To allow the delegation I need to change the st10.os3.su.zone file and add a new NS record to it:
\begin{lstlisting}
$TTL 86400
@ IN SOA ns10.os3.su. admin.st10.os3.su. (
2016082900 ; serial
360000 ; refresh every 100 hours
3600 ; retry after 1 hour
3600000 ; expire after 1000 hours
3600 ; negative cache is 1 hour
)
@ IN NS ns10.os3.su.
@ IN A 188.130.155.43
@ IN MX 10 mail
st11 IN NS ns11.os3.su.
ns IN A 188.130.155.43
www IN A 188.130.155.43
mail IN A 188.130.155.43
web IN CNAME www
mob IN CNAME www
ns1 IN CNAME ns
ns2 IN CNAME ns
\end{lstlisting}
After that, as soon as my pair student sets up the zone st11.st10.os3.su., it will be resolvable.
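A quick way to verify the delegation from my side (a hypothetical check, not part of the graded output) is to ask my own server for the delegated zone without recursion:
\begin{lstlisting}
dig NS st11.st10.os3.su @127.0.0.1 +norecurse
\end{lstlisting}
If the delegation is in place, the referral to ns11.os3.su. appears in the answer or authority section.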
\subsubsection{What named.conf options did you add or change?}
To support the delegation of zone st10.st11.os3.su. I need to add the following lines to the files named.conf and st10.st11.os3.su.zone:
\begin{lstlisting}[caption=named.conf]
zone "st10.st11.os3.su." {
type master;
file "st10.st11.os3.su.zone";
allow-transfer { 188.130.155.44; };
};
\end{lstlisting}
\begin{lstlisting}
IN SOA ns10.os3.su. admin.st10.st11.os3.su. (
;2016082900 ; serial
2016091200
360000 ; refresh every 100 hours
3600 ; retry after 1 hour
3600000 ; expire after 1000 hours
3600 ; negative cache is 1 hour
)
@ IN NS ns10.os3.su.
@ IN A 188.130.155.43
www IN CNAME @
new IN A 188.130.155.43
\end{lstlisting}
\subsubsection{Show the results of the tests that you performed.}
\begin{lstlisting}[caption="Checking zone st11.st10.os3.su"]
$ dig st11.st10.os3.su +trace @8.8.8.8
; <<>> DiG 9.10.4-P2 <<>> st11.st10.os3.su +trace @8.8.8.8
;; global options: +cmd
. 7330 IN NS j.root-servers.net.
. 7330 IN NS b.root-servers.net.
. 7330 IN NS d.root-servers.net.
. 7330 IN NS a.root-servers.net.
. 7330 IN NS f.root-servers.net.
. 7330 IN NS l.root-servers.net.
. 7330 IN NS m.root-servers.net.
. 7330 IN NS c.root-servers.net.
. 7330 IN NS e.root-servers.net.
. 7330 IN NS g.root-servers.net.
. 7330 IN NS h.root-servers.net.
. 7330 IN NS k.root-servers.net.
. 7330 IN NS i.root-servers.net.
. 7330 IN RRSIG NS 8 0 518400 20161002050000 20160919040000 46551 . X7qv1M68ydJjh3gXzdrXAkY9YB8mCLeDkvi8wJSLjtOvHtJJuwiD9/GH GVTU2LOLUYoHfLc+11evhaLxf3gYOlECWRKPmK42OGgdW546GKfzF/uN D/FcLl6ip/HooQcvj5MVES/m4eB87UASIyCKWFuXz097bb6tqVkKKcdu D3M=
;; Received 397 bytes from 8.8.8.8#53(8.8.8.8) in 21 ms
su. 172800 IN NS e.dns.ripn.net.
su. 172800 IN NS d.dns.ripn.net.
su. 172800 IN NS f.dns.ripn.net.
su. 172800 IN NS b.dns.ripn.net.
su. 172800 IN NS a.dns.ripn.net.
su. 86400 IN DS 22111 8 2 3363C13346E0C9C511526EDFFC756C3701FB35A7EFAD5775BB6C13D6 FC2BBA01
su. 86400 IN RRSIG DS 8 1 86400 20161002050000 20160919040000 46551 . Rck2sDYnDHpW+2OD8l+63YU3gI+FS/u1UrFjxaGq4/xt4Rujf3tIh2Op ZuQSp+fyrhDYsZ545FKkmZBFyumjdo53v0qRyaSo0ecMe8XExfI4Oqha 52ZjiBjCObID81qYBL5Kf4sKa0SXhVhZeRHV23UutPH13Y+RynCqlkJG lzY=
;; Received 564 bytes from 192.112.36.4#53(g.root-servers.net) in 74 ms
OS3.SU. 345600 IN NS ns2.masterhost.ru.
OS3.SU. 345600 IN NS ns.masterhost.ru.
OS3.SU. 345600 IN NS ns1.masterhost.ru.
DOVG5BL88TU14V0TNIJJGO18H7JH7C1E.SU. 3600 IN NSEC3 1 1 3 00FF E4CHA6HEO3F4IRI6S4V9HDASKGABKP28 NS SOA RRSIG DNSKEY NSEC3PARAM
DOVG5BL88TU14V0TNIJJGO18H7JH7C1E.SU. 3600 IN RRSIG NSEC3 8 2 3600 20161009075420 20160829085649 24459 su. REbEdkeEWwzzc8ecOA+aW1fs6o80dW3NIEquQ7SgA4s45+LRi296Kxrc DA8YKjJ0EhVeHaV0NbDFvVTBAnRPzFKmasOeVqhVPOMnnIw6lK5dKas1 G/mi3AphsmkFkRZVfIkl/D58SkYp7JqKqxEf07+M29NdfCtOBEr6bZ6h 0E4=
7D5ULGN7M6DVECAC1GAFR3FAST3VE2LQ.SU. 3600 IN NSEC3 1 1 3 00FF 8JMNFT53J2UADTLQBNPVDR70KBUTAI91 NS DS RRSIG
7D5ULGN7M6DVECAC1GAFR3FAST3VE2LQ.SU. 3600 IN RRSIG NSEC3 8 2 3600 20161011153428 20160902135650 24459 su. fVie0EpvZWY84KOpoeJVO10ykVrhLUYqUCW76EuQCOm2ouwoWAAhR7I6 xw1rDTstrTsGAltNUMLe73kzkYSVGc+Qn2HtnigCZNydUVxyWjDfWY9w PS3HSdQPcKL4MJtcZ2Jn3cZIeZ6GDp6jO6FgZLQZ6rfszia0p72bS0TA eI0=
;; Received 604 bytes from 193.232.142.17#53(e.dns.ripn.net) in 59 ms
st10.os3.su. 900 IN NS st11.os3.su.
st10.os3.su. 900 IN NS st10.os3.su.
;; Received 110 bytes from 217.16.22.15#53(ns2.masterhost.ru) in 21 ms
st11.st10.os3.su. 1800 IN A 188.130.155.44
st11.st10.os3.su. 1800 IN NS ns11.os3.su.
;; Received 80 bytes from 188.130.155.43#53(st10.os3.su) in 0 ms
\end{lstlisting}
\begin{lstlisting}[caption="Checking zone st10.st11.os3.su"]
$ dig st10.st11.os3.su +trace @8.8.8.8
; <<>> DiG 9.10.4-P2 <<>> st10.st11.os3.su +trace @8.8.8.8
;; global options: +cmd
. 7269 IN NS j.root-servers.net.
. 7269 IN NS b.root-servers.net.
. 7269 IN NS d.root-servers.net.
. 7269 IN NS a.root-servers.net.
. 7269 IN NS f.root-servers.net.
. 7269 IN NS l.root-servers.net.
. 7269 IN NS m.root-servers.net.
. 7269 IN NS c.root-servers.net.
. 7269 IN NS e.root-servers.net.
. 7269 IN NS g.root-servers.net.
. 7269 IN NS h.root-servers.net.
. 7269 IN NS k.root-servers.net.
. 7269 IN NS i.root-servers.net.
. 7269 IN RRSIG NS 8 0 518400 20161002050000 20160919040000 46551 . X7qv1M68ydJjh3gXzdrXAkY9YB8mCLeDkvi8wJSLjtOvHtJJuwiD9/GH GVTU2LOLUYoHfLc+11evhaLxf3gYOlECWRKPmK42OGgdW546GKfzF/uN D/FcLl6ip/HooQcvj5MVES/m4eB87UASIyCKWFuXz097bb6tqVkKKcdu D3M=
;; Received 397 bytes from 8.8.8.8#53(8.8.8.8) in 20 ms
su. 172800 IN NS a.dns.ripn.net.
su. 172800 IN NS e.dns.ripn.net.
su. 172800 IN NS b.dns.ripn.net.
su. 172800 IN NS f.dns.ripn.net.
su. 172800 IN NS d.dns.ripn.net.
su. 86400 IN DS 22111 8 2 3363C13346E0C9C511526EDFFC756C3701FB35A7EFAD5775BB6C13D6 FC2BBA01
su. 86400 IN RRSIG DS 8 1 86400 20161002050000 20160919040000 46551 . Rck2sDYnDHpW+2OD8l+63YU3gI+FS/u1UrFjxaGq4/xt4Rujf3tIh2Op ZuQSp+fyrhDYsZ545FKkmZBFyumjdo53v0qRyaSo0ecMe8XExfI4Oqha 52ZjiBjCObID81qYBL5Kf4sKa0SXhVhZeRHV23UutPH13Y+RynCqlkJG lzY=
;; Received 564 bytes from 192.33.4.12#53(c.root-servers.net) in 66 ms
OS3.SU. 345600 IN NS ns1.masterhost.ru.
OS3.SU. 345600 IN NS ns.masterhost.ru.
OS3.SU. 345600 IN NS ns2.masterhost.ru.
DOVG5BL88TU14V0TNIJJGO18H7JH7C1E.SU. 3600 IN NSEC3 1 1 3 00FF E4CHA6HEO3F4IRI6S4V9HDASKGABKP28 NS SOA RRSIG DNSKEY NSEC3PARAM
DOVG5BL88TU14V0TNIJJGO18H7JH7C1E.SU. 3600 IN RRSIG NSEC3 8 2 3600 20161009075420 20160829085649 24459 su. REbEdkeEWwzzc8ecOA+aW1fs6o80dW3NIEquQ7SgA4s45+LRi296Kxrc DA8YKjJ0EhVeHaV0NbDFvVTBAnRPzFKmasOeVqhVPOMnnIw6lK5dKas1 G/mi3AphsmkFkRZVfIkl/D58SkYp7JqKqxEf07+M29NdfCtOBEr6bZ6h 0E4=
7D5ULGN7M6DVECAC1GAFR3FAST3VE2LQ.SU. 3600 IN NSEC3 1 1 3 00FF 8JMNFT53J2UADTLQBNPVDR70KBUTAI91 NS DS RRSIG
7D5ULGN7M6DVECAC1GAFR3FAST3VE2LQ.SU. 3600 IN RRSIG NSEC3 8 2 3600 20161011153428 20160902135650 24459 su. fVie0EpvZWY84KOpoeJVO10ykVrhLUYqUCW76EuQCOm2ouwoWAAhR7I6 xw1rDTstrTsGAltNUMLe73kzkYSVGc+Qn2HtnigCZNydUVxyWjDfWY9w PS3HSdQPcKL4MJtcZ2Jn3cZIeZ6GDp6jO6FgZLQZ6rfszia0p72bS0TA eI0=
;; Received 604 bytes from 193.232.128.6#53(a.dns.ripn.net) in 17 ms
st11.os3.su. 900 IN NS st10.os3.su.
st11.os3.su. 900 IN NS st11.os3.su.
;; Received 110 bytes from 217.16.22.30#53(ns2.masterhost.ru) in 16 ms
st10.st11.os3.su. 3600 IN A 188.130.155.43
st10.st11.os3.su. 3600 IN NS ns10.os3.su.
;; Received 80 bytes from 188.130.155.43#53(st10.os3.su) in 0 ms
\end{lstlisting}
\section{Setting Up A Slave Server}
\addtocounter{subsection}{5}
% \subsection{}
A slave server is a nameserver that is separate from the primary nameserver and can be used in cases when the primary nameserver does not respond. A secondary nameserver contains exactly the same information about the domain zone as the primary nameserver.
\subsubsection{How did you set up the slave nameserver?}
To set up a slave nameserver you must add a zone section to your named.conf file and define this zone section as type slave.
\subsubsection{Show the changes to the configuration files that you made.}
To implement master-slave replication of nameservers I changed the following lines:
\begin{lstlisting}[caption=named.conf]
zone "st10.st11.os3.su." {
type master;
file "st10.st11.os3.su.zone";
allow-transfer { 188.130.155.44; };
};
zone "st11.st10.os3.su." {
type slave;
file "st11.st10.os3.su.zone";
masters { 188.130.155.44; };
};
\end{lstlisting}
\hl{In the master record I added allow-transfer to let the secondary nameserver of my zone fetch its contents. The main idea is to allow nobody but the secondary nameserver to transfer my zone.} After a restart, BIND will copy the zone file from the primary nameserver.
\subsection{What happens if the primary nameserver for the subdomain fails?}
The recursor tries to query the master server, and if that fails it queries the secondary nameserver. \hl{The secondary nameserver will keep serving requests until the expire time is reached.}
\subsection{Considering that the slave nameserver is also the delegating nameserver, explain why this is essentially a bad setup?}
\hl{If the master server fails, the load on the secondary nameserver increases and it can go down as well. After that the delegated zone fails too. The failure of one server promotes the failure of the other.}
\section{Zone Transfer}
\addtocounter{subsection}{7}
\subsection{}
\begin{lstlisting}
$ dig axfr st10.os3.su @127.0.0.1
; <<>> DiG 9.10.4-P2 <<>> axfr st10.os3.su @127.0.0.1
;; global options: +cmd
st10.os3.su. 86400 IN SOA ns10.os3.su. admin.st10.os3.su. 2016082900 360000 3600 3600000 3600
st10.os3.su. 86400 IN NS ns10.os3.su.
st10.os3.su. 86400 IN A 188.130.155.43
st10.os3.su. 86400 IN MX 10 mail.st10.os3.su.
bot.st10.os3.su. 86400 IN CNAME st10.os3.su.
mail.st10.os3.su. 86400 IN A 188.130.155.43
mob.st10.os3.su. 86400 IN CNAME www.st10.os3.su.
ns.st10.os3.su. 86400 IN A 188.130.155.43
ns1.st10.os3.su. 86400 IN CNAME ns.st10.os3.su.
ns2.st10.os3.su. 86400 IN CNAME ns.st10.os3.su.
st11.st10.os3.su. 86400 IN NS ns11.os3.su.
web.st10.os3.su. 86400 IN CNAME www.st10.os3.su.
www.st10.os3.su. 86400 IN A 188.130.155.43
st10.os3.su. 86400 IN SOA ns10.os3.su. admin.st10.os3.su. 2016082900 360000 3600 3600000 3600
;; Query time: 0 msec
;; SERVER: 127.0.0.1#53(127.0.0.1)
;; WHEN: Mon Sep 19 20:23:35 MSK 2016
;; XFR size: 14 records (messages 1, bytes 343)
\end{lstlisting}
\begin{lstlisting}
$ dig axfr st11.st10.os3.su @188.130.155.44
; <<>> DiG 9.10.4-P2 <<>> axfr st11.st10.os3.su @188.130.155.44
;; global options: +cmd
st11.st10.os3.su. 1800 IN SOA ns11.os3.su. admin.st11.os3.su. 2016091201 3600 900 1209600 1800
st11.st10.os3.su. 1800 IN NS ns11.os3.su.
st11.st10.os3.su. 1800 IN A 188.130.155.44
new.st11.st10.os3.su. 1800 IN A 188.130.155.44
www.st11.st10.os3.su. 1800 IN A 188.130.155.44
st11.st10.os3.su. 1800 IN SOA ns11.os3.su. admin.st11.os3.su. 2016091201 3600 900 1209600 1800
;; Query time: 0 msec
;; SERVER: 188.130.155.44#53(188.130.155.44)
;; WHEN: Mon Sep 19 20:25:00 MSK 2016
;; XFR size: 6 records (messages 1, bytes 203)
\end{lstlisting}
\subsection{Describe the steps in the transfer process.}
\begin{enumerate}
\item The secondary server for the zone waits a certain amount of time (specified in the Refresh field of the SOA resource record), and then polls the master server for its SOA.
\item The master server for the zone responds with the SOA resource record.
\item The secondary server for the zone compares the returned serial number to its own serial number. If the serial number sent by the master server for the zone is higher than its own serial number, that means its zone database is out of date, and it sends an AXFR request (a request for a full zone transfer).
\item The master server for the zone sends the full zone database to the secondary server.
\end{enumerate}
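The first two steps can be observed manually (a hypothetical check, not part of the assignment output) by asking both servers for the SOA record of the slaved zone and comparing the serial numbers:
\begin{lstlisting}
dig +short SOA st11.st10.os3.su @188.130.155.44   # master
dig +short SOA st11.st10.os3.su @188.130.155.43   # slave (this server)
\end{lstlisting}
If the serial reported by the slave is lower than the one on the master, the slave is due for a transfer.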
\subsection{What information did the slave server receive?}
The secondary nameserver receives the full information about the zone, because we use a full zone transfer:
\begin{lstlisting}
st10.os3.su. 86400 IN SOA ns10.os3.su. admin.st10.os3.su. 2016082900 360000 3600 3600000 3600
st10.os3.su. 86400 IN NS ns10.os3.su.
st10.os3.su. 86400 IN A 188.130.155.43
st10.os3.su. 86400 IN MX 10 mail.st10.os3.su.
bot.st10.os3.su. 86400 IN CNAME st10.os3.su.
mail.st10.os3.su. 86400 IN A 188.130.155.43
mob.st10.os3.su. 86400 IN CNAME www.st10.os3.su.
ns.st10.os3.su. 86400 IN A 188.130.155.43
ns1.st10.os3.su. 86400 IN CNAME ns.st10.os3.su.
ns2.st10.os3.su. 86400 IN CNAME ns.st10.os3.su.
st11.st10.os3.su. 86400 IN NS ns11.os3.su.
web.st10.os3.su. 86400 IN CNAME www.st10.os3.su.
www.st10.os3.su. 86400 IN A 188.130.155.43
st10.os3.su. 86400 IN SOA ns10.os3.su. admin.st10.os3.su. 2016082900 360000 3600 3600000 3600
\end{lstlisting}
\subsection{Show the changes you had to make to your configuration.}
To configure the zone transfer I added my companion's IP address to the \textbf{allow-transfer} parameter in my zone section in named.conf.
\hl{It is not necessary to add the notify option to the config file because it is set to yes by default. \\}
\hl{notify: if yes (the default), DNS NOTIFY messages are sent when a zone the server is authoritative for changes, see Section 4.1. The messages are sent to} the servers listed in the zone's NS records (except the master server identified in the SOA MNAME field), and to any servers listed in the also-notify option.
\begin{lstlisting}
allow-transfer { 188.130.155.44; };
\end{lstlisting}
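If I wanted to control this behaviour explicitly, the zone section could also carry these options (a hypothetical variant; I did not need it because of the defaults):
\begin{lstlisting}
zone "st10.st11.os3.su." {
    type master;
    file "st10.st11.os3.su.zone";
    allow-transfer { 188.130.155.44; };
    notify yes;
    also-notify { 188.130.155.44; };
};
\end{lstlisting}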
\subsection{Show how to make BIND/NSD run in a chroot environment.}
\begin{lstlisting}
mkdir -p /chroot/named
mkdir: cannot create directory '/chroot': Permission denied
$ ~ sudo mkdir -p /chroot/named
$ ~ sudo cd /chroot/named
sudo: cd: command not found
$ ~ cd /chroot/named
$ named sudo mkdir -p dev etc/bind/slave var/run
$ named cp -p /etc/named.conf /chroot/named/etc/
cp: cannot create regular file '/chroot/named/etc/named.conf': Permission denied
$ named sudo !!
$ named sudo cp -p /etc/named.conf /chroot/named/etc/
$ named sudo cp -a /var/named/* /chroot/named/etc/bind/
zsh: no matches found: /var/named/*
$ named sudo cp -a /etc/bind/* /chroot/named/etc/bind/
$ named sudo chown -R named:named /chroot/named/etc/bind/slave
$ named sudo chown named:named /chroot/named/var/run
$ named sudo mknod /chroot/named/dev/null c 1 3
$ named sudo mknod /chroot/named/dev/random c 1 8
$ named sudo chmod 666 /chroot/named/dev/{null,random}
$ named cp /etc/localtime /chroot/named/etc/
cp: cannot create regular file '/chroot/named/etc/localtime': Permission denied
$ named sudo cp /etc/localtime /chroot/named/etc/
$ named chown root /chroot
chown: changing ownership of '/chroot': Operation not permitted
$ named sudo chown root /chroot
$ named sudo chmod 700 /chroot
$ named sudo chown named:named /chroot/named
$ named sudo chmod 700 /chroot/named
$ sudo mkdir -p /chroot/named/usr/lib/x86_64-linux-gnu/openssl-1.0.0/engines/
$ sudo cp /usr/lib/x86_64-linux-gnu/openssl-1.0.0/engines/libgost.so /chroot/named/usr/lib/x86_64-linux-gnu/openssl-1.0.0/engines/libgost.so
$ sudo ls -l /proc/$(pgrep named)/root
lrwxrwxrwx 1 named named 0 Sep 21 17:02 /proc/15615/root -> /chroot/named
\end{lstlisting}
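With the directory tree prepared, named itself is pointed at the chroot with the \textbf{-t} option and dropped to the unprivileged user with \textbf{-u}. The invocation below is only a sketch, since the real start-up is usually wrapped by the distribution's init script:
\begin{lstlisting}
sudo named -u named -t /chroot/named -c /etc/named.conf
\end{lstlisting}
Note that the path given to -c is interpreted inside the chroot, so it actually refers to /chroot/named/etc/named.conf on the real filesystem.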
\subsection{What do all those parameters in the SOA record do, and what use could fiddling with them have?}
The SOA record of st10.os3.su is:
\begin{lstlisting}
$TTL 86400
@ IN SOA st10.os3.su. admin.st10.os3.su. (
2016082900; serial
360000 ; refresh every 100 hours
3600 ; retry after 1 hour
3600000 ; expire after 1000 hours
3600 ; negative cache is 1 hour
)
\end{lstlisting}
\begin{enumerate}
\item \textbf{\$TTL 86400} is not a required field; it defines the default Time To Live for the zone's records.
\item \textbf{@ IN SOA st10.os3.su. admin.st10.os3.su.} is the start of authority record; it means that this server is authoritative for the st10.os3.su. zone and that [email protected]. is the administrator's e-mail.
\item \textbf{2016082900} serial number: used to determine whether the zone file has been changed.
\item \textbf{360000} refresh: how often the secondary server should ask the primary server whether the serial number has changed.
\item \textbf{3600} retry: how long the secondary server should wait before sending another request to the primary server if the previous request failed.
\item \textbf{3600000} expire: after this number of seconds the secondary server must stop using its data about this zone.
\item \textbf{3600} negative cache: how long a negative answer for this zone may be cached (here 1 hour).
\end{enumerate}
The SOA record allows administrators to fine-tune the caching behaviour of nameservers: they can set long timers to reduce network traffic, or minimal timers to make debugging easier.
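For example, before a planned migration one could temporarily shorten the timers (a hypothetical tweak, not applied to my zone) so that secondaries and resolvers pick up new data faster, and restore the original values afterwards:
\begin{lstlisting}
$TTL 3600
@ IN SOA st10.os3.su. admin.st10.os3.su. (
        2016091300 ; serial (must be increased)
        3600       ; refresh every hour during the migration
        600        ; retry after 10 minutes
        3600000    ; expire unchanged
        300        ; negative cache only 5 minutes
        )
\end{lstlisting}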
\end{document}
\chapter{Numerical integration}
\section*{Acknowledgments}
Many thanks to Bjarne Stroustrup and Gabriel Dos Reis for providing feedback on earlier versions of this paper.
\documentclass[12pt]{article}
\usepackage{latexsym,amssymb,amsmath} % for \Box, \mathbb, split, etc.
% \usepackage[]{showkeys} % shows label names
\usepackage{cite} % sorts citation numbers appropriately
\usepackage{path}
\usepackage{url}
\usepackage{verbatim}
\usepackage{graphicx}
\usepackage{array}
\usepackage{multirow}
% horizontal margins: 1.0 + 6.5 + 1.0 = 8.5
\setlength{\oddsidemargin}{0.0in}
\setlength{\textwidth}{6.5in}
% vertical margins: 1.0 + 9.0 + 1.0 = 11.0
\setlength{\topmargin}{0.0in}
\setlength{\headheight}{12pt}
\setlength{\headsep}{13pt}
\setlength{\textheight}{625pt}
\setlength{\footskip}{24pt}
\renewcommand{\textfraction}{0.10}
\renewcommand{\topfraction}{0.85}
\renewcommand{\bottomfraction}{0.85}
\renewcommand{\floatpagefraction}{0.90}
\usepackage{graphicx}
\usepackage{wrapfig}
\usepackage{lscape}
\usepackage{rotating}
\usepackage{epstopdf}
\makeatletter
\setlength{\arraycolsep}{2\p@} % make spaces around "=" in eqnarray smaller
\makeatother
% change equation, table, figure numbers to be counted inside a section:
\numberwithin{equation}{section}
\numberwithin{table}{section}
\numberwithin{figure}{section}
% begin of personal macros
\newcommand{\half}{{\textstyle \frac{1}{2}}}
\newcommand{\eps}{\varepsilon}
\newcommand{\myth}{\vartheta}
\newcommand{\myphi}{\varphi}
\usepackage[utf8]{inputenc}
% Default fixed font does not support bold face
\DeclareFixedFont{\ttb}{T1}{txtt}{bx}{n}{8} % for bold
\DeclareFixedFont{\ttm}{T1}{txtt}{m}{n}{8} % for normal
% Custom colors
\usepackage{color}
\definecolor{deepblue}{rgb}{0,0,0.5}
\definecolor{deepred}{rgb}{0.6,0,0}
\definecolor{deepgreen}{rgb}{0,0.5,0}
\definecolor{backcolour}{rgb}{0.96,0.96,0.96}
\usepackage{listings}
% cpp style for highlighting
\newcommand\cppstyle{\lstset{
language=C++,
basicstyle=\tiny\ttfamily,
keywordstyle=\color{blue}\ttfamily,
stringstyle=\color{red}\ttfamily,
commentstyle=\color{green}\ttfamily,
morecomment=[l][\color{magenta}]{\#},
frame=tb, % Any extra options here
showstringspaces=false, %
backgroundcolor=\color{backcolour}
}}
% cpp environment
\lstnewenvironment{cpp}[1][]
{
\cppstyle
\lstset{#1}
}
{}
% cpp for external files
\newcommand\cppexternal[2][]{{
\cppstyle
\lstinputlisting[#1]{#2}}}
% cpp for inline
\newcommand\cppinline[1]{{\cppstyle\lstinline!#1!}}
\newcommand{\IN}{\mathbb{N}}
\newcommand{\IZ}{\mathbb{Z}}
\newcommand{\IQ}{\mathbb{Q}}
\newcommand{\IR}{\mathbb{R}}
\newcommand{\IC}{\mathbb{C}}
\newcommand{\Real}[1]{\mathrm{Re}\left({#1}\right)}
\newcommand{\Imag}[1]{\mathrm{Im}\left({#1}\right)}
\usepackage{booktabs}
\usepackage{caption}
\usepackage{float}
\usepackage{titlesec}
\usepackage{capt-of}
%dashed line
\usepackage{array}
\usepackage{arydshln}
\setlength\dashlinedash{0.2pt}
\setlength\dashlinegap{1.5pt}
\setlength\arrayrulewidth{0.3pt}
%Widows & Orphans & Penalties
\widowpenalty500
\clubpenalty500
\clubpenalty=9996
\exhyphenpenalty=50 %for line-breaking at an explicit hyphen
\brokenpenalty=4991
\predisplaypenalty=10000
\postdisplaypenalty=1549
\displaywidowpenalty=1602
\floatingpenalty = 20000
\usepackage[T1]{fontenc}
\usepackage{fontspec}
\setmainfont[Scale=0.85, Ligatures={Required,Common,Contextual,TeX}]{TeX Gyre Schola} % Incredible font inside latex
\newcommand{\norm}[2]{\|{#1}\|_{{}_{#2}}}
\newcommand{\abs}[1]{\left|{#1}\right|}
\newcommand{\ip}[2]{\left\langle {#1}, {#2} \right\rangle}
\newcommand{\der}[2]{\frac{\partial {#1}}{\partial {#2}}}
\newcommand{\dder}[2]{\frac{\partial^2 {#1}}{\partial {#2}^2}}
\usepackage{enumitem}
\newcommand{\nn}{\mathbf{n}}
\newcommand{\xx}{\mathbf{x}}
\newcommand{\uu}{\mathbf{u}}
\usepackage{tikz}
\usetikzlibrary{arrows}
\usetikzlibrary{positioning}
\usepackage{titlesec}
\newcommand{\junk}[1]{{}}
\usepackage{sectsty}
\usepackage{xcolor}
\newcommand\MyBox[2]{
\fbox{\lower0.75cm
\vbox to 1.7cm{\vfil
\hbox to 1.7cm{\hfil\parbox{1.4cm}{#1\\#2}\hfil}
\vfil}%
}%
}
\makeatletter
\renewcommand*\env@matrix[1][\arraystretch]{%
\edef\arraystretch{#1}%
\hskip -\arraycolsep
\let\@ifnextchar\new@ifnextchar
\array{*\c@MaxMatrixCols c}}
\makeatother
\makeatletter
\renewcommand*\env@matrix[1][*\c@MaxMatrixCols c]{%
\hskip -\arraycolsep
\let\@ifnextchar\new@ifnextchar
\array{#1}}
\makeatother
\definecolor{darkblue}{rgb}{0,0,0.4}
\usepackage[colorlinks = true,
linkcolor = darkblue,
urlcolor = darkblue,
citecolor = darkblue,
anchorcolor = darkblue]{hyperref}
% set two lengths for the includegraphics commands used to import the plots:
\newlength{\fwtwo} \setlength{\fwtwo}{0.45\textwidth}
% end of personal macros
\begin{document}
\DeclareGraphicsExtensions{.jpg}
\begin{center}
\textsc{\Huge Multi-core Programming} \\[2pt]
\textsc{\Large Assignment 3}\\
\vspace{0.5cm}
Ali Gholami \\[6pt]
Department of Computer Engineering \& Information Technology\\
Amirkabir University of Technology \\[6pt]
\def\UrlFont{\em}
\url{https://aligholamee.github.io}\\
\href{mailto:[email protected]}{\textit{[email protected]}}
\end{center}
\begin{abstract}
One common example of parallel processing is the implementation of the merge sort within a parallel processing environment. In the fully parallel model, you repeatedly split the sublists down to the point where you have single-element lists. You then merge these in parallel back up the processing tree until you obtain the fully merged list at the top of the tree. In this report, we'll analyze the possibility of Merge-sort parallelization using OpenMP tasks and sections.
\end{abstract}
\subparagraph{Keywords.} \textit{Merge-sort, Parallel Recursive Sort, Parallel Sort, Heterogeneous Programming, OpenMP, C Programming, C++ Programming, Parallelization, Multi-thread Programming.}
\section{Merge-sort Parallelization with Tasks}
\subsection{Problem Specification}
This assignment focuses on the parallel sorting problem using OpenMP. The goal is to improve the speed of array sorting by parallelizing the recursive part of the sorting algorithm. We'll use two OpenMP features, \textit{Tasks} and \textit{Sections}, in turn. The initial serial code for the Merge-sort is given below.
\begin{cpp}
void mergeSort(int *a, int n) {
int m;
if (n < 2)
return;
m = n / 2;
mergeSort(a, m);
mergeSort(a + m, n - m);
merge(a, n, m);
}
\end{cpp}
\subsection{Task Parallelization}
Considering \textit{Tasks} as units of work, we generate them from a single thread. The generated tasks are put into a \textit{Task Queue}. The other threads wait for a task to become available; each thread then takes a task from the queue and starts working on it. This idea is illustrated in Figure 1.1, which is taken from the official Intel slides.
\subsection{Task Execution}
Tasks are queued and executed whenever possible at the so-called task scheduling points. Under some conditions, the runtime is allowed to move tasks between threads, even in the middle of their lifetime. Such tasks are called untied: an untied task might start executing in one thread, and then at some scheduling point be migrated by the runtime to another thread {\UrlFont[by Hristo Iliev on Stack Overflow]}.
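As a minimal illustration (not part of the code measured in this report), the clause is simply appended to the task construct:
\begin{cpp}
#pragma omp task untied   // the task may migrate between threads at scheduling points
mergeSort(a, m);
\end{cpp}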
\begin{figure}[!h]\centering
\includegraphics[width=0.7\textwidth]{task_concept.png}
\caption{Illustration of Task Concept in OpenMP.}
\label{figsolplot}
\end{figure}
\subsection{Task Parallelization in Recursive Loops}
Tasks are useful in many scenarios, including recursive algorithms. On a large input, the Merge-sort algorithm generates a large number of single-element sublists at the leaves of the computational tree. These leaves can be merged by different threads, so parallelization can be applied at every depth of the tree. An important criterion that needs to be satisfied is that \textit{the depths of the tree are dependent on each other}: when some threads complete their job on some leaves, they have to wait until the other threads are done with the sibling leaves. This can be achieved using the \textit{taskwait} directive.
\subsection{Parallelized Recursive Loop}
Here is the final parallelized code for the recursive calls we just talked about. Note that the \textit{parallel region} is implemented \textbf{outside} of the recursive function.
\begin{cpp}
void mergeSort(int *a, int n) {
int m;
if(n < 2)
return;
// else...
m = n / 2;
#pragma omp task
mergeSort(a, m);
#pragma omp task
mergeSort(a + m, n - m);
#pragma omp taskwait
merge(a, n, m);
}
\end{cpp}
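For completeness, here is a minimal sketch of how I assume the recursive function is driven: the thread team is created once, outside the recursion, and a single thread produces the top-level tasks. The actual benchmark harness is not reproduced in this report.
\begin{cpp}
#include <omp.h>
#include <stdio.h>
#include <stdlib.h>

void mergeSort(int *a, int n);            // the task-based version shown above

int main(void) {
    int n = 25000000;                     // ~100 MB of 4-byte integers
    int *a = (int *) malloc(n * sizeof(int));
    for (int i = 0; i < n; i++)           // fill with pseudo-random data
        a[i] = rand();

    double start = omp_get_wtime();
    #pragma omp parallel                  // create the thread team once
    {
        #pragma omp single                // one thread generates the tasks
        mergeSort(a, n);
    }
    printf("sorted in %f s\n", omp_get_wtime() - start);

    free(a);
    return 0;
}
\end{cpp}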
\subsection{Testing \& Evaluation}
Assuming each \textit{integer} takes 4 bytes, we fill Table 1.1 with the average time computed over 6 runs of the program. Under this assumption, each array size corresponds to the following number of elements:
\begin{itemize}
\item \textbf{100 MB}: $d = \frac{10^8}{4} = 25 * 10^6$
\item \textbf{200 MB}: $d = \frac{2 * 10^8}{4} = 50 * 10^6$
\item \textbf{300 MB}: $d = \frac{3 * 10^8}{4} = 75 * 10^6$
\item \textbf{500 MB}: $d = \frac{5 * 10^8}{4} = 125 * 10^6$
\end{itemize}
\def\arraystretch{1.3}
\begin{table}[!h]
\centering
\begin{tabular}{ |p{3cm}||p{2cm}|p{2cm}|p{2cm}|p{2cm}|p{1.5cm}| }
\hline
\multicolumn{6}{|c|}{Total Array Size} \\
\hline
Num of Threads & 100 MB & 200 MB & 300 MB & 500 MB & Average Speedup\\
\hline
1 & 6.120653 & 12.599889 & 19.009687 & 31.613370 & -\\
2 & 8.140178 & 16.292684 & 24.851261 & 41.500230 & 0.85\\
4 & 4.665155 & 9.426776 & 14.257916 & 23.927245 & 1.9\\
8 & 3.540851 & 7.224303 & 10.782175 & 19.272577 & 1.2\\
\hline
\end{tabular}
\caption{Results of Recursive Loop Parallelization using Tasks.}
\label{figsolplot}
\end{table}
\section{Merge-sort Parallelization with Sections}
Conceptually, sections are very similar to tasks. Unfortunately, sections are not as efficient as tasks. The main reason is related to the \textit{time frame} in which they execute the code.
\subsubsection{The Problem with Sections Block}
Here is a simple but instructive explanation of the \textit{Sections} bottleneck. Assume we have $n$ threads to run the code ($n > 2$) and only $2$ sections defined by the developer. Only $2$ threads execute the code, one in each \textit{section} block, while the other $n-2$ threads have to wait at the end of the \textit{sections} parallel block. Recall that the \textit{sections} parallel block contains an \textit{implicit} barrier (shown with *) at the end. Here is an illustration of this phenomenon, thanks to \textit{Hristo Iliev}'s explanation on \textit{Stack Overflow}.
\begin{cpp}
[ sections ]
Thread 0: -------< section 1 >---->*------
Thread 1: -------< section 2 >*------
Thread 2: ------------------------>*------
... *
Thread N-1: ---------------------->*------
\end{cpp}
\subsection{Parallelized Recursive Loop}
Here is the final parallelized code for the recursive loops using \textit{sections}.
\begin{cpp}
void mergeSort(int *a, int n) {
int m;
if(n < 2)
return;
// else...
m = n / 2;
#pragma omp parallel sections
{
#pragma omp section
mergeSort(a, m);
#pragma omp section
mergeSort(a + m, n - m);
}
merge(a, n, m);
}
\end{cpp}
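One detail worth noting: unless nested parallelism is enabled (it is off by default in most OpenMP runtimes), every \textit{parallel sections} region below the top level runs with a team of one thread, which is consistent with the plateau around a speedup of $2$ in the measurements below. A hypothetical variant (not the measured code) would enable nesting in the driver with \texttt{omp\_set\_nested(1)} and stop splitting below a cutoff depth to avoid oversubscription:
\begin{cpp}
void mergeSortNested(int *a, int n, int depth) {
    int m;
    if (n < 2)
        return;
    m = n / 2;
    if (depth <= 0) {                      // small subproblem: stay serial
        mergeSortNested(a, m, 0);
        mergeSortNested(a + m, n - m, 0);
    } else {
        #pragma omp parallel sections      // effective only if nesting is enabled
        {
            #pragma omp section
            mergeSortNested(a, m, depth - 1);
            #pragma omp section
            mergeSortNested(a + m, n - m, depth - 1);
        }
    }
    merge(a, n, m);                        // same merge routine as before
}
\end{cpp}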
\subsection{Testing \& Evaluation}
Assuming each \textit{integer} takes 4 bytes, we fill Table 2.1 with the average time computed over 6 runs of the program. Under this assumption, each array size corresponds to the following number of elements:
\begin{itemize}
\item \textbf{100 MB}: $d = \frac{10^8}{4} = 25 * 10^6$
\item \textbf{200 MB}: $d = \frac{2 * 10^8}{4} = 50 * 10^6$
\item \textbf{300 MB}: $d = \frac{3 * 10^8}{4} = 75 * 10^6$
\item \textbf{500 MB}: $d = \frac{5 * 10^8}{4} = 125 * 10^6$
\end{itemize}
\def\arraystretch{1.3}
\begin{table}[!h]
\centering
\begin{tabular}{ |p{3cm}||p{2cm}|p{2cm}|p{2cm}|p{2cm}|p{1.5cm}| }
\hline
\multicolumn{6}{|c|}{Total Array Size} \\
\hline
Num of Threads & 100 MB & 200 MB & 300 MB & 500 MB & Average Speedup\\
\hline
1 & 6.639830 & 13.757173 & 20.468429 & 34.843211 & -\\
2 & 3.523107 & 7.219967 & 10.830306 & 18.208799 & 1.8\\
4 & 3.519742 & 7.189632 & 10.897979 & 18.089861 & 1\\
8 & 3.590844 & 7.235916 & 10.896604 & 18.313119 & 1\\
\hline
\end{tabular}
\caption{Results of Recursive Loop Parallelization using Sections.}
\label{figsolplot}
\end{table}
\section{System Specifications}
Please refer to {\UrlFont\href{https://www.dropbox.com/s/chkpoyd5rhhf7z8/ASUS_N56JK.html?dl=0}{this}} link for the complete system specification. This information was extracted using \textit{CPU-Z}.
\end{document}
% this is both a latex file and a Sicstus Prolog file.
% just compile it in Prolog or Latex it.
% the only difference is that the Latex version comments out the following
% line: (i.e., the latex vesion starts with "%/*" and the prolog has "/*")
/*
\documentclass[11pt,fleqn]{article}
%\usepackage[LY1]{fontenc} % for Y&Y tex
%\usepackage[mtbold,LY1]{mathtime} % for Y&Y tex
\usepackage{times,latexsym,makeidx}
\pagestyle{myheadings}
\markright{ICL interpreter --- Version 0.2.1}
\makeindex
\newcommand{\btt}{\ttfamily\bfseries}
\title{Independent Choice Logic Interpreter\\Version 0.2.1
\\PROLOG CODE\thanks{Copyright \copyright 1998 David Poole. All rights
reserved.}}
\author{David Poole\\
Department of Computer Science,\\
University of British Columbia,\\
2366 Main Mall,\\
Vancouver, B.C. Canada V6T 1Z4\\
Phone: (604) 822-6254\\
Fax: (604) 822-5485\\
Email: {\ttfamily [email protected]}\\
URL: {\ttfamily http://www.cs.ubc.ca/spider/poole}}
\begin{document}
\maketitle
\begin{abstract}
This paper gives the code for a simple independent choice logic
\cite{Poole97b,Poole98a} interpreter. This is based on a naive Prolog
search (rather than some best first, or iterative deepening search).
It includes negation as failure,
controllables (although is a very limited form --- they are
not chosen, but the user can control_icl them), and debugging facilities
(including tracing, traversing proof trees, and automatic detection
of non-disjoint rules).
This is experimental code. It is written to let us explore with
ideas. I make no warranty as to its suitability for anything. Use at
your own risk.
\end{abstract}
\newpage
\section{Syntax} \label{vocabulary}
The following commands can be used in a file or
as a user command to the prolog prompt. Note that all commands end with a period.
\begin{description}
\item[{\btt rule$(R)$.}]
The facts are
given in the form of {\ttfamily rule($R$)}, where $R$ is either a rule of
the form {\ttfamily H :- B} or is just an atom (similar to the use of
$clause$ in many Prolog systems). $B$ is a body made up of {\ttfamily
true}, atoms, conjunctions (using ``\verb|&|''), disjunctions (using
``\verb|;|'') and negations (using ``\verb|~|''). This code assumes
that rules are disjoint.
\item[{\ttfamily $H$ <- $B$.}] is the same as {\ttfamily rule((H :-
B))}. Rules with empty
bodies (facts) must be written as {\ttfamily H <- true.} or as
{\ttfamily rule(H).}
\item[{\btt random$([h_1:p_1,\cdots,h_n:p_n])$.}]declares
the $h_i$ to be pairwise disjoint hypotheses, with $P(h_i)=p_i$.
\item[{\btt random$(X,h,[x_1:p_1,\cdots,x_n:p_n])$.}]
where $h$ is an atom that contains variable $X$, and the $x_i$ are
different terms, declares the atoms $h$ with $X$ replaced by each
$x_i$ to be pairwise disjoint hypotheses with $P(h[x_i])=p_i$.
\item[{\btt controllable$([h_1,\cdots,h_n])$.}]declares
the $h_i$ to be pairwise disjoint controllable variables (i.e., the
agent can choose one of the $h_i$.)
\end{description}
The following commands can be used as
user commands to the prolog prompt:
\begin{description}
\item[{\btt explain$(G,C)$.}] asks to find all explanations of $G$
given controllables $C$.
\item[{\btt thconsult$(\hbox{\em filename})$.}] loads a file called
{\em filename}. This does not erase any definitions in the database.
\item[{\btt tracing(F).}] sets tracing to have status $F$ which is
one of {\ttfamily \{yes,no,duals\}}. {\ttfamily duals} traces only the
duals (i.e., the explanations of the negation of an atom).
\item[{\btt icl_debug(F).}] sets debugging to have status $F$ which is
one of {\ttfamily \{yes,no\}}. This lets you choose which rules get
selected, so you can pinpoint missing cluases (i.e., when an answer
wasn't returned).
\item[{\btt help.}] gives a list of commands.
\item[{\btt how$(G,C,N)$.}]
is used to explain the $N$th explanation of $G$ given $C$. Called
after {\ttfamily explain$(G,C)$.}
\item[{\btt diff$(G,C,N,M)$.}]
prints the differences in the proof tree for the $N$th and $M$th
explanation of $G$ given $C$. Called after {\ttfamily explain$(G,C)$}.
\item[{\btt check$(G)$.}]
checks for disjoint rules in the explanations of $G$.
Called after {\ttfamily explain$(G,C)$.}.
\item[{\btt icl_check\_disj$(G_1,G_2)$.}]
checks for cases in which $G_1$ and $G_2$ are both true.
Called after {\ttfamily explain$(G_1,C)$} and {\ttfamily explain$(G_2,C)$}.
\item[{\btt recap$(G).$}]
recaps the explanations of $G$, with posterior probabilities of each
explanation (i.e., given $G$).
Called after {\ttfamily explain$(G,C)$}.
\item[{\btt recap.}]
gives the prior probabilities of every goal explained.
\item[{\btt icl_check\_undef.}] checks for undefined atoms --- those atoms
in the body of rules for which there is no corresponding definition.
\item[{\btt clear.}] clears the knowledge base. This should be done before reloading clauses.
\end{description}
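As a small worked example (purely illustrative; it is not part of the interpreter), the following program could be placed in a file, loaded with {\ttfamily thconsult}, and queried with {\ttfamily explain}:
\begin{verbatim}
% three independent random choices
random([tampering:0.02, no_tampering:0.98]).
random([fire:0.01, no_fire:0.99]).
random([working:0.9, broken:0.1]).

% disjoint rules for the same head
alarm <- tampering & working.
alarm <- no_tampering & fire & working.

% at the Prolog prompt:
% | ?- explain(alarm,[]).
\end{verbatim}
Each explanation printed is a set of atomic choices, and the reported probability is the sum of the probabilities of the (disjoint) explanations.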
\section{Code}
\subsection{Operators}
The ``if'' operator is written as ``{\ttfamily <-}''. In bodies,
conjunction is represented as ``{\ttfamily \&}'', disjunction as ``{\ttfamily
;}'', negation as failure as ``\verb|~|'', and inequality as
``\verb|\=|''.
\begin{verbatim} */
:- module(icl_int,[]).
:- use_module(library(logicmoo_common)).
:- op(1060, xfy, '&').
:- op(900,fy, ~).
:- op(700,xfx, \=).
:- op(1150, xfx, <- ).
/* \end{verbatim}
The following declare the predicates that are used to store the
object-level knowledge base.
\begin{verbatim} */
:- dynamic rul_icl/2.
:- dynamic control_icl/1.
:- dynamic hypothesis_icl/2.
:- dynamic nogood/2.
/* \end{verbatim}
\subsection{Clearing the knowledge base}
\index{clear}
\begin{verbatim} */
clear :-
icl_int:(
retractall(rul_icl(_,_)),
retractall(control_icl(_)),
retractall(hypothesis_icl(_,_)),
retractall(nogood(_,_)),
retractall(done_icl(_,_,_,_,_))).
show :-
icl_int:(listing(rul_icl(_,_)),
listing(control_icl(_)),
listing(hypothesis_icl(_,_)),
listing(nogood(_,_)),
listing(done_icl(_,_,_,_,_))).
/* \end{verbatim}
\subsection{Declaring Rules}
$rule(R)$ where $R$ is the form of a Prolog rule. This asserts the
rule produced.
{\ttfamily h <- b} is the same as $rule(h,b)$.
\index{rule}
\begin{verbatim} */
% (H <- B) :- rule((H :- B)).
rule(H) :- expand_rule_term(H,HH), H \=@=HH, !, rule(HH).
rule((H :- B)) :- !,
assert_if_new(icl_int:rul_icl(H,B)).
rule(H) :-
assert_if_new(icl_int:rul_icl(H,true)).
expand_rule_term(H,HH):- expand_term(H,HH), !.
/* \end{verbatim}
$lemma(G)$ is here to make the program upwardly compatible with the
version that includes lemmata. The declaration is ignored here.
\index{rule}
\begin{verbatim} */
lemma(_).
/* \end{verbatim}
\subsection{Declaring Hypotheses}
\[\hbox{\ttfamily random}([h_1:p_1,\cdots,h_n:p_n]).\]
declares the $h_i$ to be pairwise disjoint hypotheses, with $P(h_i)=p_i$.
It should be the case that
\[\sum_{i=1}^n p_i = 1\]
This asserts $hypothesis(h_i,p_i)$ for each $i$ and asserts
$ngood(h_i,h_j)$ for each $i \neq j$.
\index{random}
\begin{verbatim} */
:- op( 500, xfx, : ).
:- dynamic hypothesis_icl/2.
random(L) :-
probsum(L,T),
randomt(L,T).
probsum([],0).
probsum([_:P|R],P1) :-
probsum(R,P0),
P1 is P0+P.
randomt([],_).
randomt([H:P|R],T) :-
NP is P/T,
assertz(hypothesis_icl(H,NP)),
make_hyp_disjoint(H,R),
randomt(R,T).
time_arg_icl(P, N):-
functor(P, F,_),
assert_if_new(is_time_arg(F,N)).
/* \end{verbatim}
\index{make\_disjoint}
\begin{verbatim} */
make_hyp_disjoint(_,[]).
make_hyp_disjoint(H,[H2 : _ | R]) :-
asserta(nogood(H,H2)),
asserta(nogood(H2,H)),
make_hyp_disjoint(H,R).
/* \end{verbatim}
\[\hbox{\ttfamily random}(X,h,[x_1:p_1,\cdots,x_n:p_n]).\]
where $X$ is a variable and $h$ is an atom that contains $X$ free, and the $x_i$ are different terms, is an abbreviation for
\[\hbox{\ttfamily random}([h[X \leftarrow x_1]:p_1,\cdots,h[X \leftarrow x_n]:p_n]).\]
Where $h[X \leftarrow x_1]$ is the atom $h$ with $X$ replaced by $x_i$.
\index{random}
\begin{verbatim} */
random(X,H,L) :-
repvar(X,X1,H,H1),
asserta((nogood(H,H1) :- dif(X,X1))),
probsum(L,T),
random_each(X,H,L,T).
random_each(_,_,[],_).
random_each(X,H,[X:P|_],T) :-
NP is P/T,
asserta(hypothesis_icl(H,NP)),
fail.
random_each(X,H,[_|R],T) :-
random_each(X,H,R,T).
/* \end{verbatim}
\[\hbox{\ttfamily controllable}([h_1,\cdots,h_n]).\]
declares the $h_i$ to be pairwise disjoint controllable hypotheses.
This asserts $control(h_i)$ for each $i$ and asserts
$ngood(h_i,h_j)$ for each $i \neq j$.
\index{disjoint}
\begin{verbatim} */
:- op( 500, xfx, : ).
controllable([]).
controllable([H|R]) :-
asserta(control_icl(H)),
make_cont_disjoint(H,R),
controllable(R).
/* \end{verbatim}
\index{make\_cont\_disjoint}
\begin{verbatim} */
make_cont_disjoint(_,[]).
make_cont_disjoint(H,[H2 | R]) :-
asserta(nogood(H,H2)),
asserta(nogood(H2,H)),
make_cont_disjoint(H,R).
/* \end{verbatim}
\section{The Internals of the Interpreter}
\subsection{Meta-interpreter}
The meta-interpreter is implemented using the relation:
\[prove(G,C0,C1,R0,R1,P0,P1,T)\]
where
\begin{description}
\item $G$ is the goal to be proved.
\item $R1-R0$ is a difference list of random assumptions to prove $G$.
\item $C1-C0$ is a difference list of controllable assumptions to prove $G$.
\item $P0$ is the probability of $R0$, $P1$ is the probability of $R1$.
\item $T$ is the returned proof tree.
\end{description}
The first rules defining $prove$ are the special purpose rules
for commands that are defined in the system.
\index{prove}
\begin{verbatim} */
prove(H,C0,C1,R0,R1,P0,P1,AT):-
prove1(H,C0,C1,R0,R1,P0,P1,AT)
*-> true
; (tracn(yes),writeln_icl(['Failed: ',H,' assuming: ',R0,' prob=',P0]), fail).
prove1(ans(A),C,C,R,R,P,P,ans(A)) :- !,
ans(A,C,R, _R , P, _T).
prove1(report_cp,C,C,R,R,P,P,_) :- !,
wdmsg(missing(report_cp(C,R,P))).
prove1(report_evidence,C,C,R,R,P,P,_) :- !,
wdmsg(missing(report_evidence(C,R,P))).
/* \end{verbatim}
The remaining rules are the real definition
\begin{verbatim} */
prove1(true,C,C,R,R,P,P,true) :- !.
prove1((A & B),C0,C2,R0,R2,P0,P2,(AT & BT)) :- !,
prove(A,C0,C1,R0,R1,P0,P1,AT),
prove(B,C1,C2,R1,R2,P1,P2,BT).
prove1((A ; _),C0,C2,R0,R2,P0,P2,AT) :-
prove(A,C0,C2,R0,R2,P0,P2,AT).
prove1((_ ; B),C0,C2,R0,R2,P0,P2,BT) :-
prove(B,C0,C2,R0,R2,P0,P2,BT).
prove1((~ G),C0,C0,R0,R2,P0,P3,if(G,not)) :-
findall(R2,prove(G,C0,_,R0,R2,P0,_,_), ExpG),
duals(ExpG,R0,[exp(R0,P0)],ADs),
make_disjoint(ADs,MDs),
( (tracn(yes); tracn(duals)) ->
writeln_icl([' Proved ~ ',G ,', assuming ',R0,'.']) ,
writeln_icl([' explanations of ',G,': ',ExpG]),
writeln_icl([' duals: ',ADs]),
writeln_icl([' disjointed duals: ',MDs])
; true),!,
member(exp(R2,P3),MDs).
prove1(H,_,_,R,_,P,_,_) :-
tracn(yes),
writeln_icl(['Proving: ',H,' assuming: ',R,' prob=',P]),
fail.
prove1(H,C,C,R,R,P,P,if(H,assumed)) :-
hypothesis_icl(H,_),
member(H,R),
( tracn(yes) -> writeln_icl([' Already assumed: ',H ,'.']) ; true).
prove1(H,C,C,R,[H|R],P0,P1,if(H,assumed)) :-
hypothesis_icl(H,PH),
\+ member(H,R),
PH > 0,
good(H,R),
P1 is P0*PH,
( tracn(yes) -> writeln_icl([' Assuming: ',H ,'.']) ; true).
prove1(H,C,C,R,R,P,P,if(H,given)) :-
control_icl(H),member(H,C),!,
( tracn(yes) -> writeln_icl([' Given: ',H,'.']) ; true).
prove1(H,C,C,R,R,P,P,if(H,builtin)) :-
builtin(H), call(H).
prove1(A \= B,C,C,R,R,P,P,if(A \= B,builtin)) :-
dif(A,B).
prove1(G,C0,C1,R0,R1,P0,P1,if(G,BT)) :-
rul_icl(G,B),
( tracn(yes) -> writeln_icl([' Using rule: ',G ,' <- ',B,'.']) ; true),
( debgn(yes) -> deb(G,B) ; true),
tprove(G,B,C0,C1,R0,R1,P0,P1,BT),
( tracn(yes) -> writeln_icl([' Proved: ',G ,' assuming ',R1,'.']) ; true).
tprove(_,B,C0,C1,R0,R1,P0,P1,BT) :-
prove(B,C0,C1,R0,R1,P0,P1,BT).
tprove(G,_,_,_,R,_,P,_,_) :-
tracn(yes),
writeln_icl([' Retrying: ',G,' assuming: ',R,' prob=',P]),
fail.
/* \end{verbatim}
We allow many built in relations to be evaluated directly by Prolog.
\index{builtin}
\begin{verbatim} */
:- dynamic builtin/1.
%:- multifile builtin/1.
builtin((_ is _)).
builtin((_ < _)).
builtin((_ > _)).
builtin((_ =< _)).
builtin((_ >= _)).
builtin((_ = _)).
builtin((user_help)).
/* \end{verbatim}
\begin{verbatim} */
deb(G,B) :-
writeln_icl([' Use rule: ',G ,' <- ',B,'? [y, n or off]']),
read_icl(A),
( A = y -> true ;
A = n -> fail ;
A = off -> icl_debug(off) ;
true -> writeln_icl(['y= use this rule, n= use another rule, off=debugging off']) ,
deb(G,B)
).
/* \end{verbatim}
\subsection{Negation}
\[duals(Es,R0,D0,D1)\]
is true if $Es$ is a list of composite choices (all of whose tail is
$R0$), and $D1-D2$ is a list of $exp(R1,P1)$ such that $R1-R0$ is a
hitting set of negations of $Es$.
\index{dual}
\begin{verbatim} */
duals([],_,D,D).
duals([S|L],R0,D0,D2) :-
split_each(S,R0,D0,[],D1),
duals(L,R0,D1,D2).
/* \end{verbatim}
\[split\_each(S,R0,D0,D,D1)\]
is true if $S$ is a composite choice (with tail $R0$), and
$D2$ is $D$ together with the hitting set of negations of $D0$.
\index{split\_each}
\begin{verbatim} */
split_each(R0,R0,_,D0,D0) :- !.
split_each([A|R],R0,D0,PDs,D2) :-
negs(A,NA),
add_to_each(A,NA,D0,PDs,D1),
split_each(R,R0,D0,D1,D2).
/* \end{verbatim}
\[add\_to\_each(S,R0,D0,D,D1)\]
is true if $S$ is a composite choice (with tail $R0$), and
$D2$ is $D$ together with the hitting set of negations of $D0$.
\index{add\_to\_each}
\begin{verbatim} */
add_to_each(_,_,[],D,D).
add_to_each(A,NA,[exp(E,_)|T],D0,D1) :-
member(A,E),!,
add_to_each(A,NA,T,D0,D1).
add_to_each(A,NA,[exp(E,PE)|T],D0,D2) :-
bad(A,E),!,
insert_exp(exp(E,PE),D0,D1),
add_to_each(A,NA,T,D1,D2).
add_to_each(A,NA,[B|T],D0,D2) :-
ins_negs(NA,B,D0,D1),
add_to_each(A,NA,T,D1,D2).
/* \end{verbatim}
\[ins\_negs(NA,B,D0,D1)\]
is true if adding the elements of $NA$ to composite choice $B$, and
adding these to $D0$ produces $D1$.
\index{ins\_negs}
\begin{verbatim} */
ins_negs([],_,D0,D0).
ins_negs([N|NA],exp(E,PE),D,D2) :-
hypothesis_icl(N,PN),
P is PN * PE,
insert_exp(exp([N|E],P),D,D1),
ins_negs(NA,exp(E,PE),D1,D2).
/* \end{verbatim}
\[insert\_exp(E,L0,L1)\]
is true if inserting composite choice $E$ into list $L0$
produces list $L1$. Subsumed elements are removed.
\index{insert\_exp}
\begin{verbatim} */
insert_exp(exp(_,0.0),L,L) :-!.
insert_exp(E,[],[E]) :- !.
insert_exp(exp(E,_),D,D) :-
member(exp(E1,_),D),
icl_subset(E1,E),!.
insert_exp(exp(E,P),[exp(E1,_)|D0],D1) :-
icl_subset(E,E1),!,
insert_exp(exp(E,P),D0,D1).
insert_exp(exp(E,P),[E1|D0],[E1|D1]) :-
insert_exp(exp(E,P),D0,D1).
/* \end{verbatim}
\subsection{Making Composite Choices Disjoint}
\[make\_disjoint(L,SL)\]
is true if $L$ and $SL$ are lists of the form $exp(R,P)$, such that
$L1$ is a icl_subset of $L$ containing minimal elements with minimal
$R$-values.
\index{ins\_neg}
\begin{verbatim} */
make_disjoint([],[]).
make_disjoint([exp(R,P)|L],L2) :-
member(exp(R1,_),L),
\+ incompatible(R,R1),!,
member(E,R1), \+ member(E,R),!,
negs(E,NE),
split(exp(R,P),NE,E,L,L1),
make_disjoint(L1,L2).
make_disjoint([E|L1],[E|L2]) :-
make_disjoint(L1,L2).
split(exp(R,P),[],E,L,L1) :-
hypothesis_icl(E,PE),
P1 is P*PE,
insert_exp1(exp([E|R],P1),L,L1).
split(exp(R,P),[E1|LE],E,L,L2) :-
hypothesis_icl(E1,PE),
P1 is P*PE,
split(exp(R,P),LE,E,L,L1),
insert_exp1(exp([E1|R],P1),L1,L2).
negs(E,NE) :-
findall(N,nogood(E,N),NE).
insert_exp1(exp(_,0.0),L,L) :-!.
insert_exp1(exp(E,_),D,D) :-
member(exp(E1,_),D),
icl_subset(E1,E),!.
insert_exp1(exp(E,P),D,[exp(E,P)|D]).
/* \end{verbatim}
\subsection{Nogoods}
We assume three relations for handling $nogoods$:
\[good(A,L)\]
fails if $[A|L]$ has a subset that has been declared nogood. We can
assume that no subset of $L$ is nogood (this allows us to more
efficiently index nogoods).
\[allgood(L)\]
fails if $L$ has a subset that has been declared nogood.
\index{good}
\begin{verbatim} */
allgood([]).
allgood([H|T]) :-
good(H,T),
allgood(T).
good(A,T) :-
\+ ( makeground((A,T)), bad(A,T)).
bad(A,[B|_]) :-
nogood(A,B).
bad(A,[_|R]) :-
bad(A,R).
/* \end{verbatim}
\subsection{Explaining}
To find an explanation for a subgoal $G$ given controllables $C$ and
building on random assumables $R$, we do
an $explain(G,C,R)$. Both $R$ and $C$ are optional.
\index{explain}
\begin{verbatim} */
:- dynamic done_icl/4.
explain(G) :-
explain(G,[],[]).
explain(G,C) :-
explain(G,C,[]).
explain(G,C,R) :-
statistics(runtime,_),
ex(G,C,R).
example_query(G):- call(G).
:- dynamic false/6.
/* \end{verbatim}
$ex(G,C,R)$ tries to prove $G$ with controllables $C$ and random
assumptions $R$. It repeatedly proves $G$, calling $ans$ for each
successful proof.
\index{ex}
\index{ans}
\begin{verbatim} */
:- dynamic done_icl/5.
ex(G,C,R0) :-
prove(G,C,_,R0,R,1,P,T),
ans(G,C,R0,R,P,T), fail.
ex(G,C,R) :-
done_icl(G,C,R,_,Pr),
append(C,R,CR),
writeln_icl([nl,'Prob( ',G,' | ',CR,' ) = ',Pr]),
statistics(runtime,[_,Time]),
writeln_icl(['Runtime: ',Time,' msec.']).
ex(G,C,R) :-
\+ done_icl(G,C,R,_,_),
append(C,R,CR),
writeln_icl([nl,'Prob( ',G,' | ',CR,' ) = ',0.0]),
statistics(runtime,[_,Time]),
writeln_icl(['Runtime: ',Time,' msec.']).
ans(G,C,R0,R,P,T) :-
allgood(R),
( retract(done_icl(G,C,R0,Done,DC))
-> true
; Done=[], DC=0),
DC1 is DC+P,
asserta(done_icl(G,C,R0,[expl(R,P,T)|Done],DC1)),
length(Done,L),
append(C,R0,Given),
findall(F,rul_icl(F, true),Init),
writeln_icl([nl,'***** Explanation #',L,' Prior = ',P,'\n',
(of:-G),('given':-Given),init:-Init,(result:-R)]),
writeln_icl([]).
/* \end{verbatim}
$recap$ is used to give a list of all conditional probabilities computed.
\index{ans}
\begin{verbatim} */
recap :-
done_icl(G,C,R,_,Pr),
append(C,R,CR),
writeln_icl(['Prob( ',G,' | ',CR,' ) = ',Pr]),
fail.
recap.
recap(G) :-
recap(G,_,_).
recap(G,C) :-
recap(G,C,_).
recap(G,C,R) :-
done_icl(G,C,R,Expls,Pr),
append(C,R,CR),
writeln_icl(['Prob( ',G,' | ',CR,' ) = ',Pr]),
writeln_icl(['Explanations:']),
recap_each(_,Expls,Pr).
recap_each(0,[],_).
recap_each(N,[expl(R,P,_)|L],Pr) :-
recap_each(N0,L,Pr),
N is N0+1,
PP is P/Pr,
writeln_icl([N0,': ',R,' Post Prob=',PP]).
/* \end{verbatim}
\section{Debugging}
\subsection{Help}
\begin{verbatim} */
:- rule(h <- user_help).
user_help :- writeln_icl([
'rule(R).','
asserts either a rule of the form H :- B or an atom.','
H <- B. ','
is the same as rule((H :- B)). ','
Rules with empty bodies (facts) must be written as H <- true. or as rule(H).','
random([h1:p1,...,hn:pn]).','
declares the hi to be pairwise disjoint hypotheses, with P(hi)=pi. ','
random(X,h,[x1:p1,...,xn:pn]).','
declares h[X/xi] to be pairwise disjoint hypotheses with P(h[X/xi])=pi.','
controllable([h1,...,hn]).','
declares the hi to be pairwise disjoint controllable variables.','
explain(G,C). ','
finds explanations of G given list of controlling values C.','
how(G,C,R,N).','
is used to explain the Nth explanation of G given controllables C,','
and randoms R.','
diff(G,C,N,M) ','
prints difference in the proof tree for the Nth and Mth explanation','
of G given C.','
icl_check(G,C).','
checks for disjoint rules in the explanations of G given C.','
recap(G). ','
recaps the explanations of G, with posterior probabilities (given G).','
recap. ','
gives the prior probabilities of everything explained.','
thcons(filename). ','
loads a file called filename. ','
tracing(F). ','
sets tracing to have status F which is one of {yes,no,duals}.','
icl_debug(F). ','
sets debugging to have status F which is one of {yes,no}.','
check_undef.','
checks for undefined atoms in the body of rules.','
clear.','
    clears the knowledge base. Do this before reloading.','
Reconsulting a program will not remove old clauses and declarations.','
help.','
print this message.']).
/* \end{verbatim}
\subsection{Tracing}
Tracing is used to trace the details of the search tree. It is ugly
except for very small programs.
\index{tracing}
\begin{verbatim} */
:- dynamic tracn/1.
tracing(V) :-
member(V,[yes,no,duals]),!,
retractall(tracn(_)),
asserta(tracn(V)).
tracing(V) :-
member(V,[on,y]),!,
retractall(tracn(_)),
asserta(tracn(yes)).
tracing(V) :-
member(V,[off,n]),!,
retractall(tracn(_)),
asserta(tracn(no)).
tracing(_) :-
writeln_icl(['Argument to tracing should be in {yes,no,duals}.']),
!,
fail.
tracn(no).
% user_help :- unix(shell('more help')).
/* \end{verbatim}
\subsection{Debugging}
Debugging is useful for determining why a program failed.
\begin{verbatim} */
:- dynamic debgn/1.
debgn(no).
icl_debug(V) :-
member(V,[yes,on,y]),!,
icl_int:retractall(debgn(_)),
icl_int:assert(debgn(yes)).
icl_debug(V) :-
member(V,[no,off,n]),!,
icl_int:retractall(debgn(_)),
icl_int:assert(debgn(no)).
icl_debug(_) :-
writeln_icl(['Argument to icl_debug should be in {yes,no}.']), !, fail.
/* \end{verbatim}
\subsection{How was a goal proved?}
The programs in this section are used to explore how proofs were generated.
\[how(G,C,R,N)\]
is used to explain the $N$th explanation of $G$ given controllables
$C$, and randoms $R$. $R$ and $C$ are optional.
\index{how}
\begin{verbatim} */
how(G,N) :-
how(G,[],[],N).
how(G,C,N) :-
how(G,C,[],N).
how(G,C,R,N) :-
tree(G,C,R,N,T),
traverse(T).
/* \end{verbatim}
\[tree(G,C,R,N,NT)\]
is true if $NT$ is the proof tree for the $N$th explanation of $G$ given $C\wedge R$.
\index{tree}
\begin{verbatim} */
tree(G,C,N,NT):-
tree(G,C,[],N,NT).
tree(G,C,R,N,NT) :-
done_icl(G,C,R,Done,_),
nthT(Done,N,NT).
nthT([expl(_,_,T) |R],N,T) :-
length(R,N),!.
nthT([_|R],N,T) :-
nthT(R,N,T).
/* \end{verbatim}
\[traverse(T)\]
is true if T is a tree being traversed.
\index{traverse}
\begin{verbatim} */
traverse(if(H,true)) :-
writeln_icl([H,' is a fact']).
traverse(if(H,builtin)) :-
writeln_icl([H,' is built-in.']).
traverse(if(H,assumed)) :-
writeln_icl([H,' is assumed.']).
traverse(if(H,given)) :-
writeln_icl([H,' is a given controllable.']).
traverse(if(H,not)) :-
writeln_icl([~ H,' is a negation - I cannot trace it. Sorry.']).
traverse(if(H,B)) :-
B \== true,
B \== builtin,
B \== assumed,
B \== given,
writeln_icl([H,' :-']),
printbody(B,1,Max),
read_icl(Comm),
interpretcommand(Comm,B,Max,if(H,B)).
/* \end{verbatim}
\[printbody(B,N)\]
is true if B is a body to be printed and N is the
count of atoms before B was called (this assumes that ``{\ttfamily \&}'' is
left-associative).
\index{printbody}
\begin{verbatim} */
printbody((A&B),N,N2) :-
printbody(A,N,N),
N1 is N+1,
printbody(B,N1,N2).
printbody(if(H,not),N,N) :-!,
writeln_icl([' ',N,': ~ ',H]).
printbody(if(H,_),N,N) :-
writeln_icl([' ',N,': ',H]).
printbody(true,N,N):-!,
writeln_icl([' ',N,': true ']).
printbody(builtin,N,N):-!,
writeln_icl([' ',N,': built in ']).
printbody(assumed,N,N):-!,
writeln_icl([' ',N,': assumed ']).
printbody(given,N,N):-!,
writeln_icl([' ',N,': given ']).
/* \end{verbatim}
\[interpretcommand(Comm,B,Max,G)\]
interprets the command $Comm$ on body $B$, where $Max$ is the number of
the last atom in $B$ and $G$ is the node being traversed, so that the
traversal of $G$ can resume once the command has been handled.
\index{interpretcommand}
\begin{verbatim} */
interpretcommand(N,B,Max,G) :-
integer(N),
N > 0,
N =< Max,
nth(B,N,E),
traverse(E),
traverse(G).
interpretcommand(up,_,_,_).
interpretcommand(N,_,Max,G) :-
integer(N),
(N < 1 ; N > Max),
writeln_icl(['Number out of range: ',N]),
traverse(G).
interpretcommand(help,_,_,G) :-
writeln_icl(['Give either a number, up or exit. End command with a Period.']),
traverse(G).
interpretcommand(C,_,_,G) :-
\+ integer(C),
C \== up,
C \== help,
C \== exit,
C \== end_of_file,
writeln_icl(['Illegal Command: ',C,' Type "help." for help.']),
traverse(G).
% nth(S,N,E) is true if E is the N-th element of conjunction S
nth(A,1,A) :-
\+ (A = (_&_)).
nth((A&_),1,A).
nth((_&B),N,E) :-
N>1,
N1 is N-1,
nth(B,N1,E).
/* \end{verbatim}
\subsection{Diff}
\[diff(G,C,N,M)\] \index{diff}
prints the differences in the proof tree for the $N$th and $M$th
explanation of $G$ given $C$.
\begin{verbatim} */
diff(G,C,N,M) :-
tree(G,C,N,NT),
tree(G,C,M,MT),
diffT(NT,MT).
/* \end{verbatim}
\[diffT(T1,T2)\]
prints the differences in the proof trees $T1$ and $T2$.
\begin{verbatim} */
diffT(T,T) :-
writeln_icl(['Trees are identical']).
diffT(if(H,B1),if(H,B2)) :-
immdiff(B1,B2),!,
writeln_icl([H,' :-']),
printbody(B1,1,N1),
writeln_icl([H,' :-']),
printbody(B2,N1,_).
diffT(if(H,B1),if(H,B2)) :-
diffT(B1,B2).
diffT((X&Y),(X&Z)) :- !,
diffT(Y,Z).
diffT((X&_),(Y&_)) :-
diffT(X,Y).
immdiff((A&_),(B&_)) :-
immdiff(A,B).
immdiff((_&A),(_&B)) :-
immdiff(A,B).
immdiff((_&_),if(_,_)).
immdiff(if(_,_),(_&_)).
immdiff(if(A,_),if(B,_)) :-
\+ A = B.
immdiff(if(_,_),B) :-
atomic(B).
immdiff(A,if(_,_)) :-
atomic(A).
/* \end{verbatim}
\subsection{Check}
\[icl_check(G,C,R)\]\index{icl_check}
checks the explanations of $G$ given controllables $C$ and randoms $R$
for rules which violate the assumption that the rules for an atom have
pairwise disjoint bodies. Any two explanations which are not disjoint
are reported, together with the assumptions under which they are
compatible.
\begin{verbatim} */
icl_check :-
icl_check(_,_,_).
icl_check(G) :-
icl_check(G,_,_).
icl_check(G,C) :-
icl_check(G,C,_).
icl_check(G,C,R) :-
done_icl(G,C,R,Done,_),
check_done(Done).
check_done([expl(R1,_,T1)|D]) :-
memberR(expl(R2,_,T2),D,DR),
\+ incompatible(R1,R2),
length(D,LD),
length(DR,L2),
icl_union(R1,R2,R),
writeln_icl(['Non-disjoint rules ',LD,' & ',L2,' assuming ',R]),
diffT(T1,T2).
check_done([_|D]) :-
check_done(D).
/* \end{verbatim}
\subsection{Check Disjoint Explanations}
\[check\_disj(G0,G1)\] \index{check\_disj}
checks whether explanations of $G0$ and $G1$ are disjoint. This is useful when $G0$ and $G1$ should be incompatible.
\begin{verbatim} */
check_disj(G0,G1):-
check_disj(G0,G1,_,_).
check_disj(G0,G1,C,R):-
done_icl(G0,C,R,D0,_),
done_icl(G1,C,R,D1,_),
memberR(expl(R0,_,_),D0,LD0),
memberR(expl(R1,_,_),D1,LD1),
\+ incompatible(R0,R1),
length(LD0,L0),
length(LD1,L1),
append(C,R,CR),
writeln_icl(['Explanation ',L0,' of ',G0,' and ',L1,' of ',G1,
', given ',CR,' are compatible assuming:']),
icl_union(R0,R1,R),
writeln_icl([R]).
incompatible(R1,R2) :-
member(A1,R1),
member(A2,R2),
nogood(A1,A2).
/* \end{verbatim}
\subsection{Checking for undefined atoms}
{\em check\_undef} searches through the knowledge base looking for a rule
containing an atom in the body which doesn't have a corresponding
definition (i.e., a clause with it at the head, or an atomic choice).
\begin{verbatim} */
check_undef :-
forall(rul_icl(H,B),
forall(body_elt_undefined(B,H,B), true)), !.
body_elt_undefined(true,_,_) :- !,fail.
body_elt_undefined((A&_),H,B) :-
body_elt_undefined(A,H,B).
body_elt_undefined((_&A),H,B) :- !,
body_elt_undefined(A,H,B).
body_elt_undefined((~ A),H,B) :- !,
body_elt_undefined(A,H,B).
body_elt_undefined((A;_),H,B) :-
body_elt_undefined(A,H,B).
body_elt_undefined((_;A),H,B) :- !,
body_elt_undefined(A,H,B).
body_elt_undefined(call(A),H,B) :- !,
body_elt_undefined(A,H,B).
body_elt_undefined(_ \= _,_,_) :- !, fail.
%body_elt_undefined(A,_,_) :-
% askabl(A),!,fail.
%body_elt_undefined(A,_,_) :-
% assumabl(A),!,fail.
body_elt_undefined(A,_,_) :-
builtin(A),!,fail.
body_elt_undefined(A,_,_) :-
hypothesis_icl(A,_),!,fail.
body_elt_undefined(A,_,_) :-
control_icl(A),!,fail.
body_elt_undefined(A,_,_) :-
rul_icl(A,_),!,fail.
body_elt_undefined(A,H,B) :-
writeln_icl(['Warning: no clauses for ',A,' in rule ',(H <- B),'.']),!,fail.
/* \end{verbatim}
\section{Miscellaneous}
\subsection{File Handling}
To consult a probabilistic Horn abduction file, you should issue the command
\begin{verse}
{\bf thcons}\em (filename).
\end{verse}
The following is the definition of {\em thcons}. Basically we just
keep reading the file and executing the commands in it until we stop.
This does not clear any previous database. If you reconsult a file you
will get multiple instances of clauses and this will undoubtedly screw you up.
\index{thcons}
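For example (the file name is a placeholder):
\begin{verbatim}
?- clear.                     % remove any previously loaded theory
?- thcons('my_theory.pl').    % load the ICL theory from the file
\end{verbatim}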
\begin{verbatim} */
thcons(File) :-
current_input(OldFile),
open(File,read,Input),
set_input(Input),
read_icl(T),
read_all_icl(T),
set_input(OldFile),
writeln_icl(['ICL theory ',File,' consulted.']),!.
/* \end{verbatim}
\index{read\_all}
\begin{verbatim} */
read_all_icl(end_of_file) :- !.
read_all_icl(T) :-
once(on_icl_read(T);format('Warning: ~w failed~n',[T])),
read_icl(T2),
read_all_icl(T2).
on_icl_read(:- T):- !, on_icl_read(T).
on_icl_read(time_arg(F,A)):- !, on_icl_read(time_arg_icl(F,A)),!.
on_icl_read('<-'(H,B)):- !, on_icl_read(rule((H :- B))),!.
on_icl_read(T):- must_or_rtrace(icl_int:call(T)),!.
read_icl(T):- icl_int:read_term(current_input,T,[module(icl_int)]).
/* \end{verbatim}
\subsection{Utility Functions}
\subsubsection{List Predicates}
$append(X,Y,Z)$ is the standard list append predicate; its clauses are
commented out below because the system's predefined version is used.
\index{append}
\begin{verbatim} */
/*append([],L,L).
append([H|X],Y,[H|Z]) :-
append(X,Y,Z).*/
/* \end{verbatim}
\index{icl_union}
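From the definition below, $icl\_union(X,Y,Z)$ holds when $Z$ is $Y$
with those members of $X$ that are not already in $Y$ added at the
front. For example:
\begin{verbatim}
?- icl_union([a,b],[b,c],Z).
Z = [a,b,c]
\end{verbatim}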
\begin{verbatim} */
icl_union([],L,L).
icl_union([H|X],Y,Z) :-
member(H,Y),!,
icl_union(X,Y,Z).
icl_union([H|X],Y,[H|Z]) :-
icl_union(X,Y,Z).
/* \end{verbatim}
\index{member}
\begin{verbatim} */
%member(A,[A|_]).
%member(A,[_|R]) :-
% member(A,R).
/* \end{verbatim}
\index{memberR}
\begin{verbatim} */
memberR(A,[A|R],R).
memberR(A,[_|T],R) :-
memberR(A,T,R).
/* \end{verbatim}
\index{icl_subset}
\begin{verbatim} */
icl_subset([],_).
icl_subset([H|T],L) :-
member(H,L),
icl_subset(T,L).
/* \end{verbatim}
\subsubsection{Term Management}
\index{makeground}
\begin{verbatim} */
makeground(T) :-
numbervars(T,0,_,[attvars(bind)]).
/* \end{verbatim}
$repvar(X,X1,T,T1)$ replaces each occurrence of $X$ in $T$ by $X1$ forming $T1$.
\index{repvar}
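For instance, the query below binds {\ttfamily T} to
{\ttfamily f(foo,g(foo,b))}; other variables and ground subterms are
left unchanged:
\begin{verbatim}
?- repvar(X,foo,f(X,g(X,b)),T).
\end{verbatim}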
\begin{verbatim} */
repvar(X,X1,Y,X1) :- X==Y, !.
repvar(_,_,Y,Y) :- var(Y), !.
repvar(_,_,Y,Y) :- ground(Y), !.
repvar(X,X1,[H|T],[H1|T1]) :- !,
repvar(X,X1,H,H1),
repvar(X,X1,T,T1).
repvar(X,X1,T,T1) :-
T =.. L,
repvar(X,X1,L,L1),
T1 =.. L1.
/* \end{verbatim}
\subsubsection{Output}
\index{writeln_icl}
\begin{verbatim} */
writeln_icl([]) :- nl.
writeln_icl([H|T]) :-
writeln_icl_1(H),
writeln_icl(T).
writeln_icl_1(L):- L == nl, !, format('~N~n',[]).
writeln_icl_1(L):- \+ compound(L), !, write(L).
writeln_icl_1(call(L)):- !, call(L).
writeln_icl_1(L):- portray_clause(L).
:- fixup_exports.
/* \end{verbatim}
\bibliographystyle{plain}
%\bibliography{/ai/poole/bib/reason.string,/ai/poole/bib/poole_local_reason,/ai/poole/bib/reason}
%\bibliography{../../../book/bib/string,../../../book/bib/poole,../../../book/bib/reason}
\begin{thebibliography}{1}
\bibitem{Poole97b}
D.~Poole.
\newblock The independent choice logic for modelling multiple agents under
uncertainty.
\newblock {\em Artificial Intelligence}, 94:7--56, 1997.
\newblock special issue on economic principles of multi-agent systems.
\bibitem{Poole98a}
D.~Poole.
\newblock Abducing through negation as failure: stable models in the
{Independent Choice Logic}.
\newblock {\em Journal of Logic Programming}, to appear, 1998.
\end{thebibliography}
\printindex
\end{document}
*/
In order to give a better sense of the approach to reliability analysis and
optimization presented in \sref{chaos-reliability-analysis} and
\sref{chaos-optimization}, we consider a concrete application, meaning that we
specify the uncertain parameters and discuss the accompanying computations. This
application is also utilized for the quantitative evaluation of our technique
presented in the next section, \sref{chaos-optimization-results}.
\subsection{\problemtitle}
Assume that the structure of the reliability model $R(\cdot | \vg)$ of the
system at hand is the one given in \eref{reliability-model} where each
individual reliability function $R_i(\cdot | \vg_i)$ is the one shown in
\eref{weibull-reliability} with its own parameters $\scale_i$ and $\shape_i$.
During each iteration, the temperature of processing element~$i$ exhibits \nk{i}
cycles. Each cycle generally has different characteristics and hence causes a
different amount of damage to the processing element. This aspect is accounted
for by adjusting $\scale_i$ as shown in \eref{thermal-cycling-scale}. The shape
parameter $\shape_i$ is known to be insensitive to temperature \cite{chang2006}.
For simplicity, assume that $\shape_i$ does not depend on process parameters
either, and that $\shape_i = \shape$ for $i = \range{1}{\np}$.
Under the above assumptions, \rref{weibull-homogeneity} applies, and the
lifetime $\life: \Omega \to \real$ of the system has a Weibull distribution as
follows:
\[
\life | (\scale, \shape) \sim \mathrm{Weibull}(\scale, \shape)
\]
where $\scale$ is the one given in \rref{weibull-homogeneity} combined with
\eref{thermal-cycling-scale}. Even though the reliability model has two
parameters, only one of them is uncertain to the designer, namely $\scale$.
Therefore, we treat the model as if it were parameterized only by $\scale$. The
shape parameter $\shape$ is assumed to be implicitly given.
In the case of reliability analysis under process variation without any
accompanying exploration of the design space, one can proceed to constructing a
\ac{PC} expansion of $\scale$. Having obtained this lightweight surrogate, the
reliability of the system can be studied from various perspectives. In the
current scenario, however, the quantity of interest \g is the one given in
\eref{chaos-optimization-quantity}, since it allows for evaluating the objective
function and constraints defined in \eref{chaos-optimization-objective} and
\eref{chaos-optimization-constraints}, respectively. In
\eref{chaos-optimization-quantity}, the component denoted by \life stands for
the parameterization of the reliability model; consequently, it is $\scale$ in
the illustrative application developed in this section.
Let us now turn our attention to the uncertain parameters \vu of the problem
being addressed. We focus on two crucial process parameters: the effective
channel length and gate oxide thickness. Each processing element is then
assigned two random variables corresponding to the two process parameters, which
means that $\nu = 2 \np$ in the current example; see also \sref{chaos-problem}.
\begin{remark}
The variability in a process parameter at a spatial location can be modeled as a
composition of several parts---such as inter-lot, inter-wafer, inter-die, and
intra-die variations---which is demonstrated in
\sref{chaos-transient-application}. In this section, we illustrate a different
approach. From a mathematical perspective, it is sufficient to consider only one
random variable per location with an adequate distribution and correlations with
respect to the other locations.
\end{remark}
Based on \sref{chaos-formulation}, the parameters \vu are assumed to be given as
a set of marginal distributions and a correlation matrix denoted by
$\set{F_i}_{i = 1}^\nu$ and $\correlation{\vu}$, respectively. Note that the
number of distinct marginals is only two, since \np components of \vu correspond
to the same process parameter.
Both process parameters, the effective channel length and gate oxide thickness,
correspond to Euclidean distances; they take values on bounded intervals of the
positive half of the real line. Consequently, similarly to
\sref{chaos-transient-application}, we model the two process parameters using
the four-parameter family of beta distributions shown in
\eref{beta-distribution}. Without loss of generality, the parameters are assumed
to be independent of each other, and the correlations between those elements of
\vu that correspond to the same process parameter are assumed to be given by the
correlation function shown in \eref{bayes-correlation}.
The process parameters manifest themselves in the calculations associated with
the power model shown in \eref{chaos-power-model-bulk} through static power.
Analogously to \sref{chaos-transient-application}, the modeling here is based on
\up{SPICE} simulations of a series of \up{CMOS} inverters. The inverters are
taken from the 45-nm open cell library by NanGate \cite{nangate} and configured
according to the 45-nm \up{PTM} \up{HP} model \cite{ptm}. The simulations are
performed on a fine-grained and sufficiently broad three-dimensional grid
comprising the effective channel length, gate oxide thickness, and temperature;
the results are tabulated. An interpolation algorithm is subsequently employed
whenever static power is to be evaluated at a particular point within the range
of the grid. The output of this model is scaled up to account for about 40\% of
the total power consumption \cite{liu2007}. Regarding temperature, the thermal
\up{RC} circuit utilized for dynamic steady-state analysis is constructed by
virtue of HotSpot \cite{skadron2003} as described in \sref{temperature-model}.
At this point, the two outputs of Stage~1 are now specified.
\subsection{Probability Transformation}
At Stage~2 in \fref{chaos-overview}, the uncertain parameters \vu are
transformed into a vector of independent random variables \vz via a suitable
transformation $\transform$. Specifically, we use the one given in
\eref{probability-transformation}, which also includes model order reduction.
Unlike \sref{chaos-transient-application}, in this section, we let \vz obey the
standard Gaussian distribution and, therefore, tailor $\transform$ accordingly;
see \xref{probability-transformation}.
\subsection{Surrogate Construction}
Since the auxiliary variables $\vz = (\z_i)_{i = 1}^\nz$ are Gaussian, the
polynomial basis considered at Stage~3 is to be composed of Hermite polynomials,
which is the exact scenario described in \xref{polynomial-chaos}. The variables
also tell us how to approach numerical integration needed for evaluation of the
coefficients of \ac{PC} expansions: since we are interested in integrals with
respect to the standard Gaussian measure, Gauss--Hermite quadratures
\cite{maitre2010} are worth considering. These quadratures are especially
efficient, since they belong to the class of Gaussian quadratures and thus
inherit their properties; see \xref{numerical-integration}.
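To make this step concrete, the following is a sketch of the usual
nonintrusive projection formula under the stated assumptions, where
$\{(\vz_k, w_k)\}_k$ denotes the nodes and weights of the chosen
Gauss--Hermite rule (normalized with respect to the standard Gaussian
measure), and $\langle \cdot, \cdot \rangle$ is the corresponding inner
product; these symbols are introduced here only for illustration:
\[
\hat{\vg}_{\vi}
= \frac{\langle \vg, \psi_{\vi} \rangle}{\langle \psi_{\vi}, \psi_{\vi} \rangle}
\approx \frac{1}{\langle \psi_{\vi}, \psi_{\vi} \rangle}
\sum_k w_k \, \vg(\vz_k) \, \psi_{\vi}(\vz_k),
\]
with \vg evaluated at the quadrature nodes by means of the
transformation $\transform$ from Stage~2.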
Lastly, let us illustrate the Hermite basis. In the case of working with only
one standard Gaussian variable ($\nz = 1$), a second-level \ac{PC} expansion
($\lc = 2$) of a three-dimensional quantity of interest \vg is as follows:
\[
\chaos{1}{2}{\vg}
= \hat{\vg}_{(0)} \psi_{(0)}
+ \hat{\vg}_{(1)} \psi_{(1)}
+ \hat{\vg}_{(2)} \psi_{(2)}
\]
where $\set{\hat{\vg}_{\vi}} \subset \real^3$,
\begin{align*}
& \psi_{(0)}(\vz) = 1, \\
& \psi_{(1)}(\vz) = \z_1, \text{ and} \\
& \psi_{(2)}(\vz) = \z_1^2 - 1.
\end{align*}
At Stage~4, the expansion is post-processed as described in
\sref{chaos-optimization}.
%--------------------------------------------------------
\subsection{Conventions: {\tt conventions.xml}}
\label{sec:conventions}
%--------------------------------------------------------
The conventions to associate with a set of market quotes in the construction of term structures are specified in another XML
file, which we will refer to as {\tt conventions.xml} in the following, though the file name can be chosen by the user.
Each separate set of conventions is stored in an XML node. The type of conventions that a node holds is determined by
the node name. Every node has an \lstinline!Id! node that gives a unique identifier for the convention set. The
following sections describe the type of conventions that can be created and the allowed values.
%- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
\subsubsection{Zero Conventions}
%- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
A node with name \emph{Zero} is used to store conventions for direct zero rate quotes. Direct zero rate quotes can be
given with an explicit maturity date or with a tenor and a set of conventions from which the maturity date is
deduced. The node for a zero rate quote with an explicit maturity date is shown in Listing
\ref{lst:zero_conventions_date}. The node for a tenor based zero rate is shown in Listing
\ref{lst:zero_conventions_tenor}.
\begin{listing}[H]
%\hrule\medskip
\begin{minted}[fontsize=\footnotesize]{xml}
<Zero>
<Id> </Id>
<TenorBased>False</TenorBased>
<DayCounter> </DayCounter>
<CompoundingFrequency> </CompoundingFrequency>
<Compounding> </Compounding>
</Zero>
\end{minted}
\caption{Zero conventions}
\label{lst:zero_conventions_date}
\end{listing}
\begin{listing}[H]
%\hrule\medskip
\begin{minted}[fontsize=\footnotesize]{xml}
<Zero>
<Id> </Id>
<TenorBased>True</TenorBased>
<DayCounter> </DayCounter>
<CompoundingFrequency> </CompoundingFrequency>
<Compounding> </Compounding>
<TenorCalendar> </TenorCalendar>
<SpotLag> </SpotLag>
<SpotCalendar> </SpotCalendar>
<RollConvention> </RollConvention>
<EOM> </EOM>
</Zero>
\end{minted}
\caption{Zero conventions, tenor based}
\label{lst:zero_conventions_tenor}
\end{listing}
The meanings of the various elements in this node are as follows:
\begin{itemize}
\item TenorBased: True if the conventions are for a tenor based zero quote and False if they are
for a zero quote with an explicit maturity date.
\item DayCounter: The day count basis associated with the zero rate quote (for choices see section
\ref{sec:allowable_values})
\item CompoundingFrequency: The frequency of compounding (Choices are {\em Once, Annual, Semiannual, Quarterly,
Bimonthly, Monthly, Weekly, Daily}).
\item Compounding: The type of compounding for the zero rate (Choices are {\em Simple, Compounded, Continuous,
SimpleThenCompounded}).
\item TenorCalendar: The calendar used to advance from the spot date to the maturity date by the zero rate tenor (for
choices see section \ref{sec:allowable_values}).
\item SpotLag [Optional]: The number of business days to advance from the valuation date before applying the zero rate
tenor. If not provided, this defaults to 0.
\item SpotCalendar [Optional]: The calendar to use for business days when applying the \lstinline!SpotLag!. If not
provided, it defaults to a calendar with no holidays.
\item RollConvention [Optional]: The roll convention to use when applying the zero rate tenor. If not provided, it
defaults to Following (Choices are {\em Backward, Forward, Zero, ThirdWednesday, Twentieth, TwentiethIMM, CDS}).
\item EOM [Optional]: Whether or not to use the end of month convention when applying the zero rate tenor. If not
provided, it defaults to false.
\end{itemize}
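As an illustration, Listing \ref{lst:zero_conventions_example} shows what a populated tenor based zero convention set might
look like; the \lstinline!Id! and the particular values chosen are illustrative only.
\begin{listing}[H]
\begin{minted}[fontsize=\footnotesize]{xml}
<Zero>
<Id>EUR-ZERO-CONVENTIONS</Id>
<TenorBased>True</TenorBased>
<DayCounter>A365F</DayCounter>
<CompoundingFrequency>Annual</CompoundingFrequency>
<Compounding>Compounded</Compounding>
<TenorCalendar>TARGET</TenorCalendar>
<SpotLag>2</SpotLag>
<SpotCalendar>TARGET</SpotCalendar>
<RollConvention>Following</RollConvention>
<EOM>false</EOM>
</Zero>
\end{minted}
\caption{Zero conventions, tenor based example}
\label{lst:zero_conventions_example}
\end{listing}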
%- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
\subsubsection{Deposit Conventions}
%- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
A node with name \emph{Deposit} is used to store conventions for deposit or index fixing quotes. The conventions can be
index based, in which case all necessary conventions are deduced from a given index family. The structure of the index
based node is shown in Listing \ref{lst:deposit_conventions_index}. Alternatively, all the necessary conventions can be
given explicitly without reference to an index family. The structure of this node is shown in Listing
\ref{lst:deposit_conventions_explicit}.
\begin{listing}[H]
%\hrule\medskip
\begin{minted}[fontsize=\footnotesize]{xml}
<Deposit>
<Id> </Id>
<IndexBased>True</IndexBased>
<Index> </Index>
</Deposit>
\end{minted}
\caption{Deposit conventions, index based}
\label{lst:deposit_conventions_index}
\end{listing}
\begin{listing}[H]
%\hrule\medskip
\begin{minted}[fontsize=\footnotesize]{xml}
<Deposit>
<Id> </Id>
<IndexBased>False</IndexBased>
<Calendar> </Calendar>
<Convention> </Convention>
<EOM> </EOM>
<DayCounter> </DayCounter>
</Deposit>
\end{minted}
\caption{Deposit conventions, explicit}
\label{lst:deposit_conventions_explicit}
\end{listing}
The meanings of the various elements in this node are as follows:
\begin{itemize}
\item IndexBased: \emph{True} if the deposit conventions are index based and \emph{False} if the conventions are given
explicitly.
\item Index: The index family from which to imply the conventions for the deposit quote. For example, this could be
EUR-EURIBOR, USD-LIBOR etc.
\item Calendar: The business day calendar for the deposit quote.
\item Convention: The roll convention for the deposit quote.
\item EOM: \emph{True} if the end of month roll convention is to be used for the deposit quote and \emph{False} if not.
\item DayCounter: The day count basis associated with the deposit quote.
\end{itemize}
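For example, an index based deposit convention set might look as follows; the \lstinline!Id! is illustrative only.
\begin{listing}[H]
\begin{minted}[fontsize=\footnotesize]{xml}
<Deposit>
<Id>EUR-DEPOSIT-CONVENTIONS</Id>
<IndexBased>True</IndexBased>
<Index>EUR-EURIBOR</Index>
</Deposit>
\end{minted}
\caption{Deposit conventions, index based example}
\label{lst:deposit_conventions_example}
\end{listing}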
%- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
\subsubsection{Future Conventions}
%- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
A node with name \emph{Future} is used to store conventions for IMM Future quotes. The structure of this node is shown
in Listing \ref{lst:future_conventions}. The only piece of information needed is the underlying money market or
overnight future index name and this is given in the \lstinline!Index! node. For example, this could be EUR-EURIBOR-3M,
USD-LIBOR-3M, USD-SOFR, GBP-SONIA etc.
\begin{listing}[H]
%\hrule\medskip
\begin{minted}[fontsize=\footnotesize]{xml}
<Future>
<Id> </Id>
<Index> </Index>
</Future>
\end{minted}
\caption{Future conventions}
\label{lst:future_conventions}
\end{listing}
%- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
\subsubsection{FRA Conventions}
%- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
A node with name \emph{FRA} is used to store conventions for FRA quotes. The structure of this node is shown in Listing
\ref{lst:fra_conventions}. The only piece of information needed is the underlying index name and this is given in the
\lstinline!Index! node. For example, this could be EUR-EURIBOR-6M, CHF-LIBOR-6M etc.
\begin{listing}[H]
%\hrule\medskip
\begin{minted}[fontsize=\footnotesize]{xml}
<FRA>
<Id> </Id>
<Index> </Index>
</FRA>
\end{minted}
\caption{FRA conventions}
\label{lst:fra_conventions}
\end{listing}
%- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
\subsubsection{OIS Conventions}
%- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
A node with name \emph{OIS} is used to store conventions for Overnight Indexed Swap (OIS) quotes. The structure of this
node is shown in Listing \ref{lst:ois_conventions}.
\begin{listing}[H]
%\hrule\medskip
\begin{minted}[fontsize=\footnotesize]{xml}
<OIS>
<Id> </Id>
<SpotLag> </SpotLag>
<Index> </Index>
<FixedDayCounter> </FixedDayCounter>
<PaymentLag> </PaymentLag>
<EOM> </EOM>
<FixedFrequency> </FixedFrequency>
<FixedConvention> </FixedConvention>
<FixedPaymentConvention> </FixedPaymentConvention>
<Rule> </Rule>
<PaymentCalendar> </PaymentCalendar>
</OIS>
\end{minted}
\caption{OIS conventions}
\label{lst:ois_conventions}
\end{listing}
The meanings of the various elements in this node are as follows:
\begin{itemize}
\item SpotLag: The number of business days until the start of the OIS.
\item Index: The name of the overnight index. For example, this could be EUR-EONIA, USD-FedFunds etc.
\item FixedDayCounter: The day count basis on the fixed leg of the OIS.
\item PaymentLag [Optional]: The payment lag, as a number of business days, on both legs. If not provided, this defaults
to 0.
\item EOM [Optional]: \emph{True} if the end of month roll convention is to be used when generating the OIS schedule and
\emph{False} if not. If not provided, this defaults to \emph{False}.
\item FixedFrequency [Optional]: The frequency of payments on the fixed leg. If not provided, this defaults to
\emph{Annual}.
\item FixedConvention [Optional]: The roll convention for accruals on the fixed leg. If not provided, this defaults to
\emph{Following}.
\item FixedPaymentConvention [Optional]: The roll convention for payments on the fixed leg. If not provided, this
defaults to \emph{Following}.
\item Rule [Optional]: The rule used for generating the OIS dates schedule i.e.\ \emph{Backward} or \emph{Forward}. If
not provided, this defaults to \emph{Backward}.
\item PaymentCalendar [Optional]: The business day calendar used for determining coupon payment dates.
If not specified, this defaults to the fixing calendar defined on the overnight index.
\end{itemize}
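As an illustration, a populated OIS convention set might look as follows; the \lstinline!Id! and the particular values chosen
are illustrative only.
\begin{listing}[H]
\begin{minted}[fontsize=\footnotesize]{xml}
<OIS>
<Id>EUR-OIS-CONVENTIONS</Id>
<SpotLag>2</SpotLag>
<Index>EUR-EONIA</Index>
<FixedDayCounter>A360</FixedDayCounter>
<PaymentLag>1</PaymentLag>
<FixedFrequency>Annual</FixedFrequency>
<FixedConvention>Following</FixedConvention>
<FixedPaymentConvention>Following</FixedPaymentConvention>
<Rule>Backward</Rule>
</OIS>
\end{minted}
\caption{OIS conventions example}
\label{lst:ois_conventions_example}
\end{listing}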
%- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
\subsubsection{Swap Conventions}
%- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
A node with name \emph{Swap} is used to store conventions for vanilla interest rate swap (IRS) quotes. The structure of
this node is shown in Listing \ref{lst:swap_conventions}.
\begin{listing}[H]
%\hrule\medskip
\begin{minted}[fontsize=\footnotesize]{xml}
<Swap>
<Id> </Id>
<FixedCalendar> </FixedCalendar>
<FixedFrequency> </FixedFrequency>
<FixedConvention> </FixedConvention>
<FixedDayCounter> </FixedDayCounter>
<Index> </Index>
<FloatFrequency> </FloatFrequency>
<SubPeriodsCouponType> </SubPeriodsCouponType>
</Swap>
\end{minted}
\caption{Swap conventions}
\label{lst:swap_conventions}
\end{listing}
The meanings of the various elements in this node are as follows:
\begin{itemize}
\item FixedCalendar: The business day calendar on the fixed leg.
\item FixedFrequency: The frequency of payments on the fixed leg.
\item FixedConvention: The roll convention on the fixed leg.
\item FixedDayCounter: The day count basis on the fixed leg.
\item Index: The Ibor index on the floating leg.
\item FloatFrequency [Optional]: The frequency of payments on the floating leg, to be used if the frequency is different to the tenor of the index (e.g. CAD swaps for BA-3M have a 6M or 1Y payment frequency with a Compounding coupon)
\item SubPeriodsCouponType [Optional]: Defines how coupon rates should be calculated when the float frequency is different to that of the index. Possible values are "Compounding" and "Averaging".
\end{itemize}
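As an illustration, a populated swap convention set might look as follows; the \lstinline!Id! and the particular values chosen
are illustrative only.
\begin{listing}[H]
\begin{minted}[fontsize=\footnotesize]{xml}
<Swap>
<Id>EUR-EURIBOR-6M-SWAP</Id>
<FixedCalendar>TARGET</FixedCalendar>
<FixedFrequency>Annual</FixedFrequency>
<FixedConvention>MF</FixedConvention>
<FixedDayCounter>30/360</FixedDayCounter>
<Index>EUR-EURIBOR-6M</Index>
</Swap>
\end{minted}
\caption{Swap conventions example}
\label{lst:swap_conventions_example}
\end{listing}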
%- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
\subsubsection{Average OIS Conventions}
%- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
A node with name \emph{AverageOIS} is used to store conventions for average OIS quotes. An average OIS is a swap where a
fixed rate is swapped against a daily averaged overnight index plus a spread. The structure of this node is shown in
Listing \ref{lst:average_ois_conventions}.
\begin{listing}[H]
%\hrule\medskip
\begin{minted}[fontsize=\footnotesize]{xml}
<AverageOIS>
<Id> </Id>
<SpotLag> </SpotLag>
<FixedTenor> </FixedTenor>
<FixedDayCounter> </FixedDayCounter>
<FixedCalendar> </FixedCalendar>
<FixedConvention> </FixedConvention>
<FixedPaymentConvention> </FixedPaymentConvention>
<Index> </Index>
<OnTenor> </OnTenor>
<RateCutoff> </RateCutoff>
</AverageOIS>
\end{minted}
\caption{Average OIS conventions}
\label{lst:average_ois_conventions}
\end{listing}
The meanings of the various elements in this node are as follows:
\begin{itemize}
\item SpotLag: Number of business days until the start of the average OIS.
\item FixedTenor: The frequency of payments on the fixed leg.
\item FixedDayCounter: The day count basis on the fixed leg.
\item FixedCalendar: The business day calendar on the fixed leg.
\item FixedFrequency: The frequency of payments on the fixed leg.
\item FixedConvention: The roll convention for accruals on the fixed leg.
\item FixedPaymentConvention: The roll convention for payments on the fixed leg.
\item Index: The name of the overnight index.
\item OnTenor: The frequency of payments on the overnight leg.
\item RateCutoff: The rate cut-off on the overnight leg. Generally, the overnight fixing is only observed up to a
certain number of days before the payment date and the last observed rate is applied for the remaining days in the
period. This rate cut-off gives the number of days e.g.\ 2 for Fed Funds average OIS.
\end{itemize}
%- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
\subsubsection{Tenor Basis Swap Conventions}
%- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
A node with name \emph{TenorBasisSwap} is used to store conventions for tenor basis swap quotes. The structure of this
node is shown in Listing \ref{lst:tenor_basis_conventions}.
\begin{listing}[H]
%\hrule\medskip
\begin{minted}[fontsize=\footnotesize]{xml}
<TenorBasisSwap>
<Id> </Id>
<LongIndex> </LongIndex>
<ShortIndex> </ShortIndex>
<ShortPayTenor> </ShortPayTenor>
<SpreadOnShort> </SpreadOnShort>
<IncludeSpread> </IncludeSpread>
<SubPeriodsCouponType> </SubPeriodsCouponType>
</TenorBasisSwap>
\end{minted}
\caption{Tenor basis swap conventions}
\label{lst:tenor_basis_conventions}
\end{listing}
The meanings of the various elements in this node are as follows:
\begin{itemize}
\item LongIndex: The name of the long tenor Ibor index.
\item ShortIndex: The name of the short tenor Ibor index.
\item ShortPayTenor [Optional]: The frequency of payments on the short tenor Ibor leg. This is usually the same as the
short tenor Ibor index's tenor. However, it can also be longer e.g.\ USD tenor basis swaps where the short tenor Ibor
index is compounded and paid on the same frequency as the long tenor Ibor index. If not provided, this defaults to the
short tenor Ibor index's tenor.
\item SpreadOnShort [Optional]: \emph{True} if the tenor basis swap quote has the spread on the short tenor Ibor index
leg and \emph{False} if not. If not provided, this defaults to \emph{True}.
\item IncludeSpread [Optional]: \emph{True} if the tenor basis swap spread is to be included when compounding is
performed on the short tenor Ibor index leg and \emph{False} if not. If not provided, this defaults to \emph{False}.
\item SubPeriodsCouponType [Optional]: This field can have the value \emph{Compounding} or \emph{Averaging} and it only
applies when the frequency of payments on the short tenor Ibor leg does not equal the short tenor Ibor index's tenor. If
\emph{Compounding} is specified, then the short tenor Ibor index is compounded and paid on the frequency specified in
the \lstinline!ShortPayTenor! node. If \emph{Averaging} is specified, then the short tenor Ibor index is averaged and
paid on the frequency specified in the \lstinline!ShortPayTenor! node. If not provided, this defaults to
\emph{Compounding}.
\end{itemize}
%- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
\subsubsection{Tenor Basis Two Swap Conventions}
%- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
A node with name \emph{TenorBasisTwoSwap} is used to store conventions for tenor basis swap quotes where the quote is
the spread between the fair fixed rate on two swaps against Ibor indices of different tenors. We call the swap against
the Ibor index of longer tenor the long swap and the remaining swap the short swap. The structure of the tenor basis two
swap conventions node is shown in Listing \ref{lst:tenor_basis_two_conventions}.
\begin{listing}[H]
%\hrule\medskip
\begin{minted}[fontsize=\footnotesize]{xml}
<TenorBasisTwoSwap>
<Id> </Id>
<Calendar> </Calendar>
<LongFixedFrequency> </LongFixedFrequency>
<LongFixedConvention> </LongFixedConvention>
<LongFixedDayCounter> </LongFixedDayCounter>
<LongIndex> </LongIndex>
<ShortFixedFrequency> </ShortFixedFrequency>
<ShortFixedConvention> </ShortFixedConvention>
<ShortFixedDayCounter> </ShortFixedDayCounter>
<ShortIndex> </ShortIndex>
<LongMinusShort> </LongMinusShort>
</TenorBasisTwoSwap>
\end{minted}
\caption{Tenor basis two swap conventions}
\label{lst:tenor_basis_two_conventions}
\end{listing}
The meanings of the various elements in this node are as follows:
\begin{itemize}
\item Calendar: The business day calendar on both swaps.
\item LongFixedFrequency: The frequency of payments on the fixed leg of the long swap.
\item LongFixedConvention: The roll convention on the fixed leg of the long swap.
\item LongFixedDayCounter: The day count basis on the fixed leg of the long swap.
\item LongIndex: The Ibor index on the floating leg of the long swap.
\item ShortFixedFrequency: The frequency of payments on the fixed leg of the short swap.
\item ShortFixedConvention: The roll convention on the fixed leg of the short swap.
\item ShortFixedDayCounter: The day count basis on the fixed leg of the short swap.
\item ShortIndex: The Ibor index on the floating leg of the short swap.
\item LongMinusShort [Optional]: \emph{True} if the basis swap spread is to be interpreted as the fair rate on the long
swap minus the fair rate on the short swap and \emph{False} if the basis swap spread is to be interpreted as the fair
rate on the short swap minus the fair rate on the long swap. If not provided, it defaults to \emph{True}.
\end{itemize}
%- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
\subsubsection{FX Conventions}
%- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
A node with name \emph{FX} is used to store conventions for FX spot and forward quotes for a given currency pair. The
structure of this node is shown in Listing \ref{lst:fx_conventions}.
\begin{listing}[H]
%\hrule\medskip
\begin{minted}[fontsize=\footnotesize]{xml}
<FX>
<Id> </Id>
<SpotDays> </SpotDays>
<SourceCurrency> </SourceCurrency>
<TargetCurrency> </TargetCurrency>
<PointsFactor> </PointsFactor>
<AdvanceCalendar> </AdvanceCalendar>
<SpotRelative> </SpotRelative>
<AdditionalSettleCalendar> </AdditionalSettleCalendar>
</FX>
\end{minted}
\caption{FX conventions}
\label{lst:fx_conventions}
\end{listing}
The meanings of the various elements in this node are as follows:
\begin{itemize}
\item SpotDays: The number of business days to spot for the currency pair.
\item SourceCurrency: The source currency of the currency pair. The FX quote is assumed to give the number of units of
target currency per unit of source currency.
\item TargetCurrency: The target currency of the currency pair.
\item PointsFactor: The number by which a points quote for the currency pair should be divided before adding it to the
spot quote to obtain the forward rate.
\item AdvanceCalendar [Optional]: The business day calendar(s) used for advancing dates for both spot and forwards. If
not provided, it defaults to a calendar with no holidays.
\item SpotRelative [Optional]: \emph{True} if the forward tenor is to be interpreted as being relative to the spot date.
\emph{False} if the forward tenor is to be interpreted as being relative to the valuation date. If not provided, it
defaults to \emph{True}.
\item AdditionalSettleCalendar [Optional]: In some cases, when the spot date is calculated using the values in the
\lstinline!AdvanceCalendar! and \lstinline!SpotDays! nodes, it is checked against an additional settlement calendar(s)
and if it is not a good business day then it is moved forward until it is a good business day on both the additional
settlement calendar(s) and the AdvanceCalendar. This additional settlement calendar(s) can be specified here. If not
provided, it defaults to a calendar with no holidays.
\end{itemize}
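As an illustration, a populated FX convention set for EUR-USD might look as follows; the \lstinline!Id! is illustrative only,
and the joint calendar is written here as a comma separated list.
\begin{listing}[H]
\begin{minted}[fontsize=\footnotesize]{xml}
<FX>
<Id>EUR-USD-FX</Id>
<SpotDays>2</SpotDays>
<SourceCurrency>EUR</SourceCurrency>
<TargetCurrency>USD</TargetCurrency>
<PointsFactor>10000</PointsFactor>
<AdvanceCalendar>TARGET,US</AdvanceCalendar>
<SpotRelative>true</SpotRelative>
</FX>
\end{minted}
\caption{FX conventions example}
\label{lst:fx_conventions_example}
\end{listing}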
%- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
\subsubsection{Cross Currency Basis Swap Conventions}
%- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
A node with name \emph{CrossCurrencyBasis} is used to store conventions for cross currency basis swap quotes. The
structure of this node is shown in Listing \ref{lst:xccy_basis_conventions}.
\begin{listing}[H]
%\hrule\medskip
\begin{minted}[fontsize=\footnotesize]{xml}
<CrossCurrencyBasis>
<Id> </Id>
<SettlementDays> </SettlementDays>
<SettlementCalendar> </SettlementCalendar>
<RollConvention> </RollConvention>
<FlatIndex> </FlatIndex>
<SpreadIndex> </SpreadIndex>
<EOM> </EOM>
<IsResettable> </IsResettable>
<FlatIndexIsResettable> </FlatIndexIsResettable>
</CrossCurrencyBasis>
\end{minted}
\caption{Cross currency basis swap conventions}
\label{lst:xccy_basis_conventions}
\end{listing}
The meanings of the various elements in this node are as follows:
\begin{itemize}
\item SettlementDays: The number of business days to the start of the cross currency basis swap.
\item SettlementCalendar: The business day calendar(s) used for both legs and for arriving at the settlement date by applying
the SettlementDays above.
\item RollConvention: The roll convention for both legs.
\item FlatIndex: The name of the index on the leg that does not have the cross currency basis spread.
\item SpreadIndex: The name of the index on the leg that has the cross currency basis spread.
\item EOM [Optional]: \emph{True} if the end of month convention is to be used when generating the schedule on both legs, and \emph{False} if not. If not provided, it defaults to \emph{False}.
\item IsResettable [Optional]: \emph{True} if the swap is mark-to-market resetting, and \emph{False} otherwise. If not provided, it defaults to \emph{False}.
\item FlatIndexIsResettable [Optional]: \emph{True} if it is the notional on the leg paying the flat index that resets, and \emph{False} otherwise. If not provided, it defaults to \emph{True}.
\end{itemize}
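As an illustration, a populated cross currency basis swap convention set might look as follows; the \lstinline!Id! is
illustrative only.
\begin{listing}[H]
\begin{minted}[fontsize=\footnotesize]{xml}
<CrossCurrencyBasis>
<Id>EUR-USD-XCCY-BASIS</Id>
<SettlementDays>2</SettlementDays>
<SettlementCalendar>TARGET,US</SettlementCalendar>
<RollConvention>MF</RollConvention>
<FlatIndex>USD-LIBOR-3M</FlatIndex>
<SpreadIndex>EUR-EURIBOR-3M</SpreadIndex>
</CrossCurrencyBasis>
\end{minted}
\caption{Cross currency basis swap conventions example}
\label{lst:xccy_basis_conventions_example}
\end{listing}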
%- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
\subsubsection{Inflation Conventions}
%- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
A node with name \emph{InflationSwap} is used to store conventions for zero or year on year inflation swap quotes. The
structure of this node is shown in Listing \ref{lst:inflation_conventions}
\begin{listing}[H]
%\hrule\medskip
\begin{minted}[fontsize=\footnotesize]{xml}
<InflationSwap>
<Id>EUHICPXT_INFLATIONSWAP</Id>
<FixCalendar>TARGET</FixCalendar>
<FixConvention>MF</FixConvention>
<DayCounter>30/360</DayCounter>
<Index>EUHICPXT</Index>
<Interpolated>false</Interpolated>
<ObservationLag>3M</ObservationLag>
<AdjustInflationObservationDates>false</AdjustInflationObservationDates>
<InflationCalendar>TARGET</InflationCalendar>
<InflationConvention>MF</InflationConvention>
</InflationSwap>
\end{minted}
\caption{Inflation swap conventions}
\label{lst:inflation_conventions}
\end{listing}
The meaning of the elements is as follows:
\begin{itemize}
\item FixCalendar: The calendar for the fixed rate leg of the swap.
\item FixConvention: The rolling convention for the fixed rate leg of the swap.
\item DayCounter: The payoff / coupon day counter (applied to both legs).
\item Index: The underlying inflation index.
\item Interpolated: Flag indicating interpolation of the index in the swap's payoff calculation.
\item ObservationLag: The index observation lag to be applied.
\item AdjustInflationObservationDates: Flag indicating whether index observation dates should be adjusted or not.
\item InflationCalendar: The calendar for the inflation leg of the swap.
\item InflationConvention: The rolling convention for the inflation leg of the swap.
\end{itemize}
%- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
\subsubsection{CMS Spread Option Conventions}
%- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
A node with name \emph{CmsSpreadOption} is used to store CMS spread option conventions. The structure of this node is shown
in Listing \ref{lst:cms_spread_option_conventions}.
\begin{listing}[H]
%\hrule\medskip
\begin{minted}[fontsize=\footnotesize]{xml}
<CmsSpreadOption>
<Id>EUR-CMS-10Y-2Y-CONVENTION</Id>
<ForwardStart>0M</ForwardStart>
<SpotDays>2D</SpotDays>
<SwapTenor>3M</SwapTenor>
<FixingDays>2</FixingDays>
<Calendar>TARGET</Calendar>
<DayCounter>A360</DayCounter>
<RollConvention>MF</RollConvention>
</CmsSpreadOption>
\end{minted}
\caption{CMS spread option conventions}
\label{lst:cms_spread_option_conventions}
\end{listing}
The meaning of the elements is as follows:
\begin{itemize}
\item ForwardStart: The forward start period, e.g.\ 0M for no forward start.
\item SpotDays: The number of business days to spot for the CMS Spread Index.
\item SwapTenor: The frequency of payments on the CMS Spread leg.
\item FixingDays: The number of fixing days.
\item Calendar: The calendar for the CMS Spread leg.
\item DayCounter: The day counter for the CMS Spread leg.
\item RollConvention: The rolling convention for the CMS Spread Leg.
\end{itemize}
%- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
\subsubsection{Ibor Index Conventions}
%- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
A node with name \emph{IborIndex} is used to store conventions for Ibor indices. This can be used to define new Ibor
indices without the need of adding them to the C++ code, or also to override the conventions of existing Ibor indices.
\begin{listing}[H]
%\hrule\medskip
\begin{minted}[fontsize=\footnotesize]{xml}
<IborIndex>
<Id>EUR-EURIBOR_ACT365-3M</Id>
<FixingCalendar>TARGET</FixingCalendar>
<DayCounter>A365F</DayCounter>
<SettlementDays>2</SettlementDays>
<BusinessDayConvention>MF</BusinessDayConvention>
<EndOfMonth>true</EndOfMonth>
</IborIndex>
\end{minted}
\caption{Ibor index convention}
\label{lst:ibor_index_conventions}
\end{listing}
The meaning of the elements is as follows:
\begin{itemize}
\item Id: The index name. This must be of the form ``CCY-NAME-TENOR'' with a currency ``CCY'', an index name ``NAME''
and a string ``TENOR'' representing a period. The name should not be ``GENERIC'', since this is reserved.
\item FixingCalendar: The fixing calendar of the index.
\item DayCounter: The day count convention used by the index.
\item SettlementDays: The settlement days for the index. This must be a non-negative whole number.
\item BusinessDayConvention: The business day convention used by the index.
\item EndOfMonth: A flag indicating whether the index employs the end of month convention.
\end{itemize}
Notice that if another convention depends on an Ibor index convention (because it contains the Ibor index name defined
in the latter convention), the Ibor index convention must appear before the convention that depends on it in the
convention input file.
Also notice that customised indices can not be used in cap / floor volatility surface configurations.
%- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
\subsubsection{Overnight Index Conventions}
%- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
A node with name \emph{OvernightIndex} is used to store conventions for Overnight indices. This can be used to define
new Overnight indices without the need of adding them to the C++ code, or also to override the conventions of existing
Overnight indices.
\begin{listing}[H]
%\hrule\medskip
\begin{minted}[fontsize=\footnotesize]{xml}
<OvernightIndex>
<Id>EUR-ESTER</Id>
<FixingCalendar>TARGET</FixingCalendar>
<DayCounter>A360</DayCounter>
<SettlementDays>0</SettlementDays>
</OvernightIndex>
\end{minted}
\caption{Overnight index convention}
\label{lst:overnight_index_conventions}
\end{listing}
The meaning of the elements is as follows:
\begin{itemize}
\item Id: The index name. This must be of the form ``CCY-NAME'' with a currency ``CCY'' and an index name ``NAME''. The
name should not be ``GENERIC'', since this is reserved.
\item FixingCalendar: The fixing calendar of the index.
\item DayCounter: The day count convention used by the index.
\item SettlementDays: The settlement days for the index. This must be a non-negative whole number.
\end{itemize}
Notice that if another convention depends on an Overnight index convention (because it contains the Overnight index name
defined in the latter convention), the Overnight index convention must appear before the convention that depends on it
in the convention input file.
Also notice that customised indices can not be used in cap / floor volatility surface configurations.
%- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
\subsubsection{Swap Index Conventions}
%- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
A node with name \emph{SwapIndex} is used to store conventions for Swap indices (also known as ``CMS'' indices).
\begin{listing}[H]
%\hrule\medskip
\begin{minted}[fontsize=\footnotesize]{xml}
<SwapIndex>
<Id>EUR-CMS-2Y</Id>
<Conventions>EUR-EURIBOR-6M-SWAP</Conventions>
</SwapIndex>
\end{minted}
\caption{Swap index convention}
\label{lst:swap_index_conventions}
\end{listing}
The meaning of the elements is as follows:
\begin{itemize}
\item Id: The index name. This must be of the form ``CCY-CMS-TENOR'' with a currency ``CCY'' and a string ``TENOR''
representing a period. The index name can contain an optional tag ``CCY-CMS-TAG-TENOR'' which is an arbitrary label
that allows more than one swap index to be defined per currency.
\item Conventions: A swap convention defining the index conventions.
\end{itemize}
%- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
\subsubsection{FX Option Conventions}
%- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
A node with name \emph{FxOption} is used to store conventions for FX option quotes for a given currency pair. The
structure of this node is shown in Listing \ref{lst:fx_option_conventions}.
\begin{listing}[H]
%\hrule\medskip
\begin{minted}[fontsize=\footnotesize]{xml}
<FxOption>
<Id> </Id>
<AtmType> </AtmType>
<DeltaType> </DeltaType>
</FxOption>
\end{minted}
\caption{FX option conventions}
\label{lst:fx_option_conventions}
\end{listing}
The meanings of the various elements in this node are as follows:
\begin{itemize}
\item AtmType: Convention of ATM option quote (Choices are {\em AtmNull, AtmSpot, AtmFwd,
AtmDeltaNeutral, AtmVegaMax, AtmGammaMax, AtmPutCall50}).
\item DeltaType: Convention of Delta option quote (Choices are {\em Spot, Fwd, PaSpot,
PaFwd}).
\end{itemize}
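As an illustration, a populated FX option convention set might look as follows; the \lstinline!Id! is illustrative only.
\begin{listing}[H]
\begin{minted}[fontsize=\footnotesize]{xml}
<FxOption>
<Id>EUR-USD-FX-OPTION</Id>
<AtmType>AtmDeltaNeutral</AtmType>
<DeltaType>Spot</DeltaType>
</FxOption>
\end{minted}
\caption{FX option conventions example}
\label{lst:fx_option_conventions_example}
\end{listing}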
%- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
\subsubsection{Commodity Forward Conventions}
%- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
A node with name \lstinline!CommodityForward! is used to store conventions for commodity forward price quotes. The
structure of this node is shown in Listing \ref{lst:commodity_forward_conventions}.
\begin{listing}[H]
\begin{minted}[fontsize=\footnotesize]{xml}
<CommodityForward>
<Id>...</Id>
<SpotDays>...</SpotDays>
<PointsFactor>...</PointsFactor>
<AdvanceCalendar>...</AdvanceCalendar>
<SpotRelative>...</SpotRelative>
<BusinessDayConvention>...</BusinessDayConvention>
<Outright>...</Outright>
</CommodityForward>
\end{minted}
\caption{Commodity forward conventions}
\label{lst:commodity_forward_conventions}
\end{listing}
The meanings of the various elements in this node are as follows:
\begin{itemize}
\item \lstinline!Id!: The identifier for the commodity forward convention. The identifier here should match the \lstinline!Name! that would be provided for the commodity in the trade XML as described in Table \ref{tab:commodity_data}.
\item \lstinline!SpotDays! [Optional]: The number of business days to spot for the commodity. Any non-negative integer is allowed here. If omitted, this takes a default value of 2.
\item \lstinline!PointsFactor! [Optional]: This is only used if \lstinline!Outright! is \lstinline!false!. Any positive real number is allowed here. When \lstinline!Outright! is \lstinline!false!, the commodity forward quotes are provided as points i.e. a number that should be added to the commodity spot to give the outright commodity forward rate. The \lstinline!PointsFactor! is the number by which the points quote should be divided before adding it to the spot quote to obtain the forward price. If omitted, this takes a default value of 1.
\item \lstinline!AdvanceCalendar! [Optional]: The business day calendar(s) used for advancing dates for both spot and forwards. The allowable values are given in Table \ref{tab:calendar}. If omitted, it defaults to \lstinline!NullCalendar! i.e. a calendar where all days are considered good business days.
\item \lstinline!SpotRelative! [Optional]: The allowable values are \lstinline!true! and \lstinline!false!. If \lstinline!true!, the forward tenor is interpreted as being relative to the spot date. If \lstinline!false!, the forward tenor is interpreted as being relative to the valuation date. If omitted, it defaults to \lstinline!true!.
\item \lstinline!BusinessDayConvention! [Optional]: The business day roll convention used to adjust dates when getting from the valuation date to the spot date and the forward maturity date. The allowable values are given in Table \ref{tab:allow_stand_data}. If omitted, it defaults to \lstinline!Following!.
\item \lstinline!Outright! [Optional]: The allowable values are \lstinline!true! and \lstinline!false!. If \lstinline!true!, the forward quotes are interpreted as outright forward prices. If \lstinline!false!, the forward quotes are interpreted as points i.e. as a number that must be added to the spot price to get the outright forward price. If omitted, it defaults to \lstinline!true!.
\end{itemize}
\subsubsection{Commodity Future Conventions}
A node with name \lstinline!CommodityFuture! is used to store conventions for commodity future contracts and options on them. These conventions are used in commodity derivative trades and commodity curve construction to calculate contract expiry dates. The structure of this node is shown in Listing \ref{lst:commodity_future_conventions}.
\begin{listing}[h!]
\begin{minted}[fontsize=\footnotesize]{xml}
<CommodityFuture>
<Id>...</Id>
<AnchorDay>
...
</AnchorDay>
<ContractFrequency>...</ContractFrequency>
<Calendar>...</Calendar>
<ExpiryCalendar>...</ExpiryCalendar>
<ExpiryMonthLag>...</ExpiryMonthLag>
<OneContractMonth>...</OneContractMonth>
<OffsetDays>...</OffsetDays>
<BusinessDayConvention>...</BusinessDayConvention>
<AdjustBeforeOffset>...</AdjustBeforeOffset>
<IsAveraging>...</IsAveraging>
<OptionExpiryOffset>...</OptionExpiryOffset>
<ProhibitedExpiries>
<Dates>
<Date>...</Date>
...
</Dates>
</ProhibitedExpiries>
</CommodityFuture>
\end{minted}
\caption{Commodity future conventions}
\label{lst:commodity_future_conventions}
\end{listing}
The meanings of the various elements in this node are as follows:
\begin{itemize}
\item \lstinline!Id!: The identifier for the commodity future convention. The identifier here should match the \lstinline!Name! that would be provided for the commodity in the trade XML as described in Table \ref{tab:commodity_data}.
\item \lstinline!AnchorDay!: This node is used to give a date in the future contract month to use as a base date for calculating the expiry date. It can contain a \lstinline!DayOfMonth! node, a \lstinline!CalendarDaysBefore! node or an \lstinline!NthWeekday! node:
\begin{itemize}
\item The \lstinline!DayOfMonth! node can contain any integer in the range $1,\ldots,31$ indicating the day of the month. A value of 31 will guarantee that the last day in the month is used as the base date.
\item The \lstinline!CalendarDaysBefore! node can contain any non-negative integer. The contract expiry date is this number of calendar days before the first calendar day of the contract month.
\item The \lstinline!NthWeekday! node has the elements shown in Listing \ref{lst:nth_weekday_node}. It is used to indicate a date in a given month in the form of the n-th named weekday of that month e.g. 3rd Wednesday. The allowable values for \lstinline!Nth! are $1,\ldots,4$. The \lstinline!Weekday! node takes a weekday in the form of the first three characters of the weekday with the first character capitalised.
\end{itemize}
\item \lstinline!ContractFrequency!: This node indicates the frequency of the commodity future contracts. The value here is usually \lstinline!Monthly! or \lstinline!Quarterly!.
\item \lstinline!Calendar!: The business day trading calendar(s) applicable for the commodity future contract.
\item \lstinline!ExpiryCalendar! [Optional]: The business day expiry calendar(s) applicable for the commodity future contract. This calendar is used when deriving expiry dates. If omitted, this defaults to the trading day calendar specified in the \lstinline!Calendar! node.
\item \lstinline!ExpiryMonthLag! [Optional]: The allowable values are any non-negative integer. This value indicates the number of months from the month containing the expiry date to the contract month. If 0, the commodity future contract expiry date is in the contract month. If the value of \lstinline!ExpiryMonthLag! is $n > 0$, the commodity future contract expires in the $n$-th month prior to the contract month. The value of \lstinline!ExpiryMonthLag! is generally 0, 1 or 2. For example, \lstinline!NYMEX:CL! has an \lstinline!ExpiryMonthLag! of 1 and \lstinline!ICE:B! has an \lstinline!ExpiryMonthLag! of 2. If omitted, it defaults to 0.
\item \lstinline!OneContractMonth! [Optional]: This node takes a calendar month in the form of the first three characters of the month with the first character capitalised. The month provided should be an arbitrary valid future contract month. It is used in cases where the \lstinline!ContractFrequency! is not \lstinline!Monthly! in order to determine the valid contract months. If omitted, it defaults to January.
\item \lstinline!OffsetDays! [Optional]: The number of business days that the expiry date is before the base date where the base date is implied by the \lstinline!AnchorDay! node above. Any non-negative integer is allowed here. If omitted, this takes a default value of zero.
\item \lstinline!BusinessDayConvention! [Optional]: The business day roll convention used to adjust the expiry date. The allowable values are given in Table \ref{tab:allow_stand_data}. If omitted, it defaults to \lstinline!Preceding!.
\item \lstinline!AdjustBeforeOffset! [Optional]: The allowable values are \lstinline!true! and \lstinline!false!. If \lstinline!true! and the base date implied by the \lstinline!AnchorDay! node above is not a good business day according to the calendar provided in the \lstinline!Calendar! node, this date is adjusted before the offset specified in the \lstinline!OffsetDays! node is applied. If \lstinline!false!, this adjustment does not happen. If omitted, it defaults to \lstinline!true!.
\item \lstinline!IsAveraging! [Optional]: The allowable values are \lstinline!true! and \lstinline!false!. This node indicates if the future contract is based on the average commodity price of the contract period. If omitted, it defaults to \lstinline!false!.
\item \lstinline!OptionExpiryOffset! [Optional]: The number of business days that the option expiry date is before the future expiry date. Any non-negative integer is allowed here. If omitted, this takes a default value of zero and the expiry date of an option on the future contract is assumed to equal the expiry date of the future contract.
\item \lstinline!ProhibitedExpiries! [Optional]: This node can be used to specify explicit dates that are not allowed as future contract expiry dates or as option expiry dates. A useful example of this is the ICE Brent contract, which has the following constraint on expiry dates: \emph{If the day on which trading is due to cease would be either: (i) the Business Day preceding Christmas Day, or (ii) the Business Day preceding New Year’s Day, then trading shall cease on the next preceding Business Day}. An illustrative (hypothetical) convention using this node is sketched in Listing \ref{lst:ex_hyp_comm_future_convention} below.
\end{itemize}
\begin{listing}[h!]
\begin{minted}[fontsize=\footnotesize]{xml}
<NthWeekday>
<Nth>...</Nth>
<Weekday>...</Weekday>
</NthWeekday>
\end{minted}
\caption{\textnormal{\lstinline!NthWeekday!} node outline}
\label{lst:nth_weekday_node}
\end{listing}
An example \lstinline!CommodityFuture! node for the NYMEX WTI future contract, specified \href{https://www.cmegroup.com/trading/energy/crude-oil/light-sweet-crude_contract_specifications.html}{here}, is provided in Listing \ref{lst:ex_wti_comm_future_convention}.
\begin{listing}[h!]
\begin{minted}[fontsize=\footnotesize]{xml}
<CommodityFuture>
<Id>NYMEX:CL</Id>
<AnchorDay>
<DayOfMonth>25</DayOfMonth>
</AnchorDay>
<ContractFrequency>Monthly</ContractFrequency>
<Calendar>US-NYSE</Calendar>
<ExpiryMonthLag>1</ExpiryMonthLag>
<OffsetDays>3</OffsetDays>
<BusinessDayConvention>Preceding</BusinessDayConvention>
<IsAveraging>false</IsAveraging>
</CommodityFuture>
\end{minted}
\caption{NYMEX WTI \textnormal{\lstinline!CommodityFuture!} node}
\label{lst:ex_wti_comm_future_convention}
\end{listing}
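For illustration only, Listing \ref{lst:ex_hyp_comm_future_convention} sketches a hypothetical \lstinline!CommodityFuture! node that exercises the \lstinline!NthWeekday!, \lstinline!OptionExpiryOffset! and \lstinline!ProhibitedExpiries! nodes described above. The identifier and the prohibited dates are placeholders and are not taken from any exchange's actual contract specification; consult the relevant exchange rules before configuring a production convention.
\begin{listing}[h!]
\begin{minted}[fontsize=\footnotesize]{xml}
<CommodityFuture>
  <!-- Hypothetical convention, for illustration only -->
  <Id>EXCH:XYZ</Id>
  <AnchorDay>
    <NthWeekday>
      <Nth>3</Nth>
      <Weekday>Wed</Weekday>
    </NthWeekday>
  </AnchorDay>
  <ContractFrequency>Monthly</ContractFrequency>
  <Calendar>US-NYSE</Calendar>
  <ExpiryMonthLag>2</ExpiryMonthLag>
  <OffsetDays>0</OffsetDays>
  <BusinessDayConvention>Preceding</BusinessDayConvention>
  <OptionExpiryOffset>3</OptionExpiryOffset>
  <ProhibitedExpiries>
    <Dates>
      <!-- placeholder dates, not taken from any exchange calendar -->
      <Date>2021-12-24</Date>
      <Date>2021-12-31</Date>
    </Dates>
  </ProhibitedExpiries>
</CommodityFuture>
\end{minted}
\caption{Hypothetical \textnormal{\lstinline!CommodityFuture!} node (illustration only)}
\label{lst:ex_hyp_comm_future_convention}
\end{listing}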
\hypertarget{diospyros-evaluation}{%
\section{Diospyros Evaluation}\label{diospyros-evaluation}}
This directory contains the evaluation scripts for our ASPLOS 2021
paper, ``Vectorization for Digital Signal Processors via Equality
Saturation''.
This artifact is intended to reproduce the 4 main experimental results
from the paper:
\begin{itemize}
\tightlist
\item
  \textbf{Compiling benchmarks (Table 1; Figure 4)} Compilation and
  simulated cycle-level performance of 21 kernels (across 4 distinct
  functions). We compare kernels compiled by Diospyros with kernels
  compiled with the vendor's optimizing compiler and optimized library
  functions.
\item
  \textbf{Translation validation (Section 3)} Translation validation
  for all kernels, checking that the scalar specification and the
  vectorized result (both in our imperative vector domain-specific
  language) are equivalent.
\item
  \textbf{Timeout ablation study (Figure 5)} Ablation study on a single
  kernel (10×10 by 10×10 \texttt{MatMul}) over a range of equality
  saturation timeouts.
\item
  \textbf{Application case study (Section 5.6)} Speedup of an open
  source computer vision application (the
  \href{https://github.com/sweeneychris/TheiaSfM}{Theia}
  structure-from-motion library) with a single kernel compiled by
  Diospyros (QR decomposition).
\end{itemize}
We have split this artifact into two components:
\begin{enumerate}
\def\labelenumi{\arabic{enumi}.}
\tightlist
\item
  \textbf{Diospyros compiler.} This is our publicly available compiler
  that produces C/C++ code with intrinsics. This component can be run
  on the provided \href{https://www.virtualbox.org/}{VirtualBox}
  virtual machine, or installed from source and run locally on the
  reviewer's machine.
\item
  \textbf{Evaluation on licensed instruction set simulator (ISS).} Our
  compiler targets the Tensilica Fusion G3, which does not have a
  publicly accessible compiler or ISS (the vendor provides free
  academic licenses, but the process is not automated). To reproduce
  the cycle-level simulation statistics from our paper, we have
  provided reviewers limited access to our research server (with
  permission from the AEC chairs).
\end{enumerate}
\begin{center}\rule{0.5\linewidth}{0.5pt}\end{center}
\hypertarget{prerequisites}{%
\subsection{Prerequisites}\label{prerequisites}}
\hypertarget{option-1-virtualbox}{%
\subsubsection{Option 1: VirtualBox}\label{option-1-virtualbox}}
If you use the provided VirtualBox virtual machine, it has all
dependencies pre-installed.
\begin{enumerate}
\def\labelenumi{\arabic{enumi}.}
\tightlist
\item
  Download \href{https://www.virtualbox.org/}{VirtualBox} and follow the
  instructions, then log in with:
\end{enumerate}
\begin{verbatim}
password: asplos
\end{verbatim}
\begin{enumerate}
\def\labelenumi{\arabic{enumi}.}
\setcounter{enumi}{1}
\tightlist
\item
  Right-click the Desktop and select ``Open in Terminal''.
\item
Go to the project directory:
\end{enumerate}
\begin{verbatim}
cd diospyros
\end{verbatim}
\hypertarget{option-2-running-locally}{%
\subsubsection{Option 2: Running
locally}\label{option-2-running-locally}}
To run locally, clone this repository and follow the instructions for
installing prerequisites from the top-level README.
\begin{verbatim}
cd diospyros
\end{verbatim}
\hypertarget{using-the-compiler-in-the-virtualbox-or-locally}{%
\subsection{Using the compiler in the VirtualBox or
locally}\label{using-the-compiler-in-the-virtualbox-or-locally}}
First, run \texttt{make} to ensure the compiler is up to date:
\begin{verbatim}
make
\end{verbatim}
\begin{center}\rule{0.5\linewidth}{0.5pt}\end{center}
\hypertarget{generating-cc-with-intrinsics-on-the-vm-or-locally}{%
\subsubsection{Generating C/C++ with Intrinsics (on the VM or
locally)}\label{generating-cc-with-intrinsics-on-the-vm-or-locally}}
To start, we will generate most of the compiled C/C++ with intrinsics
independently from the research server (either on the provided VM or
locally). To skip running on the licensed simulator, we pass the
\texttt{-\/-skiprun} flag to the following commands.
First, to sanity check the setup, run the following test command, which
compiles the smallest size of each unique kernel with a 10 second
timeout for each:
\hypertarget{time-estimate-30-seconds}{%
\paragraph{Time estimate: 30 seconds}\label{time-estimate-30-seconds}}
\begin{verbatim}
python3 evaluation/eval_benchmarks.py --timeout 10 --skiprun --test -o test-results
\end{verbatim}
This produces \texttt{*.c} files with vector intrinsics, along with
metadata used for downstream translation validation, in a new
\texttt{test-results} directory. The directory is structured with
subdirectories for each function and size:
\begin{verbatim}
- test-results
- 2d-conv
- <sizes>
- mat-mul
- <sizes>
- q-prod
- <sizes>
- qr-decomp
- <sizes>
\end{verbatim}
Within each size, there are the following files:
\begin{verbatim}
- egg-kernel.c : vectorized C code with intrinsics.
- params.json : input size and vectorization parameters.
- spec.rkt : specification lifted with symbolic evaluation, in DSL.
- res.rkt : vectorized result of equality saturation, in DSL.
- outputs.rkt, prelude.rkt : metadata for downstream translation validation.
- stats.json : summary statistics from compilation, including wall clock time and memory usage.
\end{verbatim}
Once that succeeds, we can run the benchmarks with the default 180
second timeout. For now, we suggest skipping the one kernel
(\texttt{4x4\ QRDecomp}) that requires 38 GB of memory (as documented in
Table 1), since it is infeasible to run in a VM. If you are running
locally on a machine with sufficient memory, you can include this
benchmark by omitting the \texttt{-\/-skiplargemem} flag.
\hypertarget{time-estimate-45-minutes-5-hours-if-no---skiplargemem}{%
\paragraph{\texorpdfstring{Time estimate: 45 minutes (+5 hours if no
\texttt{-\/-skiplargemem})}{Time estimate: 45 minutes (+5 hours if no -\/-skiplargemem)}}\label{time-estimate-45-minutes-5-hours-if-no---skiplargemem}}
\begin{verbatim}
python3 evaluation/eval_benchmarks.py --skiprun --skiplargemem -o results
\end{verbatim}
The results here follow the same pattern of files as specified above,
but for 20/21 total kernel sizes.
\hypertarget{translation-validation}{%
\subsubsection{Translation validation}\label{translation-validation}}
To run translation validation on the results we just generated, we will
rerun the same script, passing the \texttt{-\/-validation} flag and the
\texttt{-\/-skipsynth} flag to tell the script not to regenerate the
results.
\hypertarget{time-estimate-10-minutes}{%
\paragraph{Time estimate: 10 minutes}\label{time-estimate-10-minutes}}
\begin{verbatim}
python3 evaluation/eval_benchmarks.py --skiprun --skipsynth --validation -o results
\end{verbatim}
The line
\texttt{Translation\ validation\ successful!\ \textless{}N\textgreater{}\ elements\ equal}
will be printed to the Terminal for each kernel that passes.
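If you would like a quick summary of how many kernels passed, one option
(not part of the provided scripts, and assuming the messages are written
to standard output) is to capture the output in a log file and count the
success lines; with \texttt{-\/-skiplargemem} above, the count should be
20:
\begin{verbatim}
python3 evaluation/eval_benchmarks.py --skiprun --skipsynth --validation -o results | tee validation.log
grep -c "Translation validation successful" validation.log
\end{verbatim}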
\begin{center}\rule{0.5\linewidth}{0.5pt}\end{center}
\hypertarget{running-code-on-the-licensed-instruction-set-simulator-on-the-research-server}{%
\subsection{Running code on the licensed Instruction Set Simulator (on
the research
server)}\label{running-code-on-the-licensed-instruction-set-simulator-on-the-research-server}}
\hypertarget{setting-up-your-directory}{%
\subsubsection{Setting up your
directory}\label{setting-up-your-directory}}
First, SSH into our research server using the provided credentials:
\hypertarget{on-the-vmlocally}{%
\paragraph{On the VM/locally}\label{on-the-vmlocally}}
\begin{verbatim}
ssh <user>@<server address>
\end{verbatim}
On the research server, cd to the project directory:
\hypertarget{on-the-server}{%
\paragraph{On the server}\label{on-the-server}}
\begin{verbatim}
cd /data/asplos-aec
\end{verbatim}
Here, we want to create a new folder per reviewer to avoid overwriting
each other's work. Run the following, replacing
\texttt{\textless{}letter\textgreater{}} with the reviewer letter you
are assigned in HotCRP (for example, \texttt{mkdir\ reviewer-A}). Then,
copy the \texttt{diospyros} repository into your new directory and
\texttt{cd} there:
\hypertarget{on-the-server-1}{%
\paragraph{On the server}\label{on-the-server-1}}
\begin{verbatim}
mkdir reviewer-<letter>
cp -r diospyros reviewer-<letter>/diospyros
cd reviewer-<letter>/diospyros
\end{verbatim}
Now, back on your original machine (the VM or locally), run the
following command to copy the data you just generated to your new
directory on the research server (replacing
\texttt{\textless{}letter\textgreater{}} with your letter):
\hypertarget{on-the-vmlocally-in-the-diospryros-root-directory-time-estimate-1-minute}{%
\paragraph{\texorpdfstring{On the VM/locally, in the \texttt{diospyros}
root directory (Time estimate: 1
minute)}{On the VM/locally, in the diospyros root directory (Time estimate: 1 minute)}}\label{on-the-vmlocally-in-the-diospryros-root-directory-time-estimate-1-minute}}
\begin{verbatim}
scp -r results <user>@<server address>:/data/asplos-aec/reviewer-<letter>/diospyros/results
\end{verbatim}
Back on the server, check that you have your results:
\hypertarget{on-the-server-2}{%
\paragraph{On the server}\label{on-the-server-2}}
\begin{verbatim}
ls results/*
\end{verbatim}
You should see the 4 kernel directories and respective sizes:
\begin{verbatim}
2d-conv mat-mul q-prod qr-decomp
\end{verbatim}
\hypertarget{running-the-instruction-set-simulator}{%
\subsubsection{Running the Instruction Set
Simulator}\label{running-the-instruction-set-simulator}}
Now, we can actually run the generated kernels on the vendor's
instruction set simulator. We pass the \texttt{-\/-skipsynth} flag to
avoid re-running synthesis and compilation to C with intrinsics.
\hypertarget{time-estimate-minutes}{%
\paragraph{Time estimate: ? minutes}\label{time-estimate-minutes}}
\begin{verbatim}
python3 evaluation/eval_benchmarks.py --skipsynth -d results
\end{verbatim}
This will add a file \texttt{egg-kernel.csv} to each subdirectory of
\texttt{results} with cycle-level performance for each kernel and
corresponding baseline.
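As a quick optional sanity check (using standard shell tools rather than
the provided scripts), you can count the generated CSV files; expect one
per kernel size that you ran, e.g., 20 if the large-memory benchmark was
skipped:
\begin{verbatim}
find results -name "egg-kernel.csv" | wc -l
\end{verbatim}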
\hypertarget{seeing-the-results}{%
\subsubsection{Seeing the Results}\label{seeing-the-results}}
Now that we have collected the data, the next step is to analyze the
results to draw the charts and tables you see in the ASPLOS paper.
First, use \texttt{chart\_benchmarks.py} to generate the plots:
\begin{verbatim}
python3 evaluation/chart_benchmarks.py -d results
\end{verbatim}
This produces the following files:
\begin{verbatim}
all_benchmarks.csv : all individual benchmark runs combined into one CSV (feeds Table 1)
all_benchmarks_chart.pdf : chart of all benchmarks (Figure 4)
extended_abstract_chart.pdf : small chart for the extended abstract
\end{verbatim}
The produced \texttt{all\_benchmarks.csv} file contains all the raw data
that went into the plots. You can look at it directly if you're curious
about specific numbers. To see some statistics about the compilation
process, run the \texttt{benchtbl.py} script:
\begin{verbatim}
python3 evaluation/benchtbl.py --plain
\end{verbatim}
This script reads \texttt{all\_benchmarks.csv} to make a table like
Table 1 in the ASPLOS paper. The \texttt{-\/-plain} flag emits a
plain-text table for reading directly; omit this flag to generate a
LaTeX fragment instead.
\begin{center}\rule{0.5\linewidth}{0.5pt}\end{center}
\hypertarget{theia-case-study}{%
\subsubsection{Theia Case Study}\label{theia-case-study}}
The ASPLOS paper puts the QR decomposition kernel into context by
measuring the impact on performance in an application: the
\href{https://github.com/sweeneychris/TheiaSfM}{Theia}
structure-from-motion library. The \texttt{theia} subdirectory in this
evaluation package contains the code necessary to reproduce those
end-to-end results.
The first step is to get the generated C code for the QR decomposition
kernel. Copy the \texttt{egg-kernel.c} file from our previously
generated code for an \texttt{N=3} \texttt{vecwidth=4} \texttt{QRDecomp}
to the Theia directory:
\begin{verbatim}
cp results/qr-decomp/3_4r/egg-kernel.c theia/egg-kernel.c
\end{verbatim}
\textbf{Time estimate: 1 minute.}
Then, type \texttt{make\ run\ -C\ evaluation/theia} to compile and
execute both versions of the \texttt{DecomposeProjectionMatrix}
function: one using Eigen (as the original open-source code does) and
one using the Diospyros-generated QR kernel. You can visually check the
outputs to make sure they match.
\textbf{Time estimate: 10 seconds.}
To post-process this output into the final numbers you see in the paper,
pipe it into the \texttt{dpmresults.py} analysis script:
\begin{verbatim}
make run -C evaluation/theia | python3 evaluation/dpmresults.py
\end{verbatim}
This will produce a JSON document with three values: the cycle count for
the Eigen- and Diospyros-based executions, and the speedup (which is
just the ratio of the two cycle counts). You can see the cycle counts
and speedup number in the ASPLOS paper at the end of Section 5.6.
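If you would like a more readable, indented view of that JSON document,
you can optionally pipe the output through Python's built-in
\texttt{json.tool} module (this is purely a viewing convenience, assuming
the script writes the JSON to standard output; it does not change the
reported values):
\begin{verbatim}
make run -C evaluation/theia | python3 evaluation/dpmresults.py | python3 -m json.tool
\end{verbatim}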
\begin{center}\rule{0.5\linewidth}{0.5pt}\end{center}
\hypertarget{timeout-ablation-study}{%
\subsubsection{Timeout Ablation Study}\label{timeout-ablation-study}}
Our ASPLOS paper studies the effect of changing the timeout of the
equality saturation solver on the quality of the generated solution. The
ablation study varies the timeout given to our solver and reports the
number of cycles taken by the design. We assume that this experiment is
performed on our research server with the Xtensa compiler and simulator
available.
Reproducing this result involves two steps:
\begin{enumerate}
\def\labelenumi{\arabic{enumi}.}
\tightlist
\item
  Run the study with different timeouts and generate executable code.
\item
  Simulate the executable code with the simulator.
\end{enumerate}
The script \texttt{evaluation/ablation/ablation-exp-gen.py} can be used
to generate executable vectorized solutions:
\begin{verbatim}
python3 evaluation/ablation/ablation-exp-gen.py -p evaluation/ablation/params/mat-mul-large -o exp-out -t 10 30 60 120 180
\end{verbatim}
\begin{itemize}
\tightlist
\item
\texttt{-p}: Specifies the parameter file, which itself describes the
size of matrices used in a matrix-multiply kernel.
\item
  \texttt{-o}: Location of the output folder.
\item
\texttt{-t}: A list of the timeouts to run the solver with.
\end{itemize}
Once the script is finished running, run the following to collect the
data:
\begin{verbatim}
cd exp-out
A_ROWS=10 A_COLS=10 B_ROWS=10 B_COLS=10 ./run_all.sh
cd -
\end{verbatim}
This will generate \texttt{exp-out/ablation.csv}.
Finally, run the following to generate the ablation study chart:
\begin{verbatim}
python3 evaluation/ablation/ablation_chart.py exp-out/ablation.csv
\end{verbatim}
This will generate \texttt{ablation.pdf}.
\section{Introduction}
% write 1-2 pages -- brief history, overview of the field, what the thesis is about, what the goal of the thesis is, and what is new.
The introduction goes here.
\section{Chapter overview}
An overview of each chapter goes here.
\paragraph{Chapter 1 -- Introduction}
This chapter.
\paragraph{Chapter 2 -- Basic \LaTeX{}}
Some latex basics here.
\paragraph{Chapter 3 -- More \LaTeX{}}
Even more latex stuff here.
\paragraph{Appendices}
There is only one appendix, namely Appendix \ref{appendix:appendix_A}, which contains an example of how Python code can be included in a thesis.
\section{Science Data Quality Assessment} \label{sec:sdqa}
\subsection{Operations Readiness Requirement}
The project team shall demonstrate that the integrated LSST system can monitor and assess the quality of all data as it is being collected.
\subsection{Objectives}
Science Data Quality Assessment is made up of a comprehensive system of tools to monitor and assess the quality of all data as it is being collected, including raw and processed data. The suite of tools has been designed to collect, analyze and record the information required to assess the data quality and make that information available to a variety of end users: observatory specialists, observatory scientists, downstream processing, the science planning/scheduling process and science users of the data.
The fast cadence of data collection requires highly automated data diagnostic and analysis methods (such as data mining techniques for finding patterns in large datasets, and various machine learning regression techniques). The Science Data Quality Assessment is mostly automated; however, it includes human-intensive components allowing further investigation and visualization of SDQA status.
Data quality assessment for Rubin must be carried out at a variety of cadences, which have different goals:
\begin{itemize}
\item Near real-time assessment of whether the data is scientifically useful;
\item Monitoring telemetry and imaging data to track the state of the integrated observatory, including the telescope, camera, networks and other supporting systems;
\item Analysis of the prompt processing properties and performance to determine if the alerts stream meets its requirements; and
\item Analysis of the data release processing properties and performance to determine if the static sky processing meets its requirements.
\end{itemize}
By the time we make a data release, the accumulated data quality analysis must be made available as part of the release artifacts.
\subsubsection{Near Real-time Monitoring \& Assessment of the Raw Data Quality}
The quality assessment of the raw image data combines the results from the state of the telescope, the camera (see below) and technical properties of the images. Each image will be analyzed as it is taken to measure its technical properties, both at the Summit Facility using the LSSTCam Diagnostic cluster and from properties determined during the prompt processing for alert production. Performance properties will be based on measurements and characteristics derived from the images themselves and from daily calibration data; these include:
\begin{itemize}
\item Sensor read noise, bias and gain variations, bitwise integrity, etc.\ from the CCD data;
\item Properties of the measured PSF, based on the three second moments, or equivalently the effective FWHM, e1 and e2;
\item Measured sky background level over the full FPA at amplifier-level resolution;
\item Measured source positions and errors relative to a reference catalog ({\it e.g.\ GAIA}) to monitor FPA stability and pointing accuracy; and
\item Measured source fluxes and errors relative to a reference catalog ({\it e.g.\ GAIA}) to monitor system throughput, sensitivity and algorithm processing.
\end{itemize}
At a minimum, these metrics enable the Project to determine if the data are within performance parameters and to label the visit as ``good''. The Construction Project will provide tooling that enables users to monitor trends in these quantities ({\it e.g.,} as a function of time, of where the telescope is pointing, and of position in the focal plane). A reference set of tools will initially be provided by the LOVE interface along with more detailed analysis tooling (as described below). In some cases, data from the Rubin Auxiliary Telescope (RAT) will be used to interpret trends in the LSSTCam data. The quality analysis needed to determine that the RAT is taking sufficiently good data will use the same tooling as provided for the main survey data.
\subsubsection{Longer Term Assessment}
TBD
\subsubsection{Assessing the quality of the processed data}
The quality assessment of the processed data relies on the calibration data products and the pipeline properties. In other words, the data assessment at this stage shall include the correction of systematic errors.
\subsection{SDQA Tools for analysis}
Science Data Quality Assessment will rely on a suite of tools including the electronic logging, the Engineering Facility Database (EFD), and the Rubin Science Platform (RSP). There is also a complementary set of data visualization tools to facilitate the understanding of the correlation between the data quality and the observatory state.
These tools include:
\begin{itemize}
\item Rubin Science Platform (RSP) -- used for investigative ad-hoc analysis (\citeds{LSE-319}); the RSP itself, through its web-based portal and Jupyter Lab interface, provides significant visualization capabilities;
\item Engineering Facility Database -- accessible through science platform and pre-defined dashboards;
\item LOVE -- the LSST Observing Visualization Environment, used to provide standardized dashboards and visualization of the system state;
\item SQuaSH -- the Science Quality System Harness (\citeds{sqr-009}).
\end{itemize}
\subsection{Criteria for Completeness Description}
The SDQA shall monitor and record the properties of the system error budget tree, including image quality and throughput, and define pass or fail status at each of the primary entries. These include the following image quality terms:
\begin{itemize}
\item PSF FWHM;
\item PSF shape ellipticity as described by second moments;
\item System wavefront measurements for each visit; and
\item Throughput measurements over the entire field of view.
\end{itemize}
Tooling for evaluating SDQA shall demonstrate the ability to display performance on a visit-by-visit basis as well as the ability to show the history of a performance metric over a user-defined span of time.
\subsection{Pre-Operations Interactions}
The pre-operations interactions include training the observing specialists to understand errors.
\subsection{Artifacts for ORR}
\begin{itemize}
\item Demonstrated functional tool kit as described above;
\item Code validation tool kit to quantify software performance;
\item Derived reporting from the Science Verification/Validation survey(s).
\end{itemize}
\chapter{Phonology}\label{phon}
This chapter deals with the phoneme inventory and the phonological and the morpho\-phonological rules and processes that are relevant in Yakkha. The \isi{orthography} used here is explained in \sectref{orth}. The examples in this chapter, unlike in the other chapters, have two lines representing the Yakkha data: the upper line shows the data after the application of all phonological and morphophonological rules, and the lower line shows the underlying phonemic material with morpheme breaks. The \isi{orthography} is used in both representations, and \textsc{ipa} is only used when necessary in the explanations in prose. \sectref{phon-inv} presents the phoneme inventory of Yakkha, \sectref{syllable} deals with the \isi{syllable structure} and \sectref{loansphon} discusses the treatment of loanwords, as they nicely illustrate the phonological features of Yakkha. \sectref{stress} lays out the conditions by which stress is assigned. The abundant morphophonological processes and their connections to \isi{syllable structure}, stress and diachronic processes are the concern of \sectref{morphophon}.
\section{Phoneme inventory and allophonic rules}\label{phon-inv}
\subsection{Vowel phonemes}\label{vowelphon}
\begin{center}
\begin{figure}[b]
\begin{tikzpicture}[scale=1]
\node (a3) at (0, 0) {u};
\node (a1) at (-4, 0) {i};
\node (a2) at (-2, 0) {};
\node (b1) at (-3.4,-1) {e};
\node (b2) at (-1.7,-1) {};
\node (b3) at (0,-1) {o};
\node (c1) at (-2.8,-2) {};
\node (c2) at (-1.4,-2) {};
\node (c3) at (0,-2) {};
\node (d1) at (-2.2,-3) {a};
\node (d2) at (-1.1,-3) {};
\node (d3) at (0,-3) {};
\draw (a1) -- (a2) -- (a3);
\draw (b1)--(b2)--(b3);
\draw (c1)--(c2)--(c3);
\draw (d1)--(d2)--(d3);
\draw (a1)--(b1)--(c1)--(d1);
\draw (a2)--(b2)--(c2)--(d2);
\draw (a3)--(b3)--(c3)--(d3);
\end{tikzpicture}
\caption{Yakkha vowel phonemes}\label{fig-vowels}
\end{figure}
\end{center}
Yakkha has only five basic vowels; it has two close vowels, the front /i/ and the back /u/, two close-mid vowels, the front /e/ and the back /o/, and an open vowel /a/. In contrast to other Kiranti languages, there are no central vowels like /ɨ/, /ʌ/ or /ə/. A chart with the vowel inventory is provided in \figref{fig-vowels}. In addition to these vowels, a front vowel [ɛ] may occur, but only as the contracted form of the diphthong /ai/ (see \sectref{diphth}), not in any other environments. Minimal pairs are provided in \tabref{min-pair-v}. Tone, length or nasal articulation do not constitute phonemic contrasts in Yakkha.
\begin{table}
\resizebox*{\textwidth}{!}{
\small
\begin{tabular}{lllll}
\lsptoprule
{\sc phonemes} & {\sc examples}\\
\midrule
{\bf /e/ vs. /i/} & \emph{nema} & \rede{lay, sow seed} &\emph{nima} & \rede{know, see}\\
& \emph{tema} & \rede{lean on an angle} & \emph{tima} & \rede{put down, invest}\\
{\bf /e/ vs. /a/} &\emph{tema} & \rede{lean on an angle} & \emph{tama} & \rede{come}\\
&\emph{yepma} & \rede{stand} & \emph{yapma} & \rede{be rough}\\
{\bf /o/ vs. /u/} & \emph{okma} & \rede{shriek} & \emph{ukma} & \rede{bring down}\\
& \emph{hoʔma} & \rede{prick, pierce} & \emph{huʔma} & \rede{push, stuff}\\
{\bf /o/ vs. /a/} & \emph{thokma} & \rede{spit} & \emph{thakma} & \rede{weigh, hand/send up}\\
& \emph{hoʔma} & \rede{prick, pierce} & \emph{haʔma} & \rede{scrape off/out}\\
{\bf /u/ vs. /i/} & \emph{ukma} & \rede{bring down} & \emph{ikma} & \rede{chase}\\
& \emph{umma} & \rede{pull} & \emph{imma} & \rede{sleep}\\
\lspbottomrule
\end{tabular}
}
\caption{Minimal pairs for vowel phonemes}\label{min-pair-v}
\end{table}
\subsection{Diphthongs}\label{diphth}
Given that adjacent vowels are generally avoided in Yakkha, it does not come as a surprise that \isi{diphthongs}, i.e., adjacent vowels in the same \isi{syllable}, are rare. The four \isi{diphthongs} /ai/, /ui/, /oi/ and /au/ were found, occurring marginally, as in \emph{ŋhai} (a dish made from fish stomach), \emph{hoi!} \rede{enough!}, \emph{uimalaŋ} \rede{steeply downhill}, \emph{(h)au} (a sentence-final \isi{exclamative} particle) and \emph{ambau!} (an \isi{exclamative} expression indicating that the speaker is impressed by huge or dangerous things). Some speakers pronounce underlying sequences like /ŋond-siʔ-ma/ and /thend-siʔ-ma/ with nasalized \isi{diphthongs}, [ŋoĩsiʔma] and [theĩsiʔma], respectively (instead of the more common pronunciations [ŋonsiʔma] and [thensiʔma]).\footnote{The nasalization is exceptional in these cases. Usually, the prosody of Yakkha supports the opposite process, namely the change of nasal vowels to nasal consonants, e.g., in borrowed \ili{Nepali} lexemes (see \sectref{loansphon}). Nasals may, however, regularly change to nasalization of the preceding vowel in intervocalic environment and before \isi{glides} and \isi{liquids}, as in \emph{mẽ.u.le} (/meN-us-le/) \rede{without entering} and \emph{mẽ.yok.le} (/meN-yok-le/) \rede{without searching}, see \sectref{nas-son}.}
Most \isi{diphthongs} have their origin in a multimorphemic or in a multisyllabic environment. The \isi{adverb} \emph{uimalaŋ}, for instance, like many other spatial adverbs in Yakkha, is composed of a stem (diachronically, most probably a noun) and the possessive prefix \emph{u-}. The marginal nature of the \isi{diphthongs} is confirmed also by the fact that they are found more in names and discourse particles than in lexemes with semantic content, and never in verbal roots. Occasionally, \isi{diphthongs} are just one stage in a larger process of contraction. Consider the inflected form \emph{waiʔ.na} \rede{(he/she/it) exists}, which is also found as [wɛʔ.na]. Its nonpast semantics and synchronically available contracted forms of verbs suggest that [waiʔ.na] used to be *\emph{[wa.me.na] } historically. \tabref{table-diphth} provides an exhaustive list of lexemes containing \isi{diphthongs} from the more than 2400 lexemes in the lexical database that builds the basis of the current analysis.
\begin{table}[htp]
\begin{center}
\begin{tabular}{llll}
\lsptoprule
{\bf /au/}&{\bf /oi/}&{\bf /ui/}&{\bf /ai/}\\
\midrule
\emph{(h)au}&\emph{coilikha}&\emph{uimalaŋ}&\emph{ŋhai}\\
({\sc excla})&(a village)&\rede{steeply downhill}&\rede{fish stomach}\\
\emph{ambau!}&\emph{hoiǃ}&\emph{phakkui}&\emph{Yaiten}\\
\rede{holy smoke!}&\rede{enoughǃ}&\rede{pig droppings}&(a village)\\
& &\emph{waghui} &\emph{lai}\\
& & \rede{chicken droppings}&({\sc excla})\\
\lspbottomrule
\end{tabular}
\caption{Lexemes containing diphthongs}\label{table-diphth}
\end{center}
\end{table}
\subsection{Consonant phonemes}\label{consphon}
\tabref{fig-consonants} below shows the central and the marginal consonant phonemes of Yakkha. The phones that are not in parentheses clearly have phonemic status; they occur in basic, uninflected stems. The phonemic status of the phones in parentheses is not always straightforward (see discussion below). Where my \isi{orthography} deviates from \textsc{ipa}, this is indicated by angle brackets.
\begin{table}[htp]
\resizebox*{\textwidth}{!}{
\small
\begin{tabular}{lcccccc}
\lsptoprule
& {\sc bilabial} & {\sc alveolar} & {\sc retroflex } & {\sc palatal }& {\sc velar }& {\sc glottal}\\
\midrule
{\sc Plosives }& p & t & (ʈ) & & k & ʔ\\
{\sc asp.} & ph & th & (ʈh) & & kh &\\
{\sc voiced}& (b) & (d) & (ɖ) & & (g) &\\
{\sc voiced-asp. } & (bh) & (dh) & (ɖh) & & (gh) &\\
{\sc Affricates }& & ts <c> & & & & \\
{\sc asp.} & & tsʰ <ch> & & & &\\
{\sc voiced} & & (dz) <j> & & & & \\
{\sc voiced-asp.} & & (dzʰ) <jh> & & & &\\
{\sc Fricatives } & & s& & & & h \\
{\sc Nasals } & m & n & & & ŋ & \\
{\sc Nas. asp.} & (mh) & (nh) & & & (ŋh) & \\
{\sc Rhotics }& & r& & & & \\
{\sc Laterals }& & l& & & & \\
{\sc Glides } & w & & &j <y> & & \\
{\sc Glides asp.} & wh & & & & &\\
\lspbottomrule
\end{tabular}
}
\caption{Yakkha consonant phonemes}\label{fig-consonants}
\end{table}
\subsubsection{The main phonemic distinctions in the consonants}
Yakkha distinguishes six places of articulation: bilabial, alveolar, retroflex (or post-alveolar), palatal, velar and glottal. Retroflex plosives most probably made their way into Yakkha via \ili{Nepali} loanwords. They are found only in a few Yakkha lexemes, and no proper minimal pairs could be established. The retroflex series lacks a nasal, too. However, in the few words that are found with retroflex stops, they are robust, and pronouncing these words with an alveolar stop is not an option.
\largerpage
Yakkha fits well into the Eastern branch of Kiranti, for instance in the loss of phonemic contrast between voiced and unvoiced plosives. Generally, plosives, unless they are found in an environment that triggers \isi{voicing}, are pronounced as voiceless. As always, a few exceptions occur that cannot be explained by some rule. The exact parameters of the \isi{voicing} rule are laid out in \sectref{voicing}. A robust phonemic contrast exists between aspirated and unaspirated consonants, as it is found in the plosives (except for the glottal stop), the affricate and the bilabial glide /w/. Aspiration of a stem-initial consonant, historically a morphological means to increase the \isi{transitivity} in \isi{Tibeto-Burman} \citep{Michailovsky1994Manner, Jacques2012_Internal, Hill2014_Note}, has become purely phonemic in Yakkha. The aspirated plosives have a strong fricative component. Three \isi{nasals} are distinguished by their place of articulation: bilabial /m/, alveolar /n/ and velar /ŋ/. Yakkha has two fricatives /s/ and /h/, and two \isi{liquids}, /l/ and /r/. The rhotic does not occur word-initially. In this position, */r/ has changed to the palatal glide /y/\footnote{I use the grapheme <y> to represent IPA [j]; see \sectref{orth} for the notes on the orthography used in this book.} (see also \tabref{soundchange} in Chapter \ref{languageintro} and the references therein).\footnote{Furthermore, /y/ may be omitted before /e/ in some lexemes, but this process is subject to considerable individual variation.} The distribution of the rhotic consonant deserves a closer look, also in the perspective of other Eastern Kiranti languages (see \sectref{rhotic} below). \tabref{min-pair-c} provides minimal pairs for the basic consonant phonemes, mostly from verbal stems or citation forms.
\begin{table}[t]
\resizebox*{\textwidth}{!}{
\small
\begin{tabular}{lllll}
\lsptoprule
{\sc phonemes} & {\sc examples}\\
\midrule
{\bf /k/} vs. {\bf /kh/} & \emph{keʔma} & \rede{come up} & \emph{kheʔma} & \rede{go}\\
& \emph{kapma} & \rede{carry along, have} & \emph{khapma} & \rede{thatch, cover}\\
{\bf /p/} vs. {\bf /ph/} & \emph{pakna} & \rede{young guy} & \emph{phak} & \rede{pig}\\
& \emph{pekma} & \rede{fold} & \emph{phekma} & \rede{slap, sweep}\\
{\bf /t/} vs. {\bf /th/} & \emph{tumma} & \rede{understand} & \emph{thumma} & \rede{tie}\\
& \emph{tokma} & \rede{get} & \emph{thokma} & \rede{hit with horns}\\
{\bf /c/} vs. {\bf /ch/} & \emph{cikma} & \rede{age, ripen} & \emph{chikma} & \rede{measure, pluck}\\
& \emph{cimma} & \rede{teach} & \emph{chimma} & \rede{ask}\\
{\bf /k/} vs. {\bf /ʔ/} & \emph{okma} & \rede{shriek} & \emph{oʔma} & \rede{be visible}\\
{\bf /t/} vs. {\bf /ʔ/ }& \emph{-met} & ({\sc caus}) & \emph{-meʔ} & ({\sc npst})\\
{\bf /p/} vs. {\bf /ʔ/} & \emph{opma} & \rede{consume slowly} & \emph{oʔma} & \rede{be visible}\\
{\bf /t/} vs. {\bf /r/ }& \emph{ot} & \rede{be visible} (stem) & \emph{or} & \rede{peel off}\\
{\bf /l/} vs. {\bf /r/} & \emph{khelek} & \rede{ant} & \emph{kherek} & \rede{hither}\\
{\bf /y/} vs. {\bf /w/} & \emph{yapma} & \rede{be uncomfortable} & \emph{wapma} & \rede{paw, scrabble}\\
& \emph{yamma} & \rede{disturb} & \emph{wamma} & \rede{attack, pounce}\\
{\bf /y/} vs. {\bf /l/} & \emph{yapma} & \rede{be uncomfortable} & \emph{lapma} & \rede{accuse, blame}\\
{\bf /w/} vs. {\bf /wh/ }& \emph{wapma} & \rede{paw, scrabble} & \emph{whapma} & \rede{wash clothes}\\
& \emph{waŋma} & \rede{curve, bend} & \emph{whaŋma} & \rede{boil}\\
{\bf /s/} vs. {\bf /h/}& \emph{sima} & \rede{die} & \emph{hima} & \rede{spread}\\
& \emph{somma} & \rede{stroke gently} & \emph{homma} & \rede{fit into}\\
{\bf /k/} vs. {\bf /ŋ/}& \emph{pekma} & \rede{break} & \emph{peŋma} & \rede{peel}\\
& \emph{okma} & \rede{shriek} & \emph{oŋma} & \rede{attack}\\
{\bf /ŋ/} vs. {\bf /m/} & \emph{toŋma} & \rede{agree} & \emph{tomma} & \rede{place vertically}\\
& \emph{tuŋma} & \rede{pour} & \emph{tumma} & \rede{understand}\\
{\bf /ŋ/} vs. {\bf /n/} & \emph{=ŋa} & ({\sc erg}) & \emph{=na} & ({\sc nmlz.sg})\\
{\bf /m/} vs. {\bf /n/} & \emph{makma} & \rede{burn} & \emph{nakma} & \rede{beg, ask}\\
& \emph{miʔma} & \rede{think, remember} & \emph{niʔma} & \rede{count, consider}\\
\lspbottomrule
\end{tabular}
}
\caption{Minimal pairs for consonants}\label{min-pair-c}
\end{table}
\subsubsection{Marginal consonant phonemes}
Several of the phonemes occur only marginally, either in \ili{Nepali} loanwords, or in just a handful of Yakkha lexemes. This basically applies to the already mentioned retroflex plosives and to all voiced obstruents, as \isi{voicing} is generally not distinctive in Yakkha.\footnote{There are quasi minimal pairs such as \emph{apaŋ} \rede{my house} and \emph{abaŋ} \rede{I came}, but both are inflected words and the difference is that \emph{a-} in \emph{apaŋ} is a prefix, and the rule that is responsible for the \isi{voicing} of plosives excludes prefixes.} Some sounds are never found in uninflected lexemes, so that they only emerge as the result of some morphophonological processes that are triggered by the concatenation of morphemes with certain phonological features. Voiced-aspirated consonants and the aspirated \isi{nasals} [mʰ], [nʰ] and [ŋʰ] belong to this group. The marginal sounds are included in parentheses in \tabref{fig-consonants}. The reader is referred to \sectref{morphophon} for the details of the various morphophonological processes that lead to marginal phonemes.
\subsubsection{The phonemic status of the glottal stop}
The glottal stop is contrastive, as several minimal pairs in \tabref{min-pair-c} demonstrate. The glottal stop surfaces only before \isi{nasals} and laterals, so that one can find minimal pairs like \emph{meŋ.khuʔ.le} \rede{without carrying} and \mbox{\emph{meŋ.khu.le}} \rede{without stealing}, or \emph{men.daʔ.le} \rede{without bringing} and \emph{men.da.le} \rede{without coming}. However, the glottal stop can also be the result of a phonological operation. Unaspirated stops, especially /t/, tend to get neutralized to [ʔ] syllable-finally (aspirated stops do not occur in this position). The glottal stop is also prothesized to vowel-initial words to maximize the onset. In certain grammatical markers, the glottal stop may also be epenthesized at the end of the \isi{syllable} when it is followed by nasal consonants or \isi{glides} (see \Next). This may happen only when the \isi{syllable} is stressed, but the conditions for this epenthesis are not fully understood yet. It never occurs at the end of a word (if the word is defined by the domain to which stress is assigned).
\ex.\a.\glll tu.mok.peʔ.na ma.mu\\
/tumok=pe=na mamu/\\
Tumok{\sc =loc=nmlz.sg} girl\\
\rede{the girl from Tumok}
\b.\glll men baʔ.loǃ\\
/men pa=lo/\\
{\sc cop.neg} {\sc emph=excla}\\
\rede{Of course notǃ}
The glottal stop is less consonant-like than the other plosives. In certain environments, stems that end in a glottal stop may behave identically to stems consisting of open syllables (CV). For instance, if the stem vowel /e/ or /i/ (of a CV stem or a CVʔ stem) is followed by a vocalic suffix like \emph{-a} (marking past or \isi{imperative}), it changes into a glide /y/ and becomes part of the onset. This process is illustrated by the behavior of \emph{kheʔma} \rede{go} and \emph{piʔma} \rede{give}, cf. \tabref{glottal}. If the stem vowel (of a CV stem or a CVʔ stem) is a back vowel, a glide /y/ is inserted between stem and suffixes. If open or /ʔ/-final stems are followed by the suffix sequence \emph{-a-u}, this sequence of suffixes is not overtly realized. Examples of these processes are provided in \tabref{glottal}, contrasted with the behavior of stems with open syllables and stems that end in /p/, /t/ or /k/. The first column shows the underlying stem, the second column provides the citation form and the gloss, the third column shows the behavior before /l/, by means of the forms of the \isi{negative converb}. The fourth and the fifth columns show the behavior before vowels, by means of intransitive {\sc 3.sg} past forms (in \emph{-a}),\footnote{Or detransitivized, depending on the original \isi{valency} of the stem.} and transitive {\sc 3sg.A>3sg.P} past forms (in \emph{-a-u}).\footnote{The verb \emph{cama} \rede{eat} is the only transitive verb that has an open stem in /a/. It is exceptional in having an ablaut. Open stems are rare, and not all of them are found among both transitive and intransitive verbs, so that some fields of the table cannot be filled.}
To wrap up, the intervocalic environment distinguishes /ʔ/-final stems from stems that end in /p/, /t/ or /k/, while the \isi{infinitive} and the environment before /l/ distinguishes /ʔ/-final stems from open stems.
The glottal stop at the end of verbal stems can be reconstructed to */t/, in \isi{comparison} with other Eastern Kiranti languages (cf. \sectref{stem} on the structure of the verbal stems).
\begin{table}[htp]
\resizebox*{\textwidth}{!}{
\small
\begin{tabular}{lllll}%das gleiche mit longtable für mehrseitige tabellen
\lsptoprule
\multirow{3}{*}{\sc stem} &\multirow{3}{*}{\sc citation form} & /\_-l& /\_-a& /\_-a-u\\
&&({\sc neg.cvb})&({\sc 3sg.pst})&({\sc 3sg>3sg.pst})\\
\midrule
\multicolumn{5}{l}{{\bf /ʔ/-final stems}}\\
\midrule
/khuʔ/& \emph{khuʔma} \rede{carry}&\emph{meŋ.khuʔ.le}& \emph{khu.ya.na}&\emph{khu.na}\\
/waʔ/&\emph{waʔma} \rede{wear, put on}&\emph{mẽ.waʔ.le}&\emph{wa.ya.na}&\emph{wa.na}\\
/soʔ/&\emph{soʔma} \rede{look}&\emph{men.soʔ.le}&\emph{so.ya.na}&\emph{so.na}\\
/kheʔ/&\emph{kheʔma} \rede{go}&\emph{meŋ.kheʔ.le}&\emph{khya.na}&-\\
/piʔ/&\emph{piʔma} \rede{give}&\emph{mem.biʔ.le}&\emph{pya.na}&\emph{pi.na}\\
\midrule
\multicolumn{5}{l}{{\bf V-final stems}}\\
\midrule
/ca/&\emph{cama} \rede{eat}&\emph{men.ja.le}&\emph{ca.ya.na}&\emph{co.na}\\
/a/&\emph{ama} \rede{descend}&\emph{mẽ.a.le}&\emph{a.ya.na}&-\\
/u/&\emph{uma} \rede{enter}&\emph{mẽ.u.le}&\emph{u.ya.na}&-\\
/si/&\emph{sima} \rede{die}&\emph{men.si.le}&\emph{sya.na}&-\\
\midrule
\multicolumn{5}{l}{{\bf /p/-, /t/-, /k/-final stems}}\\
\midrule
/lap/&\emph{lapma} \rede{seize, catch}&\emph{mẽ.lap.le}&\emph{la.ba.na}&\emph{la.bu.na}\\
/yok/&\emph{yokma} \rede{search}&\emph{mẽ.yok.le}&\emph{yo.ga.na}&\emph{yo.gu.na}\\
/phat/&\emph{phaʔma} \rede{help}&\emph{mem.phat.le}&\emph{pha.ta.na}&\emph{pha.tu.na}\\
&&\ti \emph{mem.phaʔ.le}&&\\
\lspbottomrule
\end{tabular}
}
\caption{The glottal stop stem-finally, compared to vowels and other plosives}\label{glottal}
\end{table}
\subsubsection{The status of /r/ in Yakkha and in an Eastern Kiranti perspective}\label{rhotic}
The rhotic /r/ does not occur word-initially in genuine Yakkha lexemes, due to the typical Eastern Kiranti sound change from */r/ to /y/ in word-initial position (see \sectref{genetic} and \citealt{Bickeletal_Firstperson}). There are words like \emph{lok} \rede{anger} and \emph{yok} \rede{place}, but no words starting with /r/.\footnote{There are a few exceptions, such as the binomial (a bipartite noun) \emph{raji-raŋma} which means \rede{wealth of land}. It might be a word that preserved an archaic phonological structure, or a loan (\emph{rājya} means \rede{kingdom} in \ili{Nepali}). Both options are possible and attested for the ritual register (the \emph{Munthum}) of other Kiranti languages \citep{Gaenszle2011_Binomials}.} It can, however, occasionally be found in complex onsets, and syllable-initially in intervocalic environment. \tabref{r-l} shows that /r/ and /l/ can be found in very similar environments, even though proper minimal pairs are rare. In some instances, intervocalic /r/ can be traced back to historical */t/, as in the complex predicates in \Next.
\ex.\ag. pe.sa.ra.ya.na\\ %\glll
%/pes-a-*ta-a=naʔ/\\
fly{\sc [3sg]-pst-V2.come-pst=nmlz.sg}\\
\rede{It came flying to me.}
\bg. phuŋ chik.tu.ra=na\\ %\glll
%/phuŋ chikt-u-*taʔ-a-u=na/\\
flower pluck{\sc -3.P-V2.bring-pst-3.P=nmlz.sg}\\
\rede{She plucked and brought a flower.}
\begin{table}
\begin{tabular}{ll}
\lsptoprule
{\bf /r/}&{\bf /l/}\\
\midrule
\emph{khorek} \rede{bowl} &\emph{ulippa} \rede{old}\\
\emph{phiʔwaru} a kind of bird&\emph{chalumma} \rede{second-born daughter}\\
(Nep.: \emph{koʈerā})&\\
\emph{tarokma} \rede{start}&\emph{caloŋ} \rede{maize}\\
\emph{kherek} \rede{this side, hither} & \emph{khelek} \rede{ant}\\
\emph{caram} \rede{yard}& \emph{sala} \rede{talk}\\
\emph{khiriri} \rede{spinning round very fast} & \emph{philili} \rede{jittering}\\
\emph{phimphruwa} \rede{soap berry}& \emph{aphlum} \rede{hearth stones}\\
(Nep.: \emph{riʈʈhā})&\\
\emph{hobrek} \rede{rotten}& \emph{phoplek} \rede{[pouring] at once}\\
\emph{ʈoprak} \rede{leaf plate}& \emph{khesapla} \rede{a kind of fig tree}\\
\lspbottomrule
\end{tabular}
\caption{The phonemes /r/ and /l/ in similar environments}\label{r-l}
\end{table}
\largerpage[-1]
According to \cite{Driem1990The-fall}, [l] and [r] have a complementary distribution in Eastern Kiranti: [l] occurs word-initially and syllable-initially after stops, and [r] occurs between vowels and as the second component of complex onsets. The complementary distribution of [l] and [r] is a consequence of the general Eastern Kiranti sound change from */r/ to /y/ in word-initial position, which left /r/ only in word-internal position.\footnote{The sound change is evident from correspondences such as Yakkha and \ili{Limbu} \emph{yum} \rede{salt} and its non-Eastern cognates, e.g., \emph{rum} in \ili{Puma} (Central Kiranti, \citealt[393]{Bickeletal2009Puma}) or \emph{rɨm} in Dumi (Western Kiranti, \citealt[412]{Driem1993A-grammar}).} It is plausible that [l] and [r], now partly in complementary distribution, were reanalyzed as allophones as a consequence of this sound change. Van Driem's claim, however, could only partly be confirmed for Yakkha. In contrast to (Phedappe) \ili{Limbu} (\citealt{Driem1987A-grammar}, \citealt[688]{Schieringetal2010The-prosodic}) and other languages from the Greater Eastern branch of Kiranti such as Lohorung \citep[85]{Driem1990The-fall}, the rhotic is not found as an allophone of /l/ in intervocalic environment in Yakkha (compare the term for \rede{second-born daughter}, \emph{chalumma} (Yakkha) and \emph{sarumma} (\ili{Limbu}), \ili{Limbu} data from \citet[131]{Driem1985_LimbuKin}). Allophonic variation between /l/ and /r/ was not found for any environment in Yakkha.
\largerpage[-2]
For instance, the \isi{negative converb} \emph{me(n)...le} does not have an allomorph [me(n)...re] after CV-stems in Yakkha, in contrast to the same \isi{converb} in \ili{Limbu}. Furthermore, the question whether C + /r/ are syllabified as .Cr and C + /l/ as C.l could not be answered satisfactorily for Yakkha, based on auditory and phonological evidence. For instance, /r/ as well as /l/ may trigger \isi{voicing} in a preceding consonant, without any rule becoming apparent from the current data set (see \tabref{r-l}). To sum up, there is more than sufficient evidence for the phonemic status of /r/ in Yakkha.\footnote{The postulation of a phoneme /r/ has implications for a possible \isi{orthography} for future Yakkha materials. One of the current local orthographies, used, e.g., in \citet{Kongren2007Yakkha} and in several school books \citep{Jimi2009Engka-Yakkha}, conflated /r/ and /l/ under the grapheme <{\Deva ल}>, the \isi{Devanagari} letter for <l>. This turned out to be very impractical for the readers. It is not only too much abstracted away from the actual pronunciation, but also not justified by the phonological facts. It is my recommendation to change this in future publications, i.e., to write <{\Deva र}> (r) when a sound is pronounced as a rhotic and <{\Deva ल}> (l) when a sound is pronounced as a lateral.}
It is possibly a rather new development that the rhotic may also appear in syllable-final position. As shown in \Next, it may occur at the end of verbal stems that historically used to have a stem-final /t/-augment (cf. \sectref{stem}). This syllabification is only licensed when the following \isi{syllable} starts with /w/. When the stem is followed by vowel sounds, /r/ will be syllabified as onset. Another process leading to syllable-final \isi{rhotics} is metathesis, which is found in free allophonic variation, as in \emph{tepruki \ti tepurki} \rede{flea} or \emph{makhruna \ti makhurna} \rede{black}.
\ex.\ag.thur-wa-ŋ=na.\\
sew{\sc -npst[3.P]-1sg.A=nmlz.sg}\\
\rede{I will sew it.}
\bg.nir-wa-ŋ-ci-ŋ=ha.\\
count{\sc -npst-1sg.A-3nsg.P-1sg.A=nmlz.sg}\\
\rede{I will count them.}
\subsubsection{Aspirated voiced consonants}\label{asp-voiced}
Aspirated voiced plosives can result from the \isi{voicing} rule (cf. \sectref{morphophon}), or from sequences of morphemes with consonants followed by /h/, as in \Next[a]. In this way, aspirated consonants that are not found in simple lexemes can be created; they always involve a morpheme boundary, at least diachronically.\footnote{An exception is the word \emph{ŋhai} \rede{fish stomach (dish)}, for which no transparent multimorphemic etymology is available.} Another process leading to aspirated voiced consonants is vowel elision. If there is an underlying multimorphemic sequence of the shape /C-V-h-V/, the first vowel gets elided and /h/ surfaces as aspiration of the first consonant (see \Next[b]).
% \protectedex{
\ex.\a. \glll khe.i.ŋha\\
/kheʔ-i-ŋ=ha/\\
go{\sc [pst]-1pl-excl=nmlz.nsg}\\
\rede{We went.}
\b. \glll ca.mha.ci\\
/ca-ma=ha=ci/\\
eat{\sc -inf[deont]=nmlz.nsg=nsg}\\
\rede{They have to be eaten.}
% }
The environment that is required for the vowel elision is also provided by other forms of the verbal inflectional paradigm. In example \Next, the underlying sequence /-ka=ha/ ([-gaha] due to intervocalic \isi{voicing}) licenses the elision of the preceding vowel, which results in the realization of /h/ as aspiration of [g].
\ex.\a. \glll tun.di.wa.gha\\
/tund-i-wa-ka=ha/\\
understand{\sc [3A]-2.P-npst-2=nmlz.nsg}\\
\rede{He/she/they understand(s) you.}
\b. \glll tum.me.cu.ci.gha\\
/tund-meʔ-ci-u-ci-ka=ha/\\
understand{\sc -npst-du.A-3.P-3nsg.P-2=nmlz.nsg}\\
\rede{You (dual) understand them.}
\section{Syllable structure}\label{syllable}
This section describes the parameters for the possible \isi{syllable}s in Yakkha. The structure of the \isi{syllable} is maximally CCVC, i.e., VC, CV, CCV and CVC are possible as well. If a word-initial \isi{syllable} starts with a vowel, a glottal stop is prothesized to yield a minimal onset. Syllables with CVV structure occur only in the form of \isi{diphthongs} (see \sectref{diphth} above). They are exceedingly rare, and they can generally be traced back to bisyllabic or bimorphemic contexts. Syllables containing \isi{diphthongs} are always open.
In a simple onset, any consonant can occur, with the exception of /r/, which got replaced by /y/ diachronically in Eastern Kiranti. Among the complex onsets, two sets have to be distinguished. The first set has the general shape CL, where L stands for \isi{liquids} and \isi{glides}. In this type of \isi{syllable}, the first consonant can be a plosive, a fricative, an affricate or a nasal, while the second consonant can only be a liquid (/l/ or /r/) or a glide (/y/ or /w/). The onsets containing /y/ or /w/ result from contracted CVCV sequences diachronically. Some alternations between a monosyllabic and a bisyllabic structure, like \emph{cwa \ti cu.wa} \rede{beer}, \emph{chwa \ti chu.wa} \rede{sugarcane}, \emph{nwak \ti nu.wak} \rede{bird} and \emph{yaŋcuklik \ti yaŋcugulik} \rede{ant} suggest this. Comparison with related languages like Belhare and \ili{Chintang} provides further evidence for a former bisyllabic structure: \ili{Chintang} and Belhare have \emph{cuwa} and \emph{cua}, respectively, for \rede{water}, and Belhare furthermore has \emph{nua} for \rede{bird} \citep{Bickel1997Dictionary, Raietal2011_Chintangdict}. For Athpare, both bisyllabic and monosyllabic forms are attested \citep{Ebert1997A-grammar}.
On the other hand, complex onsets are not particularly uncommon in \isi{Tibeto-Burman}. Word-initially, the status of CL sequences as complex onsets is robust, but word-inter\-nally, alternative syllabifications would be theoretically possible. This possibility can be ruled out at least for the clusters involving aspirated plosives, because aspirated plosives may never occur syllable-finally. A segmentation like [kith.rik.pa] or [aph.lum] would violate the restriction on a well-formed \isi{syllable} coda in Yakkha, so that it has to be [ki.thrik.pa] and [a.phlum] (\rede{policeman} and \rede{hearth}, respectively). For unaspirated plosives, it is hard to tell how they are syllabified. Not all logically possible onsets occur, and some are only possible in morphologically complex (both inflected and derived) words. Some examples of complex onsets are provided in \tabref{onsets-liq} and \tabref{onsets-gli}. Onset types not shown in the tables do not occur.
\begin{table}
\begin{tabular}{lll}
\lsptoprule
&{\bf /l/} &{\bf /r/}\\
\midrule
{\bf /p/}&\emph{i.plik} \rede{twisted}&\emph{ca.pra} \rede{spade}\\
{\bf /ph/} &\emph{a.phlum } \rede{trad. hearth}&\emph{phim.phru.wa} \rede{soap berry}\\
{\bf /k/}&\emph{saklum} \rede{frustration}&\emph{thaŋ.kra} \rede{store for grains}\\
{\bf /kh/}&(-)&\emph{ʈu.khruk} \rede{head}\\
{\bf /s/}&(-)&\emph{mik.srumba} \rede{blind person}\\
{\bf /n/}&\emph{nlu.ya.ha} \rede{they said}&(-)\\
\lspbottomrule
\end{tabular}
\caption{Complex onsets with liquids}\label{onsets-liq}
\end{table}
\begin{table}
\begin{tabular}{lll}
\lsptoprule
&{\bf /w/}&{\bf /y/}\\
\midrule
{\bf /p/}&(-)&\emph{pyaŋ.na} \rede{He/she gave it to me.}\\
{\bf /ph/}&\emph{tam.phwak} \rede{hair}&\emph{ci.sa.bhya} \rede{It cooled down.} \\
{\bf /t/}&\emph{twa} \rede{forehead}&(-)\\
{\bf /ʈh/}&\emph{ʈhwaŋ} \rede{smelly} (\textsc{ideoph})&(-)\\
{\bf /c/}&\emph{cwa} \rede{heart}&\emph{cya} \rede{child}\\
{\bf /ch/}&\emph{chwa} \rede{sugarcane}&\emph{op.chyaŋ.me} \rede{firefly}\\
{\bf /k/}&(-)&\emph{kya} \rede{Come up!}\\
{\bf /kh/}&\emph{o.sen.khwak} \rede{bone}&\emph{khya} \rede{Go!}\\
{\bf /s/}&\emph{swak} \rede{secretly}&\emph{sya.na} \rede{He/she died.}\\
{\bf /n/}&\emph{nwak} \rede{bird}&\emph{(ayupma) nyu.sa.ha} \rede{I am tired.}\\
\lspbottomrule
\end{tabular}
\caption{Complex onsets with glides}\label{onsets-gli}
\end{table}
The second set of onsets has the shape NC, where N stands for an unspecified nasal and C for any stem-initial consonant. This type of onset is found only when one of the nasal prefixes is attached to a stem, never in monomorphemic syllables, and never in syllables inside a word. The value of the nasal is conditioned by the place of articulation of the following consonant. Based on auditory evidence, I conclude that the nasal is not syllabified. However, as the processes related to prosody or to morphophonology either exclude prefixes from their domain or apply across \isi{syllable} boundaries as well, I could not find independent evidence for this claim. The nasal prefixes may have the following morphological content: {\sc 3pl.S/A} and \isi{negation} in verbs (see \Next[a] and \Next[b]), a second person possessive in nouns (see \Next[c]),
\largerpage
and a distal relation in spatial adverbs and \isi{demonstratives} (see \Next[d] and \Next[e]).
\ex.\a.\glll mbya.gha\\
/N-piʔ-a-ka=ha/\\
{\sc 3pl.A-}give{\sc -pst-2.P=nmlz.nsg}\\
\rede{They gave it to you.} %check
\b.\glll ŋkhyan.na\\
/N-kheʔ-a-n=na/\\
{\sc neg-}go{\sc [3sg]-pst-neg=nmlz.sg}\\
\rede{He did not go.}
\b.\glll mbaŋ\\
/N-paŋ/\\
{\sc 2sg.\textsc{poss}-}house\\
\rede{your house}
\b.\glll ŋkhaʔ.la\\
/N-khaʔ.la/\\
{\sc dist-}like\_this\\
\rede{like that}
\b.\glll nnhe\\
/N-nhe/\\
{\sc dist-}here\\
\rede{there}
The coda is restricted to \isi{nasals}, unaspirated plosives and, rarely, /r/ (cf. \sectref{rhotic} above). The plosives are often unreleased or neutralized to [ʔ] in the coda, unless they are at the end of a word. While the glottal stop frequently occurs in \isi{syllable} codas, it is never found at the end of a phonological word (as defined by the stress domain).
\figref{syll} summarizes the possible \isi{syllable} in Yakkha. If the form of a morpheme does not agree with the \isi{syllable structure}, several strategies may apply. If, for instance, a verbal stem ends in two consonants (C-s, C-t), as \emph{chimd} \rede{ask} or \emph{yuks} \rede{put}, and a vowel follows the stem in an inflected form, the stem-final consonant becomes the onset of the next \isi{syllable} (see \Next). If a consonant follows the stem, the final consonant of the stem is deleted (see \NNext).
\begin{figure}[htp]
\begin{center}
\begin{tabular}{ll|c|l}
\lsptoprule
\multicolumn{2}{l|}{{\sc onset}}&\multicolumn{1}{l|}{\sc nucleus}&{\sc coda}\\
\hline
\multicolumn{2}{l|}{any consonant (except /r/)}& \multirow{3}{*}{any vowel} & unasp. plosive,\\
\cline{1-2}
obstruent&+ liquid, glide & & nasal,\\
\cline{1-2}
nasal&+ any consonant (except /r/)& & /r/\\
\hline
\multicolumn{2}{l|}{any consonant (except /r/)}&\multicolumn{2}{l}{diphthong}\\
\lspbottomrule
\end{tabular}
\caption{The syllable}\label{syll}
\end{center}
\end{figure}
\ex.\a. \glll chim.duŋ.na\\
/chimd-u-ŋ=na/\\
ask{\sc -3.P[pst]-1sg.A=nmlz.sg}\\
\rede{I asked him.}
\b. \glll chim.daŋ\\
/chimd-a-ŋ/\\
ask{\sc -imp-1sg.P}\\
\rede{Ask meǃ}
\ex.\a. \glll chim.nen.na\\
/chimd-nen=na/\\
ask{\sc -1>2[pst]=nmlz.sg}\\
\rede{I asked you.}
\b. \glll men.chim.le\\
/men-chimd-le/\\
{\sc neg-}ask{\sc -cvb}\\
\rede{without asking}
\newpage
In certain morphological environments and in fast speech, more complex onsets are possible, with the form NCL (nasal-consonant-liquid/glide), but this is restricted to particular inflected verb forms, namely third person plural or negated nonpast forms of verbs with open stems (or with CVʔ stems) (see \Next). Each part of the onset belongs to another morpheme. The complex cluster is a consequence of the deletion of the stem vowel. This process is further restricted to stems with back vowels (/a/, /u/ and /o/).
\ex.\a. \glll nlwa.na\\
/N-luʔ-wa=na/\\
{\sc 3pl.A-}tell{\sc -npst[3.P]=nmlz.sg}\\
\rede{They will tell him.}
\b. \glll njwa.ŋan.na\\
/N-ca-wa-ŋa-n=na/\\
{\sc neg-}eat{\sc -npst-1sg.A[3.P]-neg=nmlz.sg}\\
\rede{I will not eat it.}
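The template and the onset and coda restrictions described in this section lend themselves to a procedural statement. The following Python sketch is added purely for illustration and is not part of the grammatical analysis: segments are given as ASCII stand-ins (\texttt{ng} for the velar nasal, \texttt{?} for the glottal stop, \texttt{tt} for the retroflex plosive), the set of \isi{diphthongs} is only a placeholder, and the rare NCL onsets of fast speech are left aside.
\begin{verbatim}
# Illustrative sketch of the Yakkha syllable template (maximally CCVC):
# onset = any single consonant except /r/, an obstruent or nasal plus a
# liquid/glide, or the prefixal nasal N plus a consonant; nucleus = vowel
# or diphthong; coda = unaspirated plosive, nasal or /r/ (and no coda at
# all after a diphthong).

VOWELS         = {"i", "e", "a", "o", "u"}
DIPHTHONGS     = {"ai", "ui", "oi"}            # placeholder ASCII stand-ins
NASALS         = {"m", "n", "ng"}
LIQUIDS_GLIDES = {"l", "r", "y", "w"}
UNASP_PLOSIVES = {"p", "t", "tt", "k", "?"}    # "?" = glottal stop
CONSONANTS = NASALS | LIQUIDS_GLIDES | UNASP_PLOSIVES | {
    "ph", "th", "kh", "c", "ch", "s", "h", "b", "d", "g", "j"}

def is_syllable(onset, nucleus, coda):
    """Check an (onset, nucleus, coda) triple against the template."""
    if len(onset) == 0:
        pass                           # a glottal stop is prothesized later
    elif len(onset) == 1:
        if onset[0] == "r" or onset[0] not in CONSONANTS:
            return False               # /r/ never opens a syllable
    elif len(onset) == 2:
        c1, c2 = onset
        cl_onset = c2 in LIQUIDS_GLIDES and c1 in CONSONANTS - LIQUIDS_GLIDES
        nc_onset = c1 == "N" and c2 in CONSONANTS and c2 != "r"
        if not (cl_onset or nc_onset):
            return False
    else:
        return False
    if nucleus in DIPHTHONGS:
        return len(coda) == 0          # syllables with diphthongs are open
    if nucleus not in VOWELS:
        return False
    return len(coda) == 0 or (
        len(coda) == 1 and coda[0] in NASALS | UNASP_PLOSIVES | {"r"})

print(is_syllable(("th", "r"), "i", ("k",)))   # True, cf. ki.thrik.pa
print(is_syllable(("k",), "i", ("th",)))       # False: no aspirated coda
\end{verbatim}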
\section{The phonological treatment of Nepali and English loans}\label{loansphon}
The phonological features of Yakkha are also reflected by the treatment of \ili{Nepali} and English loans, as shown in Tables \ref{loans-nep} and \ref{loans-eng}. Several processes may apply to adjust non-native lexemes to Yakkha phonology. Apart from the regular processes discussed below, many changes in the vowel qualities can be encountered, but they cannot be ascribed to any regular sound change.
\begin{table}
\begin{center}
\begin{tabular}{lll}
\lsptoprule
{\sc yakkha} &{\sc nepali} &{\sc gloss}\\
\midrule
\emph{jum} & \emph{ˈjā.aũ} & \rede{Let us go.}\\
\emph{herum} & \emph{ˈhe.raũ} & \rede{Let us have a look.}\\
\emph{ʈhoŋ} & \emph{ʈhāũ} & \rede{place}\\
\emph{gumthali} & \emph{gaũthali } & \rede{swallow}\\
\emph{alenci} & \emph{alaĩci} & \rede{cardamom}\\
\emph{tuk.khi} & \emph{dukha} & \rede{sorrow, pain}\\
\emph{du.ru} & \emph{dudh} & \rede{(animals') milk}\\
\emph{chen} & \emph{ca.ˈhĩ} & (topic particle)\\
\emph{bhenik} & \emph{bi.ˈhā.na} & \rede{morning}\\
\emph{bhya} & \emph{ˈbi.hā} & \rede{wedding}\\
\lspbottomrule
\end{tabular}
\caption{\ili{Nepali} loans in Yakkha}\label{loans-nep}
\end{center}
\end{table}
\largerpage
As adjacent vowels are a marked structure in Yakkha, sequences of vowels, as well as vowels which are separated only by /h/, are typically changed to one vowel. The intervocalic /h/ is, however, not completely lost, but preserved as aspiration of the preceding consonant, as shown by the last three examples of \tabref{loans-nep}. This process happens irrespective of how the words are stressed in \ili{Nepali}.
\begin{table}
\begin{center}
\begin{tabular}{ll}
\lsptoprule
{\sc yakkha} &{\sc gloss}\\
\midrule
\emph{ˈroʈ} & \rede{road}\\
\emph{ˈphlim} & \rede{film}\\
\emph{ˈphren} & \rede{friend}\\
\emph{is.ˈʈep} & \rede{step}\\
\emph{is.ˈkul} & \rede{school}\\
\lspbottomrule
\end{tabular}
\caption{English loans in Yakkha}\label{loans-eng}
\end{center}
\end{table}
Another typical process is the change of nasal vowels to nasal consonants:\footnote{Marginally, nasal vowels may occur in Yakkha, but the environments are highly restricted, and a nasal realization of a vowel is always motivated by an underlying nasal consonant (cf. \sectref{morphophon}).} \isi{hortative} verb forms like \emph{jum} \rede{Let's go!} or \emph{herum} \rede{Let's have a look!} seem to have been built in analogy to the shape of Yakkha \isi{hortative} verb forms, which also end in \emph{-um}, at least in the transitive verbs. The words \emph{ʈhoŋ}, \emph{alenci} and \emph{gumthali} illustrate the same process (and also the change of \isi{diphthongs} to simple vowels).
Some loans show the neutralization of voiced and voiceless consonants that is typical for Eastern Kiranti, e.g., \emph{tukkhi} (from \ili{Nepali} \emph{dukha} \rede{sorrow, pain}). Probably, such words entered the Yakkha language in an earlier stage of the Nepali-Yakkha contact, when people were not yet bilingual. Nowadays there are many \ili{Nepali} loans in Yakkha that are pronounced as in \ili{Nepali}.
The word \emph{duru} (from \ili{Nepali} \emph{dudh} \rede{milk}) shows a strategy to satisfy the constraint against aspirated plosives at the end of the \isi{syllable} or word (and against aspirated voiced plosives in general).\footnote{The use of goat and cow's milk and other milk products is very rare in Yakkha culture (noted also by \citealt[128--30]{Russell1992_Yakha}), and, thus, the borrowing of this word is not surprising.}
Another typical process is the closing of word-final open syllables with /k/. For example, \emph{belā} \rede{time} becomes [belak], \emph{bihāna} \rede{morning} becomes [bhenik] and \emph{duno \ti duna} \rede{leaf bowl} becomes [donak] in Yakkha. Words that end with consonants other than /k/ may also be modified to end in /k/, e.g., \emph{churuk} \rede{cigarette}, from \ili{Nepali} \emph{churoʈ}.
Some English loanwords, shown in \tabref{loans-eng}, illustrate that complex codas and voiced codas are not acceptable in Yakkha. Word-initial clusters of fricative and plosive are also marked, and a vowel is prothesized to yield a \isi{syllable} that corresponds at least to some of the prosodic constraints of Yakkha (but this also happens in the pronunciation of \ili{Nepali} native speakers). Finally, as Yakkha has no distinctions of length or tenseness for vowels, the difference between, e.g., English \emph{sheep} and \emph{ship} is usually not noticed or produced if such words are borrowed. Both words are pronounced with a short [i], which is, however, slightly more tense than in English \emph{ship}.\footnote{The words displayed in the tables occurred regularly in at least some speakers' idiolects. Nevertheless, I do not want to make any strong claims about what is borrowed and what results from code-switching, as this is not the purpose of my study.}
The words selected here illustrate how some of the principles of the Yakkha sound system and the phonological rules are applied to non-native material. The degree to which Yakkha phonology shapes borrowed lexemes is not the same for all speakers. It depends on many factors, most obviously the proficiency in the donor languages and the time-depth of the borrowing.
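Some of the recurrent adjustments visible in \tabref{loans-eng} can be mimicked with a small helper function. The sketch below is a hypothetical illustration only; the adaptations are tendencies rather than exceptionless rules, and retroflexion, aspiration and the irregular changes in vowel quality mentioned above are not modeled.
\begin{verbatim}
# Rough sketch of three adaptation tendencies: loss of vowel length,
# prothesis of /i/ before word-initial fricative + plosive clusters, and
# repair of complex or voiced codas.  Input segments are ASCII stand-ins.

STOPS   = {"p", "t", "k", "b", "d", "g"}
DEVOICE = {"b": "p", "d": "t", "g": "k"}

def adapt_loan(segments):
    out = [seg.rstrip(":") for seg in segments]        # no length contrast
    if len(out) >= 2 and out[0] == "s" and out[1] in STOPS:
        out.insert(0, "i")                             # prothesis before sC-
    if len(out) >= 2 and out[-2] in {"m", "n", "ng"} and out[-1] in STOPS:
        out = out[:-1]                                 # no complex codas
    if out[-1] in DEVOICE:
        out[-1] = DEVOICE[out[-1]]                     # no voiced codas
    return out

print(adapt_loan(["s", "k", "u:", "l"]))       # ['i','s','k','u','l'], cf. is.kul
print(adapt_loan(["r", "o:", "d"]))            # ['r','o','t']; the attested
                                               # form has a retroflex stop
print(adapt_loan(["ph", "r", "e", "n", "d"]))  # ['ph','r','e','n'], cf. phren
\end{verbatim}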
\section{Stress assignment}\label{stress}
\begin{table}[htp]
\begin{tabular}{ll}
\lsptoprule
{\sc Yakkha }& {\sc gloss}\\
\midrule
\emph{ˈom.phu} &\rede{verandah}\\
\emph{ˈkho.rek} & \rede{bowl}\\
\emph{ˈca.ram}& \rede{yard}\\
\emph{ˈko.ko.mek}& \rede{butterfly}\\
\emph{ˈol.lo.bak} &\rede{fast}\\
\emph{ˈtok.ca.li}&\rede{buttocks}\\
\emph{ˈyok.yo.rok}&\rede{beyond, a bit further}\\
\emph{ˈkam.ni.bak}&\rede{friend}\\
\emph{wa.ˈriŋ.ba}&\rede{tomato}\\
\emph{cuʔ.ˈlum.phi}&\rede{stele, pillar, stick}\\
\emph{nep.ˈnep.na}&\rede{short one}\\
\emph{op.ˈchyaŋ.me}&\rede{firefly}\\
\emph{cik.ci.ˈgeŋ.ba}&\rede{Bilaune tree}\\
\lspbottomrule
\end{tabular}
\caption{Default stress}\label{stresstab1}
\end{table}
\largerpage[-1]
This section deals with the rules for \isi{stress assignment} and the domain to which these rules apply.
The rules for \isi{stress assignment} can be laid out as follows: by default, the first \isi{syllable} carries main stress. Closed syllables, however, attract stress. If there are closed syllables, the main stress moves to the last closed \isi{syllable}, as long as it is not the final \isi{syllable} of a word, as demonstrated by the examples in \tabref{stresstab1} for nouns,\footnote{Both simple and complex nouns (at least historically) can be found in this table. Their etymology does not affect \isi{stress assignment}, though.} and in \Next for inflected verbal forms. The forms in these examples differ with regard to the position of the last closed \isi{syllable} in the word, and thus with regard to the condition that makes the stress move from the first \isi{syllable} towards the end (but only up to the penultimate \isi{syllable}). Predicates that consist of more than one verbal stem behave like simple verbs in this respect (see \NNext).
\ex.\a. \glll ˈtum.me.cu.na\\
/tund-meʔ-ci-u=na/\\
understand{\sc -npst-du.A-3.P=nmlz.sg}\\
\rede{They (dual) understand him.}
\b. \glll ˌndum.men.ˈcun.na\\
/n-tund-meʔ-n-ci-u-n=na/\label{str-ex1}\\
{\sc neg-}understand{\sc -npst-neg-du.A-3.P-neg=nmlz.sg}\\
\rede{They (dual) do not understand him.}
\b. \glll ˌtum.meʔ.ˈnen.na\label{str-ex2}\\
/tund-meʔ-nen=na/\\
understand{\sc -npst-1>2=nmlz.sg}\\
\rede{I understand you.}
\ex.\a. \glll ˈluk.ta.khya.na\\
/lukt-a-kheʔ-a=na/\\
run{\sc -pst-V2.go-pst[3sg]=nmlz.sg}\\
\rede{He ran away.}
\newpage
\b. \glll luk.ta.ˈkhyaŋ.na\\
/lukt-a-kheʔ-a-ŋ=na/\\
run{\sc -pst-V2.go-pst-1sg=nmlz.sg}\\
\rede{I ran away.}
Examples like \emph{ˈkam.ni.bak} \rede{friend} show that the stress never moves to the final \isi{syllable}, even when the \isi{syllable} is heavy. Patterns where the final \isi{syllable} is stressed are possible though, because prefixes are not part of the stress domain. In monosyllabic nouns that host a possessive prefix, the stress generally remains on the stem, as in \Next.
\ex.\a. \glll a.ˈpaŋ\\
/a-paŋ/\\
{\sc 1sg.poss-}house\\
\rede{my house}
\b. \glll u.ˈphuŋ\\
/u-phuŋ/\\
{\sc 3sg.poss-}flower\\
\rede{his/her flower}
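The \isi{stress assignment} rules stated so far (default initial stress, attraction of stress by the last non-final closed \isi{syllable}, exclusion of prefixes from the stress domain) can be condensed into a few lines of code. The helper below is an illustrative sketch that operates on ASCII renderings of syllables; the special cases discussed in the remainder of this section (lexicalized kin terms, the nominalizers \emph{=na} and \emph{=ha}, compounds and triplicated ideophones) are not covered.
\begin{verbatim}
# Default stress assignment: the first syllable of the stress domain is
# stressed; closed syllables attract stress, and the last closed syllable
# wins unless it is word-final.  Prefixes are outside the stress domain.

VOWELS = set("aeiou")

def is_closed(syllable):
    return syllable[-1] not in VOWELS          # ends in a consonant

def assign_stress(syllables, n_prefix_syllables=0):
    """Return the index of the syllable carrying main stress."""
    domain = syllables[n_prefix_syllables:]    # prefixes are ignored
    stressed = 0                               # default: first syllable
    for i, syllable in enumerate(domain[:-1]): # final syllable never wins
        if is_closed(syllable):
            stressed = i
    return stressed + n_prefix_syllables

assert assign_stress(["kam", "ni", "bak"]) == 0        # 'kam.ni.bak
assert assign_stress(["wa", "ring", "ba"]) == 1        # wa.'ring.ba
assert assign_stress(["a", "pang"], n_prefix_syllables=1) == 1   # a.'pang
\end{verbatim}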
Yakkha has a category of obligatorily possessed nouns, and some of them, mostly kin terms, have undergone \isi{lexicalization}. They are all monosyllabic. With regard to stress, the prefix is no longer distinguished from the stem, as shown by examples like \emph{ˈa.mum} \rede{grandmother}, \emph{ˈa.pum} \rede{grandfather}, \emph{ˈa.na} \rede{elder sister}, \emph{ˈa.phu} \rede{elder brother}.\footnote{In the domain of \isi{kinship}, forms with first person singular inflection are also used in default contexts, when no particular possessor is specified. The default possessive prefix for nouns denoting part-whole relations is the third person singular \emph{u-}.} The words are, however, not morphologically opaque, as the first person possessive prefix \emph{a-} can still be replaced by other prefixes in a given context, and then, the stress pattern changes to the expected one, e.g., \emph{u.ˈmum} \rede{his grandmother}. An example of lexicalized obligatory possession beyond the domain of \isi{kinship} is the word \emph{ˈu.wa} \rede{liquid, nectar, water}.
The shift of stress described above occurs only in monosyllabic kin terms. In bisyllabic words, the stress is again on the first \isi{syllable} of the stem or on the \isi{syllable} that is closed. Terms like \emph{a.ˈnun.cha} \rede{younger sibling} (both sexes) or \emph{a.ŋo.ˈʈeŋ.ma} \rede{sister-in-law} illustrate this.
As Yakkha is a predominantly suffixing language, there are not many prefixes that could illustrate the fact that the domain of stress does not include prefixes. Apart from the \isi{possessive prefixes}, evidence is provided by reduplicated \isi{adjectives} and adverbs like \emph{pha.ˈphap} \rede{entangled, messy} or \emph{son.ˈson} \rede{slanted, on an angle}. The bases of these words are verbal stems, in this case \emph{phaps} \rede{entangle, mess up} and \emph{sos} \rede{lie slanted}. Their stress pattern allows the conclusion that this kind of \isi{reduplication} is a prefixation (for the other morphophonological processes involved cf. \sectref{morphophon}).
Clitics generally do not affect \isi{stress assignment}, since they are attached to the phrase and thus to a unit that is built of words to which stress has already been assigned.\footnote{The term \rede{clitic} may have two readings: (i) affixes that are categorically unrestricted (represented by the equals sign \rede{=} instead of a hyphen \rede{-}), or (ii) phonologically bound words, like \isi{demonstratives}. The latter are written separately in the \isi{orthography} used in this work, as they may also appear independently and they have the ability to head phrases.} Examples are provided in \Next for \isi{case} clitics and in \NNext for discourse-structural clitics.
\ex.\a. \glll ˈkho.rek.ci\\
/khorek=ci/\\
bowl{\sc =nsg}\\
\rede{the bowls}
\b. \glll ˈtaŋ.khyaŋ.bhaŋ\\
/taŋkhyaŋ=phaŋ/\\
sky{\sc =abl}\\
\rede{from the sky}
\b. \glll ˈkam.ni.bak.ci.nuŋ\\
/kamnibak=ci=nuŋ/\\
friend{\sc =nsg=com}\\
\rede{with the friends}
\ex.\a. \glll a.ˈyu.bak.se\\
/a-yubak=se/\\
{\sc 1sg.poss-}goods{\sc =restr}\\
\rede{only my goods}
\b. \glll u.ˈkam.ni.bak.ko\\
/u-kamnibak=ko/\\
{\sc 3sg.poss-}friend{\sc =top}\\
\rede{his friend(, though)}
An exception to this rule is the nominalization in \emph{=na} and \emph{=ha}. These nominalizers may attach to the verbal inflection, in relative clauses, complement clauses or in main clauses (see \sectref{nmlz-uni}). They are categorically unrestricted (i.e., taking not only verbal hosts), and not an obligatory part of the verbal inflection. However, if they attach to the verb, they are part of the stress domain. If this were not the case, \isi{stress assignment} as in \emph{luk.ta.ˈkhyaŋ.na} \rede{I ran away.} would be unexpected, because then the stress would be on the final \isi{syllable} of the stress domain, which violates the prosodic constraints of Yakkha. The anomalous behavior of the nominalizers is not unexpected in light of the fact that they are being reanalyzed from discourse markers to part of the \isi{inflectional morphology}.\footnote{For instance, they also show \isi{number} agreement with verbal arguments, with \emph{=na} indicating singular and \emph{=ha} indicating nonsingular or non-countable reference.}
It is hard to tell whether there is secondary stress. Even in words with five syllables, like in \Last[b], no secondary stress could be detected. Secondary stress was clearly audible in compounds such as those shown in \tabref{stresscomp}. It is found on the first \isi{syllable} of the second part of the compound, while the main stress remains on the first \isi{syllable} of the whole compound. Such compounds may override the general restriction against stress on word-final syllables. In inflected verb forms, secondary stress can be found on the verbal stem, e.g., in \emph{ˌndum.men.cu.ˈŋan.na} \rede{We (dual) do not understand him.}; cf. also examples \ref{str-ex1} and \ref{str-ex2} above.
\begin{table}[htp]
\resizebox{\textwidth}{!}{
\begin{tabular}{ll}
\lsptoprule
{\sc yakkha} & {\sc gloss}\\
\midrule
\emph{ˈko.len.ˌluŋ} &\rede{marble stone} (\rede{smooth-stone})\\
\emph{ˈpi.pi.ˌsiŋ} & \rede{straw, pipe} (\rede{[redup]suck-wood})\\
\emph{ˈyo.niŋ.ˌkhe.niŋ} & \rede{hither and thither} (\rede{while thither-while hither})\\
\emph{ˈmo.niŋ.ˌto.niŋ} & \rede{up and down} (\rede{while down-while up})\\
\emph{ˈsa.meʔ.ˌchoŋ} &\rede{protoclan} (\rede{clan-top})\\
\emph{ˈlim.bu.ˌkhim} & a clan name, composed of the term for the \ili{Limbu} ethnic group\\
& and a word for \rede{house} in many Kiranti languages\\
\lspbottomrule
\end{tabular}
}
\caption{Stress in compounds}\label{stresscomp}
\end{table}
Finally, one exception to the stress rules has to be mentioned. Yakkha has several triplicated ideophonic adverbs, where the first \isi{syllable} is the base and the second and third \isi{syllable} rhyme on the vowel, but replace the initial consonant with a liquid, a glide or a coronal stop, e.g., [se.re.ˈreː] \rede{drizzling}, or [hi.wi.ˈwiː] \rede{pleasantly breezy} (cf. \sectref{redup}). In addition to the \isi{triplication}, the vowel of the last \isi{syllable} is lengthened, and the stress is always on the last \isi{syllable} in these adverbs.
\section{Morphophonological processes}\label{morphophon}
This section discusses the various morphophonological processes in Yakkha. The domains to which certain rules and processes apply are not always congruent. The existence of more than one phonological domain and the problems for theoretical approaches that assume a prosodic hierarchy have already been discussed for \ili{Limbu}, another Eastern \mbox{Kiranti} language \citep{Hildebrandt2007Prosodic, Schieringetal2010The-prosodic}. Yakkha adds further support to challenges for the assumption that domains of prosodic rules are necessarily hierarchically ordered.
The following phonological domains could be identified in Yakkha morpho\-pho\-no\-logy: the rules for \isi{stress assignment} disregard prefixes and phrasal affixes. In contrast, the \isi{vowel harmony} establishes a relation between the prefix and the stem only, ignoring the suffixes. The \isi{voicing} rule has the broadest domain (cf. \sectref{voicing} below). Furthermore, some rules differentiate between morphologically simple and complex words (compounds). The \isi{voicing} rule and also various repair operations of marked structures like adjacent obstruents are sensitive to morpheme boundaries, the latter, more precisely, to stem boundaries.
\begin{figure}[htp]
\begin{center}
\begin{tabular}{l|l|l|l|l}
\lsptoprule
&{\bf prefix}&{\bf stem(s)}&{\bf suffixes}&{\bf clitics}\\
\hline
(1) &\cellcolor[gray]{.8} &\multicolumn{2}{c|}{stress assignment}&\cellcolor[gray]{.8}\\
\hline
(2-a)& \multicolumn{4}{c}{voicing/N\_}\\
\hline
(2-b)&\cellcolor[gray]{.8} &\multicolumn{3}{c}{voicing/V\_V}\\
\hline
(3)&\multicolumn{2}{c|}{vowel harmony}&\cellcolor[gray]{.8}&\cellcolor[gray]{.8}\\
\lspbottomrule
\end{tabular}
\caption{Summary of phonological domains}\label{w-domains}
\end{center}
\end{figure}
\figref{w-domains} provides an overview of the different domains to which the morphophonological processes apply.\footnote{The morphological structure of the word is slightly simplified in the table, disregarding complex predicates that consist of more than one verbal stem. Complex predicates are treated identically to simple words by the stress rule and the \isi{voicing} rule (except for the behavior of /c/).} \sectref{voicing} deals with the \isi{voicing} rule. The prefixation of underspecified \isi{nasals} is treated in \sectref{nas-pref}. A \isi{case} of \isi{vowel harmony} is described in \sectref{vow-har}. Adjacent vowels are not preferred in Yakkha, and strategies to avoid such undesirable sequences are treated in \sectref{strat-vow}. \sectref{h-w-m} deals with consonants in intervocalic environments. \sectref{ass-to-obs} describes assimilations. The employment of \isi{nasals} to repair marked sequences of adjacent obstruents as well as adjacent vowels in complex predicates is discussed in \sectref{nas-strat}. Finally, \sectref{sec-nasalcop} is concerned with a process of \isi{nasal copying} which is found in the verbal inflection of many Kiranti languages.
%\footnote{Suffixes that subcategorize exclusively for the stem can only be found in the verbal domain in Yakkha, in inflection as well as in infinitives, converbs and nominalizers. Nominal \isi{case} and \isi{number} markers are phrasal affixes, and furthermore, \isi{number} is optional on nouns.}
\subsection{Voicing}\label{voicing}
\begin{figure}[t]
\begin{center}
\begin{tabular}{l}
\lsptoprule
C.{\sc unvoiced} → C.{\sc voiced}/N\_\\
C.{\sc unvoiced} → C.{\sc voiced}/V\_V\\
C.{\sc unvoiced} → C.{\sc voiced}/\_L\\
\lspbottomrule
\end{tabular}
\caption{Voicing rules}\label{voic-fig}
\end{center}
\end{figure}
In Yakkha, unaspirated plosives and the affricate are voiced in intervocalic and postnasal environments and before \isi{liquids} and \isi{glides}, as schematized in \figref{voic-fig}, where C stands for unaspirated plosives and the affricate, N for \isi{nasals} and L for \isi{liquids} and \isi{glides}. Voicing predominantly applies at morpheme boundaries, but also inside words that, at least synchronically, cannot be split up further into separate morphemes. The rule is illustrated by example \Next, with the stem-final /k/ of the verb \emph{yokma} \rede{search}, and by \NNext, with the stem-initial /t/ of the verb \emph{tama} \rede{come}.
\largerpage
\ex.\a.\glll yoknenna.\\
/yok-nen=na/\\
search{\sc -1>2[pst]=nmlz.sg}\\
\rede{I looked for you.}
\b.\glll yogu\\
/yok-u/\\
search{\sc -3.P[imp]}\\
\rede{Look for itǃ}
\ex.\a.\glll tameʔna.\\
/ta-meʔ=na/\\
come{\sc [3sg]-npst=nmlz.sg}\\
\rede{He will come.}
\b.\glll ndamenna\\
/N-ta-meʔ-n=na/\\
{\sc neg-}come{\sc [3sg]-npst-neg=nmlz.sg}\\
\rede{He will not come.}
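The rules in \figref{voic-fig} amount to a simple left-to-right scan over a string of segments. The following sketch illustrates only the regular pattern; it uses ASCII stand-ins (\texttt{?} for the glottal stop, \texttt{ng} for the velar nasal) and deliberately ignores the exceptions discussed below, such as vocalic prefixes, the suffix \emph{-ci} and stem-final /t/.
\begin{verbatim}
# Voicing of unaspirated plosives and the affricate after a nasal (N_),
# between vowels (V_V) and before liquids/glides (_L).

VOICED_OF = {"p": "b", "t": "d", "tt": "dd", "k": "g", "c": "j"}
VOWELS    = {"i", "e", "a", "o", "u"}
NASALS    = {"m", "n", "ng"}
LIQ_GLI   = {"l", "r", "y", "w"}

def apply_voicing(segments):
    out = []
    for i, seg in enumerate(segments):
        prev = segments[i - 1] if i > 0 else None
        nxt  = segments[i + 1] if i + 1 < len(segments) else None
        if seg in VOICED_OF and (
                prev in NASALS                          # N _
                or (prev in VOWELS and nxt in VOWELS)   # V _ V
                or nxt in LIQ_GLI):                     # _ L
            seg = VOICED_OF[seg]
        out.append(seg)
    return out

# /yok-u/ 'Look for it!': stem-final /k/ is voiced between vowels -> yogu
print(apply_voicing(["y", "o", "k", "u"]))
# /N-ta-.../ 'He will not come.': stem-initial /t/ is voiced after the
# nasal prefix, cf. ndamenna
print(apply_voicing(["n", "t", "a", "m", "e", "n", "n", "a"]))
\end{verbatim}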
Some environments containing \isi{liquids} and \isi{glides} that trigger \isi{voicing} are given in \tabref{liqvoice}, with both monomorphemic and multimorphemic words. Some words are found with either pronunciation, and the current conclusion is that allegro speech leads to \isi{voicing}, and that this became the norm for some words, but not for others.
\begin{table}[htp]
\begin{tabularx}{\textwidth}{llX}
\lsptoprule
&{\scshape Yakkha} & {\scshape gloss}\\
\midrule
{\bf /pl/}&\emph{taplik \ti tablik} &\rede{story}\\
&\emph{hoblek} & [manner of throwing or pouring] \rede{the whole/ at once}\\
{\bf /pr/}&\emph{hobrek} & \rede{completely [rotten]}\\
&\emph{khibrum.ba} & \rede{fog} (also a derogatory term for people of Caucasian phenotype)\\
{\bf /tr/}&\emph{hoŋdrup} & \rede{pig as present for in-laws}\\
{\bf /kw/}&\emph{cogwana}& \rede{he does it}\\
{\bf /pw/}&\emph{ubwaha}& \rede{he earns [money]}\\
{\bf /khy/}&\emph{maghyam} &\rede{old woman}\\
{\bf /tr/}&\emph{phetrak \ti phedrak} &\rede{petal}\\
{\bf /pr/}&\emph{capra \ti cabra} &\rede{spade with long handle}\\
{\bf /pl/}&\emph{lupliba \ti lubliba} &\rede{earthquake}\\
\lspbottomrule
\end{tabularx}
\caption{Voicing before \isi{liquids} and glides}\label{liqvoice}
\end{table}
As shown above, the \isi{voicing} rule applies to lexical stems, but it also applies to inflectional morphemes and phrasal affixes (see \Next). Thus, the domain for \isi{voicing} is bigger than the domain that is relevant for stress, as phrasal affixes undergo \isi{voicing}, and as prefixes may trigger \isi{voicing} as well.
\ex.\label{hongma}\a.\glll hoŋmacibego.\\
/hoŋma=ci=pe=ko/\\
river{\sc =nsg=loc=top}\\
\rede{in the rivers(, though)}
\b.\glll tummecuganabu.\\
/tum-meʔ-c-u-ka=na=pu/\\
understand{\sc -npst-du-3.P.-2.A=nmlz.sg=rep}\\
\rede{(People say that) you (dual) understand him/her.}
\newpage
After this outline of the basic properties of \isi{voicing} in Yakkha, let us now turn to its details.
The \isi{voicing} rule needs further specification for prefixes. While nasal prefixes trigger \isi{voicing}, vocalic prefixes are excluded from the \isi{voicing} domain, irrespective of other factors such as stress. I have shown in \sectref{stress} above that \isi{voicing} is triggered neither in \emph{a.ˈpaŋ} \rede{my house} nor in \emph{ˈa.pum} \rede{(my) grandfather}. Only prefixes that consist of a nasal trigger \isi{voicing}, as shown in \Next.
\ex.\a. \glll mbaŋ\\
/N-paŋ/\\
{\sc 2sg.poss-}house\\
\rede{your house}
\b. \glll ŋ-gamnibak\\
/N-kamnibak/\\
{\sc 2sg.poss-}friend\\
\rede{your friend}
In \sectref{stress} on \isi{stress assignment}, I mentioned reduplicated \isi{adjectives} and adverbs. They also provide further evidence for the restriction of the \isi{voicing} rule to nasal prefixes. I will exemplify this with the two \isi{adjectives} \emph{bumbum} \rede{compact and heavy} and \emph{tutu} \rede{far up} (cf. \sectref{redup} for more examples). The base of the adjective \emph{bumbum} has the corresponding verbal stem \emph{pups \ti pum} \rede{fold, press, tuck up}, while the base of \emph{tutu} is the adverbial root \emph{tu} \rede{uphill}. In analogy to the stress behavior, my default assumption is that the \isi{reduplication} is a prefixation, although the \isi{voicing} facts would support either option. The stem allomorph \emph{pum} is reduplicated to /pum-pum/ (the stem \emph{pups} surfaces only before vowels) and, subsequently, the stem undergoes \isi{voicing}, which is then spread to the first \isi{syllable} to preserve the identity between the base and the reduplicated morpheme. In contrast to this, in \emph{tutu} \rede{far up}, the intervocalic environment that results from the \isi{reduplication} does not trigger \isi{voicing}.
As stated in the beginning of this section, \isi{voicing} does not apply to aspirated plosives, at least not in the Tumok dialect (see \Next). Exceptions are found only in a handful of lexemes, mostly in ideophonic adverbs (see \sectref{sec-ideophone}). However, aspirated plosives (and the affricate) get voiced when they occur in \isi{function verb}s,\footnote{Function verbs are grammaticalized verbs, glossed as \rede{V2} (see Chapter \ref{verb-verb}.)} i.e., in word-medial position (see \NNext). These complex predicates also constitute one domain for \isi{stress assignment}, in contrast, for instance, to the southern neighbour language \ili{Chintang}, where each verbal stem in a \isi{complex predicate} constitutes a stress domain on its own \citep[57]{Bickeletal2007Free}.
\ex.\a.\glll ŋkhyanna.\\
/N-khy-a-n=na/\\
{\sc neg-}go{\sc [3sg]-pst-neg=nmlz.sg}\\
\rede{He did not go.}
\b.\glll memphaʔle.\\
/meN-phat-le/\\
{\sc neg-}help{\sc -cvb}\\
\rede{without helping}
\ex.\a.\glll kam cog-a-ghond-a-ga=i.\\
/kam cok-a-khond-a-ka=i/\\
work do{\sc -imp-V2.roam-imp-2=emph}\\
\rede{Go on working.}
\b.\glll hab-a-bhoks-a=na.\\
/hap-a-phoks-a=na/\\
cry{\sc -pst-V2.split[3sg]-pst=nmlz.sg}\\
\rede{She broke out in tears.}
Yakkha has a class of composite predicates that consist of a noun and a verb. They show varying degrees of morphosyntactic freedom, but they are generally not as tightly fused as the verb-verb predicates. This is also reflected by stress: noun and verb each have their own stress, even if this results in adjacent stress. Voicing, too, treats both components as separate items (see \Next).\footnote{These predicates form a lexical unit though, and the nouns do not enjoy the syntactic freedom that is expected of full-fledged arguments. These predicates are best understood as idiomatic phrases (cf. Chapter \ref{noun-verb}).}
\ex.\a.\glll ˈsa.ya ˈpok.ma\\
/saya pok-ma/\\
head.soul raise{\sc -inf}\\
\rede{to raise the head soul} (a ritual)
\b.\glll ˈluŋ.ma ˈtuk.ma\\
/luŋma tuk-ma/\\
liver pour{\sc -inf}\\
\rede{to love}
\b.\glll ˈsak ˈtu.ga.nai?\\
/sak tug-a=na=i/\\
hunger ache{\sc [3sg]-pst=nmlz.sg=q}\\
\rede{Are you hungry?/ Is he hungry?/ Are they hungry?}
Between vowels, voiced stops may further assimilate to their surrounding material and become continuants, as several alternations between intervocalic [b] and [w] show. Thus, \emph{kamnibak} \rede{friend} may also be pronounced [kamniwak], or the \isi{imperative} of \emph{apma} \rede{to come (from a visible distance on the same level)} can alternate between [aba] and [awa]. As in Belhare \citep{Bickel1998Rhythm}, intervocalic /t/ may also become a continuant /r/, as some historical stem changes (e.g., \emph{*thut} → \emph{thur}) and some \isi{function verb}s show, e.g., the \isi{function verb} \emph{ris} that originates in the lexical stem \emph{tis} \rede{apply, invest}, or \emph{raʔ} originating in the lexical stem \emph{taʔ} \rede{bring (from further away)}.
The suffix \emph{-ci} gets voiced neither in verbal nor in nominal inflection, as example \ref{hongma} has already shown. This exceptional behavior might point towards a more complex historical form of this suffix. The only instance of a voiced marker \emph{-ci} is in the second person dual pronoun \emph{njiŋda} \rede{you}, which is complex at least from a historical perspective.
The affricate /tsʰ/ (written <c>) behaves exceptionally in other contexts, too. In the \isi{function verb} \emph{ca} \rede{eat} it does not undergo \isi{voicing} (see \Next[a]),\footnote{This function verb is the only one with initial /c/.} for which there is no neat explanation yet. Example \Next[b] shows that \isi{voicing} does apply to plosives in function verbs, and as example \NNext shows, stem-initial /c/ does get voiced in other environments. In some morphemes, the affricate shows free variation, as in the additive focus \isi{clitic} \emph{=ca}. It is found both voiced and unvoiced, neither related to individual nor to dialectal differences.
\largerpage
% \ili{Limbu} has \emph{-si/-chi} for dual and \emph{-si} for nonsingular patient \citep[75ff]{Driem1987A-grammar}.} and in the nonsingular \isi{clitic} \emph{=ci} for noun phrases, the affricate is never voiced.
\ex.\a. \glll incama\\
/in-ca-ma/\\
trade{\sc -V2.eat-inf}\\
\rede{to sell}
\b. \glll hambiʔma\\
/ham-piʔ-ma/\\
distribute{\sc -V2.give-inf}\\
\rede{to distribute (among people)}
\ex.\a. \glll njogwana.\\
/n-cok-wa=na/\\
{\sc 3pl.A-}do{\sc -npst=nmlz.sg}\\
\rede{They will do it.}
\b. \glll men-ja-le\\
/men-ca-le/\\
{\sc neg-}eat{\sc -cvb}\\
\rede{without eating}
Another exception to the \isi{voicing} rule has to be mentioned, as shown in \Next[a] and \Next[b]. Stem-final /t/ remains voiceless between vowels. If the stem ends with a nasal and /t/, \isi{voicing} applies, as in \Next[c], and stem-initial /t/ undergoes \isi{voicing} as well. The absence of \isi{voicing} at the end of stems can be explained with the history of the /-t/ final stems. Comparison with \ili{Chintang} and Belhare \citep{Bickel2003Belhare, Bickeletal2007Free} shows that there must have been geminated /tt/, resulting from a CVt stem to which the augment \emph{-t} was added (discussed in \sectref{stem}). Voicing does not apply when there is more than one underlying consonant between the vowels.
\ex.\a.\glll mituna.\\
/mit-u=na/\\
remember{\sc [pst]-3.P=nmlz.sg}\\
\rede{He remembered it.}
\b.\glll phatuci!\\
/phat-u-ci/\\
help{\sc -3.P[imp]-nsg.P}\\
\rede{Help themǃ}
\b.\glll chem endugana?\\
/chem ent-a-u-ka=na/\\
song apply{\sc -pst-3.P-2.A=nmlz.sg}\\
\rede{Did you put on music?}
\subsection{The prefixation of underspecified nasals}\label{nas-pref}
Yakkha has several nasal prefixes that do not constitute syllables of their own, but result in onsets that consist of prenasalized consonants. The prefixes are underspecified for the place of articulation, and thus they always assimilate to the place of articulation of the following consonant. The nasal prefixes also trigger \isi{voicing} stem-initially, as could already be seen in \sectref{voicing} above. These nasal prefixes have several morphemic values, already mentioned in \sectref{syllable}, and repeated here for convenience: they index third person plural S and A arguments on verbs \Next[a] and verbal \isi{negation} \Next[b]. The nasal prefixes also encode second person singular possessors on nouns \Next[c], and in adverbs, they encode a distal relation (see \Next[d]). If the nasal prefix is attached to a nasal-initial stem, it yields an initial nasal geminate (see \NNext).
\largerpage
\ex.\ag. m-by-a-ga-n=ha.\\
{\sc 3pl.A-}give{\sc -pst-2.P-neg=nmlz.nsg}\\
\rede{They gave it to you.}
\bg. ŋ-khy-a-n=na.\\
{\sc neg-}go{\sc [3sg]-pst-neg=nmlz.sg}\\
\rede{He did not go.}
\bg. m-baŋ\\
{\sc 2sg.poss-}house\\
\rede{your house}
\bg.ŋ-khaʔla\\
{\sc dist-}like\_this\\
\rede{like that}
\ex.\ag.m-ma\\
{\sc 2sg.poss-}mother\\
\rede{your mother}
\bg. n-nhe\\
{\sc dist-}here\\
\rede{there}
If the stem begins with a vowel or /w/, the nasal is realized as a velar nasal (see \Next). This might lead us to the conclusion that actually /ŋ/ is the underlying form and gets assimilated. This would, however, be the only instance of a morphophonological change from a velar nasal to [m] or [n] in Yakkha, and, thus, this option seems unlikely to me.
\ex. \ag. ŋ-og-wa-ci=ha.\\
{\sc 3pl.A-}peck{\sc -npst-3nsg.P=nmlz.nsg}\\
\rede{They (the roosters) peck them (the chicks).}
\bg. ŋ-ikt-haks-u-ci.\\
{\sc 3pl.A-}chase{\sc -V2.send-3.P[pst]-3nsg.P}\\
\rede{They chased them away.}
\bg. kham ŋ-wapt-u=ha.\\
soil {\sc 3pl.A-}scratch{\sc -3.P[pst]=nmlz.nsg}\\
\rede{They (the chicken) scratched the ground (they scrabbled about on the ground).}
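The behavior of the underspecified prefix can likewise be summarized procedurally. The sketch below is for illustration only: the place mapping is an approximation extracted from the examples above, the prefix is written \texttt{N}, and further processes, such as the resyllabification discussed next, are not modeled.
\begin{verbatim}
# Spell-out of the nasal prefix N-: it copies the place of articulation of
# the following consonant, surfaces as "ng" before vowels and /w/, and
# voices a following unaspirated plosive or affricate.

VOICED_OF = {"p": "b", "t": "d", "tt": "dd", "k": "g", "c": "j"}
PLACE = {"p": "m", "ph": "m", "b": "m", "m": "m",
         "t": "n", "th": "n", "d": "n", "c": "n", "ch": "n",
         "s": "n", "l": "n", "n": "n",
         "k": "ng", "kh": "ng", "g": "ng", "ng": "ng"}
VOWELS = {"i", "e", "a", "o", "u"}

def prefix_nasal(stem):
    """Attach N- to a stem given as a list of ASCII segments."""
    first = stem[0]
    if first in VOWELS or first == "w":
        return ["ng"] + stem                       # e.g. ng-ogwaciha
    nasal = PLACE.get(first, "n")
    return [nasal, VOICED_OF.get(first, first)] + stem[1:]

print(prefix_nasal(["p", "a", "ng"]))            # m b a ng       'your house'
print(prefix_nasal(["kh", "a", "?", "l", "a"]))  # ng kh a ? l a  'like that'
print(prefix_nasal(["m", "a"]))                  # m m a          'your mother'
\end{verbatim}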
A \isi{syllable} with a nasal before the consonant is marked in terms of the sonority hierarchy \citep{Jespersen1904_Lehrbuch, Selkirk1984_SyllableTheory, Hall2000Phonologie}. Therefore, the following process can be noticed: if the preceding word (in the same clause) ends with a vowel, the nasal will resyllabify as the coda of the preceding word (see \Next), just as in Belhare \citep[547]{Bickel2003Belhare}. I have shown above that the domains for stress and for \isi{voicing} are not identical. This process adds a third domain of phonological rules to the picture, encompassing two words in terms of \isi{stress assignment}, as each of the words carries its own stress. Even though the nasal belongs to the preceding word in terms of \isi{syllable structure}, the choice of the nasal is determined by the following consonant, which also undergoes \isi{voicing} due to the nasal. This suggests a sequence of morphophonological processes, of which this resyllabification is the last to apply.
\ex.\a.\glll liŋkhaci namnuŋ bagari\textbf{n} jog-a.\\
/liŋkha=ci nam=nuŋ bagari {\bf N}-cok-a/\\
Linkha{\sc =nsg} sun{\sc =com} bet {\sc 3pl-}do{\sc -pst} \\
\rede{The Linkha clan had a bet with the sun.} \source{11\_nrr\_01.003}
\b.\glll chuʔma\textbf{ŋ} gaksanoŋ, ...\\
/chuʔ-ma {\bf N}-kaks-a-n=hoŋ/\\
tie{\sc -inf} {\sc neg-}agree{\sc [3sg]-pst-neg=seq}\\
\rede{It (the cow) was not okay with being tied, ...} \source{11\_nrr\_01.011}
\b.\glll nna\textbf{m} borakhyamanna.\\
/nna {\bf N}-por-a-khy-a-ma-n=na/\\
that {\sc neg-}fall{\sc -pst-V2.go[3sg]-pst-neg=nmlz.sg}\\
\rede{That (stele) did not topple over.} \source{18\_nrr\_03.026}
\b.\glll ka heʔniŋca\textbf{m} mandiʔŋanna.\\
/ka heʔniŋ=ca {\bf N}-mandiʔ-ŋa-n=na/\\
{\sc 1sg} when{\sc =add} {\sc neg-}get\_lost{\sc -1sg-neg=nmlz.sg}\\
\rede{I would never get lost.}\source{18\_nrr\_03.015}
\subsection{Vowel harmony}\label{vow-har}
Vowel harmony in Yakkha applies only to one prefix, namely to the possessive prefix \emph{u-} for third person. It has an allomorph \emph{o-} that is triggered when the stressed \isi{syllable} of the stem contains the mid vowels /e/ or /o/, as illustrated by \tabref{vowelhar}. Suffixes do not undergo \isi{vowel harmony} in Yakkha, and neither do other prefixes.
One exceptional case has to be mentioned: the inflected form \emph{khohetu} \rede{he/she carried it off}. This is a complex verb that consists of the two verbal stems \emph{khuʔ} \rede{carry (on back)} and \emph{het} (a V2, indicating caused \isi{motion} away from a reference point). Apparently, the V2 makes the vowel in the first stem change to [o]. However, this is the only instance of \isi{vowel harmony} that has been encountered beyond the domain defined above.
\begin{table}[htp]
\begin{tabular}{llcll}
\lsptoprule
\multicolumn{2}{l}{{\textsc{before} /e/ \textsc{and} /o/}} &&\multicolumn{2}{l}{{\textsc{before} /u/, /i/, /a/}}\\
\midrule
\emph{o-heksaŋbe} &\rede{behind her/him} &&\emph{u-paŋ} &\rede{her/his house}\\
\emph{o-hop} &\rede{her/his nest} &&\emph{u-hiŋgilik} &\rede{alive}\\
\emph{o-tokhumak} &\rede{alone} &&\emph{u-ʈukhruk} &\rede{her/his body}\\
\emph{o-senkhwak} &\rede{her/his bone} &&\emph{u-mik} &\rede{her/his eye}\\
\emph{o-yok} &\rede{her/his place/spot} &&\emph{u-tiŋgibhak} &\rede{its thorn}\\
\emph{o-poŋgalik} &\rede{(its) bud} &&\emph{u-ʈaŋ} &\rede{its horn}\\
\emph{o-phok} &\rede{her/his belly} &&\emph{u-muk} &\rede{her/his hand}\\
\emph{o-ʈesraŋ} &\rede{reverse} &&\emph{u-nabhuk} &\rede{her/his nose}\\
\lspbottomrule
\end{tabular}
\caption{Vowel harmony}\label{vowelhar}
\end{table}
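Since the alternation is limited to a single prefix, the distribution in \tabref{vowelhar} reduces to a one-line condition. The following helper is hypothetical and purely illustrative; stems are given as ASCII syllable strings, and the index of the stressed \isi{syllable} has to be supplied by the caller.
\begin{verbatim}
# The 3sg possessive prefix surfaces as o- when the stressed syllable of
# the stem contains a mid vowel /e/ or /o/, and as u- otherwise.

def third_person_possessive(stem_syllables, stressed_index=0):
    stressed = stem_syllables[stressed_index]
    prefix = "o" if ("e" in stressed or "o" in stressed) else "u"
    return prefix + "-" + ".".join(stem_syllables)

print(third_person_possessive(["hop"]))           # o-hop        'her/his nest'
print(third_person_possessive(["pang"]))          # u-pang       'her/his house'
print(third_person_possessive(["sen", "khwak"]))  # o-sen.khwak  'her/his bone'
\end{verbatim}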
\subsection{Operations to avoid adjacent vowels}\label{strat-vow}
The processes that avoid vowel hiatus apply to adjacent vowels as well as to vowels that are separated by a glottal stop.\footnote{Diachronically, stems ending in a glottal stop used to be CVt stems, and the /t/ got reduced to a glottal stop. Synchronically, stems ending in a glottal stop often behave identically to stems that end in a vowel, in terms of morphophonological rules.} They are found in the verbal domain, since there are no suffixes or clitics beginning with a vowel in the nominal domain.
\subsubsection{Vowel deletion}
The suffixes \emph{-a} and \emph{-u} can get deleted when they are adjacent to another vowel. In sequences of /-a-u/, for instance, /a/ gets deleted (see \Next[a]). This rule, however, also interacts with the morphology. While the past (and \isi{imperative}) suffix \emph{-a} is deleted when it is followed by the third person patient marker \emph{-u}, the same sequence, when it results from the nonpast marker \emph{-wa}, results in the deletion of \emph{-u} (see \Next[b]).
\largerpage
\ex.\a.\glll tunduŋna.\\
/tund-\textbf{a}-u-ŋ=na/\\
understand{\sc -pst-3.P-1sg.A=nmlz.sg}\\
\rede{I understood her/him.}
\b.\glll tundwaŋna.\\
/tund-wa-\textbf{u}-ŋ=na/\\
understand{\sc -npst-3.P-1sg.A=nmlz.sg}\\
\rede{I understand her/him.}
Suffix sequences of the underlying form /-a-i/ also result in the deletion of the suffix \emph{-a} (see \Next). When /a/ is part of the stem, however, nothing gets deleted (see \Next[c]). Note also that intervocalic /h/ may become /y/, as in \Next[a].
\ex.\a.\glll kheiya.\\
/kheʔ-a-i=ha/\\
go{\sc -pst-1pl=nmlz.nsg}\\
\rede{We went.}
\b.\glll tundigha.\\
/tund-a-i-ka=ha/\\
understand{\sc [3.A]-pst-2pl-2=nmlz.nsg}\\
\rede{They understood you (plural).}
\b.\glll hakokŋa caiwa.\\
/hakok=ŋa ca-i-wa/\\
later{\sc =ins} eat{\sc -1pl-npst}\\
\rede{We will eat later.}
Underlying sequences of three vowels are possible with open (CV and CVʔ) stems, in past and \isi{imperative} forms with a third person patient. In these verb forms, both suffixes are deleted.
\ex.\a.\glll piŋ.na\\
/piʔ-a-u-ŋ=na/\\
give{\sc -pst-3.P-1sg.A=nmlz.sg}\\
\rede{I gave it to him.}
\b.\glll soŋ.na\\
/soʔ-a-u-ŋ=na/\\
look{\sc -pst-3.P-1sg.A=nmlz.sg}\\
\rede{I looked at it.}
\b.\glll haǃ\\
/haʔ-a-u/\\
bite{\sc -imp-3.P}\\
\rede{Bite (into) itǃ}
\b.\glll cam.na\\
/ca-a-u-m=na/\\
eat{\sc -pst-3.P-1pl.A=nmlz.sg}\\
\rede{We ate it.}
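Because these deletions interact with the morphology (the past and \isi{imperative} suffix \emph{-a} is deleted before \emph{-u}, whereas after nonpast \emph{-wa} it is \emph{-u} that goes), a formalization has to operate on glossed morpheme strings rather than on plain segments. The sketch below illustrates the configurations discussed above; the three-vowel sequences of open stems, the glottal stop and the clitics are left aside, and the gloss labels are ad hoc.
\begin{verbatim}
# Suffix-vowel deletion on (form, gloss) pairs.

def delete_suffix_vowels(morphs):
    out = list(morphs)
    i = 0
    while i + 1 < len(out):
        (f1, g1), (f2, g2) = out[i], out[i + 1]
        if f1 == "a" and g1 in {"PST", "IMP"} and f2 == "u":
            del out[i]          # /-a-u/: past/imperative -a is deleted
            continue
        if f1 == "wa" and g1 == "NPST" and f2 == "u":
            del out[i + 1]      # /-wa-u/: the 3.P suffix -u is deleted
            continue
        if f1 == "a" and g1 in {"PST", "IMP"} and f2 == "i":
            del out[i]          # /-a-i/: -a is deleted before 1pl -i
            continue
        i += 1
    return "".join(form for form, gloss in out)

pst  = [("tund", "V"), ("a", "PST"), ("u", "3.P"), ("ng", "1sg.A")]
npst = [("tund", "V"), ("wa", "NPST"), ("u", "3.P"), ("ng", "1sg.A")]
print(delete_suffix_vowels(pst))    # tundung,  cf. the example pair above
print(delete_suffix_vowels(npst))   # tundwang  (the clitic =na is left out)
\end{verbatim}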
\subsubsection{Ablaut}
Ablaut is found only in one verb, in \emph{cama} \rede{eat}. Ablaut in some verbs is not unusual in Kiranti. The stem \emph{ca} has an allomorph \emph{co} that is not predictable from the phonological environment. It occurs when followed by other vowels, but not in all environments in which such a change would be expected if the phonological environment were the conditioning factor. Its distribution over the paradigm is shown in Chapter \ref{verbalmorph}, on page \pageref{par-cama-pst}.
\subsubsection{Insertion of glides}
If the back vowels (/a/, /o/ and /u/) belong to a verbal stem and are followed by the suffix \emph{-a}, the glide /y/ is inserted to avoid vowel hiatus. The morphological environment for these vowel sequences is provided by intransitive verbs, as well as by transitive verb forms with first or second person patients (see \Next). A similar process can be encountered with stems that end in /ʔ/, with /ʔ/ being replaced by /y/, as in \Next[d].
\ex.\a.\glll mima uhoŋbe uyana.\\
/mima u-hoŋ=pe u-a=na/\\
mouse {\sc 3sg.poss-}hole{\sc =loc} enter{\sc [3sg]-pst=nmlz.sg}\\
\rede{The mouse entered her mousehole.}
\b.\glll nam ayana.\\
/nam a-a=na/\\
sun descend{\sc [3sg]-pst=nmlz.sg}\\
\rede{The sun went down.}
\b.\glll tayana.\\
/ta-a=na/\\
come{\sc [3sg]-pst=nmlz.sg}\\
\rede{He came.}% (from a place beyond sighting distance)
\b.\glll soyaŋgana.\\
/soʔ-a-ŋ-ka=na/\\
look{\sc -pst-1sg.P-2.A=nmlz.sg}\\
\rede{You looked at me.}
\subsubsection{Gliding}
Front vowels of verbal stems may also be reduced to \isi{glides} when they are adjacent to /a/. The \isi{syllable} nucleus of the stem becomes part of the onset, and the word is again reduced by one \isi{syllable}, which is obvious because of the stress pattern. Examples \Next[a] and \Next[b] illustrate this for stems ending in glottal stops and \Next[c] shows the same process with an open stem.
\ex.\a.\glll ˈkhyaŋ.na\\
/kheʔ-a-ŋ=na/\\
go{\sc -pst-1sg=nmlz.sg}\\
\rede{I went.}
\b.\glll ˈpyaŋ.na\\
/piʔ-a-ŋ=na/\\
go{\sc [3sg.A]-pst-1sg.P=nmlz.sg}\\
\rede{He gave it to me.}
\b.\glll ˈsya.na\\
/si-a=na/\\
die{\sc [3sg]-pst=nmlz.sg}\\
\rede{He/she died.}
This may also happen when the stem has a back vowel. So far, this has only been encountered for the verb \emph{luʔma} (see \Next). Other verbs, e.g., \emph{chuʔma} \rede{tie}, appear in the expected form, e.g., \emph{chuyaŋna} \rede{he tied me (to something)}.
\ex.\a.\glll ˈlyaŋ.na\\
/luʔ-a-ŋ=na/\\
tell{\sc [3sg.A]-pst-1sg.P=nmlz.sg}\\
\rede{He told me.}
\b.\glll ˈlya.ha\\
/luʔ-a=ha/\\
tell{\sc [3sg.A;1.P]-pst=nmlz.nsg}\\
\rede{He told us.}
\subsection{Consonants in sonorous environment}\label{h-w-m}
\subsubsection{Intervocalic /h/ and /w/}
Intervocalic /h/ and /w/ also trigger vowel deletion. If the two vowels surrounding /w/ or /h/ have the same quality, the preceding vowel is deleted, even if this is the stem vowel. The deletion leads to new consonant clusters, i.e., to consonants followed by /w/ (see \Next[a]), or to aspirated voiced plosives (see \Next[b]).
\largerpage
\ex.\a.\glll njwan.na\\
/n-ca-wa-n=na/\\
{\sc neg-}eat{\sc [3sg.A]-npst-neg=nmlz.sg}\\
\rede{He/she does not eat it.}
\b.\glll tun.di.wa.gha\\
/tund-i-wa-ka=ha/\\
understand{\sc [3.A]-2pl.P-npst-2=nmlz.nsg}\\
\rede{He/they understand you (pl).}
If the vowels do not have the same quality and there is a transition from a close to an open vowel, intervocalic /h/ may also change to /y/ (see \Next).
\ex.\a.\glll tun.dwa.ci.ya\\
/tund-wa-ci=ha/\\
understand{\sc [3sg.A]-npst-3nsg.P=nmlz.nsg}\\
\rede{He/she understands them.}
\b.\glll ci.ya maŋ.cwa\\
/ci=ha maŋcwa/\\
get\_cold{\sc =nmlz.nsg} water\\
\rede{cold water}
The change of vowels to \isi{glides} and the realization of underlying /h/ as aspiration can even cross stem boundaries, as the \isi{complex predicate} in \Next, which consists of three verbal stems, shows. The underlying stems /piʔ/ and /heks/ fuse into [bhyeks].\footnote{The V2 \emph{-piʔ} indicates that a participant (the speaker, the subject or even someone else) is affected by the event, and the V2 \emph{-heks} specifies the temporal reference of the event as \isi{immediate prospective}. In pronunciation, they get fused to [bhyeks].}
\ex.\glll a.cya tu.ga.bhyek.sana\\
/a-cya tuk-a-piʔ-heks-a=na/\\
{\sc 1sg.poss}-child get\_ill{\sc [3sg]-pst-V2.give-V2.cut-pst=nmlz.sg}\\
\rede{My child is about to get ill.}
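The elision pattern just described, i.e., the deletion of the first of two identical vowels around /h/ or /w/, with a stranded /h/ surfacing as aspiration of the preceding consonant, can be stated as a search for the configuration /C-V$_1$-h-V$_2$/ or /C-V$_1$-w-V$_2$/ with V$_1$ = V$_2$. The sketch below is illustrative only and leaves aside \isi{voicing} as well as the change of /h/ to /y/ between non-identical vowels.
\begin{verbatim}
# Delete V1 in C V1 {h, w} V2 when V1 == V2; a stranded /h/ is written as
# aspiration of the preceding consonant (C + "h").

VOWELS = {"i", "e", "a", "o", "u"}

def elide_before_h_w(segments):
    out = list(segments)
    i = 0
    while i + 3 < len(out):
        c, v1, x, v2 = out[i], out[i + 1], out[i + 2], out[i + 3]
        if (c not in VOWELS and v1 in VOWELS and v2 in VOWELS
                and v1 == v2 and x in {"h", "w"}):
            if x == "h":
                out[i:i + 3] = [c + "h"]     # aspiration of the consonant
            else:
                out[i:i + 3] = [c, "w"]      # consonant + glide cluster
        else:
            i += 1
    return out

# /ca-ma=ha=ci/ -> c a mh a c i, cf. ca.mha.ci 'They have to be eaten.'
print(elide_before_h_w(["c", "a", "m", "a", "h", "a", "c", "i"]))
# /n-ca-wa-n=na/ -> n c w a n n a, cf. njwanna (voicing not modeled)
print(elide_before_h_w(["n", "c", "a", "w", "a", "n", "n", "a"]))
\end{verbatim}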
\subsubsection{Nasals in sonorous environment}\label{nas-son}
Nasals in sonorous environments are prone to phonological alternations. Nasal vowels are not part of the phoneme set of Yakkha. They may be generated, however, in intervocalic environments at morpheme boundaries, or when a nasal occurs between a vowel and a liquid or a glide. This happens when the \isi{negative converb} (marked by prefix and suffix: \emph{meN}-Σ-\emph{le}) attaches to an open stem, or to a stem with initial /w/, /y/ or /l/. The nasal in \emph{meN}-Σ-\emph{le} is underspecified for its place of articulation. If it attaches to stems that have initial consonants, it assimilates to their place of articulation. Examples are provided in \tabref{nasal-son}.
Another process producing nasal vowels was noticed in allegro forms of complex predicates such as \emph{ŋonsipma} \rede{feel shy} and \emph{thensipma} \rede{fit, suit}, which were pronounced \emph{ŋoĩsipma} and \emph{theĩsipma} in fast speech.
\begin{table}[htp]
\begin{center}
\begin{tabular}{lll}%das gleiche mit longtable für mehrseitige tabellen
\lsptoprule
{\sc stem}&{\sc citation form}&{\sc negative converb}\\
\midrule
/waʔ/&\emph{waʔma} \rede{wear, put on}&\emph{mẽ.waʔ.le} \rede{without wearing}\\
/a/&\emph{ama} \rede{descend}&\emph{mẽ.a.le} \rede{without descending}\\
/u/&\emph{uma} \rede{enter}&\emph{mẽ.u.le} \rede{without entering}\\
/lap/&\emph{lapma} \rede{seize, catch}&\emph{mẽ.lap.le} \rede{without catching}\\
/yok/&\emph{yokma} \rede{search}&\emph{mẽ.yok.le} \rede{without searching}\\
\lspbottomrule
\end{tabular}
\caption{Nasals in sonorous environment}\label{nasal-son}
\end{center}
\end{table}
\subsection{Assimilations}\label{ass-to-obs}
Syllable-final coronals assimilate to coronal fricatives, yielding a geminated fricative [sː] (written <ss>) (see \Next). This \isi{assimilation} is connected to stress. In unstressed syllables, no \isi{assimilation} occurs, and the stem-final /t/ is simply deleted before fricatives (see \Next[c]). Occasionally, stem-final glottal stops can also undergo this \isi{assimilation}, but this is subject to free variation.
\ex. \a. \glll es.se\\
/et-se/\\
apply{\sc -sup}\\
\rede{in order to apply}
\b.\glll mis.saŋ\\
/mit-saŋ/\\
remember{\sc -sim}\\
\rede{remembering}
\b.\glll ki.si.saŋ\\
/kisit-saŋ/\\
be\_afraid{\sc -sim}\\
\rede{being afraid}
The following examples show that this process does not apply to the other plosives /k/ and /p/. Stems ending in a glottal stop are treated like open stems, illustrated by \Next[c]. Stems that have a coronal augment yield an underlying sequence of three consonants when followed by /s/. In this case, nothing gets assimilated. The general rule for augmented stems followed by consonants applies, i.e., the augment is simply omitted, as illustrated in \NNext.
\ex.\a.\glll ap.se\\
/ap-se/\\
shoot{\sc -sup}\\
\rede{in order to shoot}
\b.\glll cok.se\\
/cok-se/\\
do{\sc -sup}\\
\rede{in order to do}
\b.\glll so.se\\
/soʔ-se/\\
look{\sc -sup}\\
\rede{in order to look}
\ex.\a.\glll un.se\\
/und-se/\\
pull{\sc -sup}\\
\rede{in order to pull}
\b.\glll chep.se\\
/chept-se/\\
write{\sc -sup}\\
\rede{in order to write}
Furthermore, stems ending in a coronal stop, and occasionally also stems ending in a glottal stop, show regressive \isi{assimilation} to a velar place of articulation, as shown in \Next.
\ex.\a.\glll phak.khuba\\
/phat-khuba/\\
help{\sc -nmlz}\\
\rede{helper}
\b.\glll khek.khuba\\
/khet-khuba/\\
carry\_off{\sc -nmlz}\\
\rede{the one who carries it off}
\b.\glll sok.khuba\\
/soʔ-khuba/\\
look{\sc -nmlz}\\
\rede{the one who looks}
An optional regressive \isi{assimilation}, conditioned by fast speech, can be found in underlying sequences of \isi{nasals} followed by a palatal glide or a lateral approximant (/y/ or /l/), both stem-initially and stem-finally. In such environments, the nasal assimilates further, giving up its feature of nasality (see \Next).
\ex.\a.\glll lleŋmenna.\\
/N-leks-meʔ-n=na/\\
{\sc neg-}become{\sc [3sg]-npst-neg=nmlz.sg}\\
\rede{It will not happen./It is not alright.}
\b.\glll mẽyelle.\\
/meN-yen-le/\\
{\sc neg-}obey{\sc -cvb}\\
\rede{without listening/obeying}
\b.\glll yyupmaci.\\
/N-yupma=ci/\\
{\sc 2sg.poss-}tiredness{\sc =nsg}\\
\rede{your tiredness}\footnote{Some nouns are obligatorily marked for nonsingular, especially in experiential expressions.}
\subsection{Operations involving nasals}\label{nas-strat}
\subsubsection{Nasality assimilation}
The nasal consonants themselves also trigger several regressive \isi{assimilation} processes. The affected consonants may change in nasality or in place of articulation. Coronals and the glottal stop are particularly prone to assimilations, while the velar and the bilabial stop are less inclined to assimilate. Stem-final /t/ and /ʔ/ will assimilate completely if they are followed by stressed syllables starting in /m/ (see \Next[a]). Under the same condition, stems ending in velar stops (both plain and augmented) undergo nasal \isi{assimilation}, with the place of articulation being retained (see \Next[b] and \Next[c]).
\largerpage
\ex.\a.\glll pham.ˈmeŋ.na\\
/phat-me-ŋ=na/\\
help{\sc [3sg.A]-npst-1sg.P=nmlz.sg}\\
\rede{He/she helps me.}
\b.\glll peŋ.ˈmeʔ.na\\
/pek-meʔ=na/\\
break{\sc [3sg]-npst=nmlz.sg}\\
\rede{It breaks.}
\b.\glll naŋ.ˈmeʔ.na\\
/nakt-meʔ=na/\\
ask{\sc [3sg]-npst=nmlz.sg}\\
\rede{He asks.}
In stems that end in /n/ or /nd/ (with augmented /t/), the coda completely assimilates to [m]. In contrast to the \isi{assimilation} discussed above, this \isi{assimilation} is not sensitive to stress. For instance, stems like \emph{tund} \rede{understand} and \emph{yen} \rede{obey} have the infinitival forms \emph{tumma} and \emph{yemma}, respectively, with the stress falling on the first \isi{syllable}. Stems ending in a velar stop or in a bilabial stop never assimilate completely; their place of articulation is retained. Compare, e.g., \emph{pekma} \rede{break} (stem: \emph{pek}) with \Last[b] above. Following a general rule in Yakkha, augmented stems (ending in two consonants) block \isi{assimilation} and also other morphophonological processes, e.g., \emph{chepma} \rede{write} (stem: \emph{chept}). Furthermore, velar and bilabial \isi{nasals} never assimilate to other \isi{nasals}, in contrast to languages like Athpare and Belhare \citep{Ebert1997A-grammar, Bickel2003Belhare}.
\subsubsection{Nasalization of codas}\label{nas-cod}
Nasalization of obstruents does not only happen as \isi{assimilation} to nasal material. When obstruents are adjacent in complex predicates, the first obstruent, i.e., the stem-final consonant of the first stem, becomes a nasal in order to avoid a marked structure. Examples are provided in \tabref{nasalobs}.\footnote{The V2 \emph{-piʔ} has a suppletive form \emph{-diʔ}, which cannot be explained by phonological operations. It occurs only in intransitive uses of \emph{-piʔ \ti -diʔ} \rede{give} as a \isi{function verb}. The inflected forms show that the underlying stem is \emph{-piʔ}.} Within complex predicates this process is most frequently found in infinitival forms. In the inflected forms, morphological material (suffixes with vowel quality) gets inserted between the verbal stems, thus resolving the marked sequences of adjacent obstruents.
The nasal often retains the place of articulation of the underlying obstruent, but some assimilations are possible too, e.g., /sos-kheʔ-ma/ becoming \emph{soŋkheʔma} \rede{slide off} (slide-go). If the underlying obstruent is a glottal stop, the place of articulation of the nasal is always conditioned by the following consonant, e.g., \emph{han-cama} /haʔ-cama/ \rede{devour} (bite-eat).
\largerpage
As \tabref{nasalobs} shows, both simple (CVC) and augmented stems (CVC-s and \mbox{CVC-t}) are subject to this change from obstruent to nasal. The same change can be observed in reduplicated adverbs and \isi{adjectives}, e.g., in \emph{sonson} \rede{slanted} (derived from the verbal stem /sos/) or \emph{simsim} \rede{squinting, blinking} (derived from the verbal stem /sips/).
This process is also sensitive to stress. The last item of \tabref{nasalobs}, \emph{um.ˈkheʔ.ma}, with the stress on the second \isi{syllable}, can be contrasted with the nominalized \emph{ˈup.khu.ba} \rede{something that collapses}, with the stress on the first \isi{syllable}. Here, the stem appears in the general form of \emph{t}-augmented stems that are followed by consonants: the augment is simply omitted.
\begin{table}[htp]
\begin{center}
\begin{tabular}{lll}
\lsptoprule
\multicolumn{2}{l}{{\sc citation forms}} &{\sc stems}\\
\midrule
\emph{yuncama} &\rede{laugh, smile} &/yut/ + /ca/\\
\emph{suncama} &\rede{itch} &/sus/ + /ca/\\
\emph{incama} &\rede{play} &/is/ + /ca/\\
\emph{hancama} &\rede{devour} &/haʔ/ + /ca/\\
\emph{sendiʔma} &\rede{get stale} &/ses/ + /piʔ/\\
\emph{mandiʔma} &\rede{get lost} &/mas/ + /piʔ/\\
\emph{pendiʔma} &\rede{get wet} &/pet/ + /piʔ/\\
\emph{phomdiʔma} &\rede{spill} &/phopt/ + /piʔ/\\
\emph{sonsiʔma} &\rede{slide, slip} &/sos/ + /siʔ/\\
\emph{tomsiʔma} &\rede{get confused} &/tops/ + /siʔ/\\
\emph{yaŋsiʔma} &\rede{get exhausted} &/yak/ + /siʔ/\\
\emph{homkheʔma} &\rede{get damaged} &/hop/ + /kheʔ/\\
\emph{soŋkheʔma} &\rede{slide off} &/sos/ + /kheʔ/\\
\emph{umkheʔma} &\rede{collapse} &/upt/ + /kheʔ/\\
\lspbottomrule
\end{tabular}
\caption{Nasalization of obstruents stem-finally}\label{nasalobs}
\end{center}
\end{table}
\subsubsection{Insertion of nasals}
In addition to the nasalization of obstruents, \isi{nasals} can be inserted in complex predication if the following condition is met: when the V2 in a \isi{complex predicate} starts with a vowel or with /h/, either the preceding consonants (the complete coda or only the augment of the first verbal stem) become \isi{nasals}, or, when the first stem has CV or CVʔ shape, the default nasal /n/ is inserted between the two stems. \tabref{nasalins} provides examples of citation forms of complex predicates with inserted \isi{nasals}, together with their underlying stems.
The process is not a blind insertion of phonetic material, i.e., it is not simply epenthesis. Remarkably, it is triggered by the phonological quality of non-adjacent morphological material: the change of stops to \isi{nasals} or the insertion of \isi{nasals} is conditioned by the availability of \isi{nasals} in the morphology that attaches to the stem. The suffixes containing \isi{nasals} have to attach directly to the complex stem in order to trigger the insertion of \isi{nasals}. Compare the examples in \Next. In \Next[a] and \Next[b], the sequence /pt/ becomes [mn], and the following /h/ is realized as the aspiration of [n]. In \Next[c], the inflection does not immediately contain a nasal, and thus the phonological material of the stem remains as it is. It gets resyllabified, however, and the /h/ is realized as aspiration of the preceding consonant. Example \NNext, with the verb \emph{leʔnemma} \rede{let go, drop} illustrates the insertion of /n/ when a CV-stem (or CVʔ) and a vowel-initial stem are adjacent in complex predication. The same condition as in \Next can be observed. Only nasal material in the suffix string licenses the insertion of /n/ between the two verbal stems.
\ex.\a. \glll lem.nhaŋ.ma\\
/lept-haks-ma/\\
throw{\sc -V2.send-inf}\\
\rede{to throw away/out}
\b. \glll lem.nhaŋ.nen?\\
/lept-haks-nen/\\
throw{\sc -V2.send-1>2}\\
\rede{Shall I throw you out?}
\b. \glll lep.thak.suŋ.na\\
/lept-haks-u-ŋ=na/\\
throw{\sc -V2.send-3.P[pst]-1sg.A=nmlz.sg}\\
\rede{I threw her/him out.}
\ex.\a. \glll leʔ.nen.saŋ\\
/leʔ-end-saŋ/\\
drop{\sc -V2.insert-sim}\\
\rede{stretching down}
\b. \glll u.laŋ le.ʔen.du.ci.ya\\
/u-laŋ leʔ-end-a-u-ci=ha/\\
{\sc 3sg.poss-}leg drop{\sc -V2.insert-pst-3.P-nsg.P=nmlz.nsg}\\
\rede{It (the aeroplane) lowered its landing gear.}
\begin{table}[htp]
\resizebox*{\textwidth}{!}{
\small
\begin{tabular}{ll}
\lsptoprule
{\sc citation forms}&{\sc stems}\\
\midrule
\emph{hu.nhaŋ.ma} \rede{burn down} &/huʔ/ + /haks/\\
\emph{lem.nhaŋ.ma} \rede{throw away/out} &/lept/ + /haks/\\
\emph{khu.nhaŋ.ma} \rede{rescue} &/khus/ + /haks/\\
\emph{iŋ.nhaŋ.ma} \rede{chase off} &/ikt/ + /haks/\\
\midrule
\emph{pheʔ.na.ma} \rede{drop, leave at some place} &/phes/ + /a/\\
\emph{et.na.ma} \rede{enroll, install somewhere (and come back)}& /et/ + /a/\\
\emph{tik.na.ma} \rede{take along}& /tikt/ + /a/\\
\emph{tiʔ.na.ma} \rede{deliver (and come back), bring} &/tis/ + /a/\\
\emph{yuk.na.ma} \rede{put for s.b. and leave} &/yuks/ + /a/\\
\midrule
\emph{leʔ.nem.ma} \rede{drop} &/leʔ/ + /end/\\
\emph{hak.nem.ma} \rede{send down}& /hakt/ + /end/\\
\emph{aʔ.nem.ma} \rede{wrestle down}& /a/ + /end/\\
\emph{ak.nem.ma} \rede{kick down} &/ak/ + /end/\\
\emph{lep.nem.ma} \rede{throw down} &/lept/ + /end/\\
\lspbottomrule
\end{tabular}
}
\caption{The insertion of \isi{nasals} in complex predication}\label{nasalins}
\end{table}
The insertion of /n/ can affect the coda of the first stem, too. Stems ending in /s/ may change to CV-ʔ when followed by a vowel-initial stem, as in \emph{tiʔnama} \rede{deliver} (/tis + a/). This again suggests a sequence of processes, i.e., the insertion of /n/, followed by the change of /s/ to [ʔ]. It is not clear, however, why these citation forms do not simply resyllabify, e.g., to [tisama] instead of [tiʔnama], because this resyllabification is exactly what happens in the corresponding inflected forms. Apparently, speakers prefer to keep morpheme boundaries and \isi{syllable} boundaries congruent in citation forms. Note that V2s starting in /h/ behave differently from V2s starting in a vowel, because a \isi{complex predicate} consisting of /khus/ + /haks/ does not become [khuʔ.nhaŋ.ma] but \emph{khu.nhaŋ.ma}.
\tabref{nas-sum} summarizes the processes of the preceding two sections, with examples for each process. To sum up, the insertion of \isi{nasals} and the transformation of obstruents to \isi{nasals} are employed to avoid marked structures such as adjacent vowels, adjacent obstruents, and impossible \isi{syllable} codas, while also maintaining the identity of morpheme boundaries and \isi{syllable} boundaries. This stands in contrast to inflected forms, where resyllabification is unproblematic.
\begin{table}[htp]
\resizebox*{\textwidth}{!}{
\small
\begin{tabular}{lll}
\lsptoprule
{\sc operation}&{\sc citation form }&{\sc V.lex + V2}\\
\midrule
/C{\tiny[1]}+C/ → {\bf N{\tiny[1]}}.C&\emph{hom.kheʔ.ma} \rede{get damaged} &/hop/ + /kheʔ/\\
/C{\tiny[1]}C{\tiny[2]}+V/ → C{\tiny[1]}.{\bf n}V&\emph{mak.ni.ma} \rede{surprise}& /maks/ + /i/\\
/C{\tiny[1]}C{\tiny[2]}+hV/ → {\bf N{\tiny[1]}.n}hV&\emph{lem.nhaŋ.ma} \rede{throw away/out} &/lept/ + /haks/\\
/s+hV/ → .{\bf n}hV&\emph{khu.nhaŋ.ma} \rede{rescue} &/khus/ + /haks/\\
/s+V/ → ʔ.{\bf n}V&\emph{maʔ.ni.ma} \rede{lose}& /mas/ + /i/\\
/V+V/ → Vʔ.{\bf n}V&\emph{aʔ.nem.ma} \rede{wrestle down}& /a/ + /end/\\
\lspbottomrule
\end{tabular}
}
\caption{Repair operations in complex predicates involving nasals}\label{nas-sum}
\end{table}
\subsection{Nasal copying}\label{sec-nasalcop}
In the verbal inflection of Kiranti languages, nasal morphemes can be realized up to three times in the suffix string, a process that was termed \rede{affix copying} or \rede{nasal copying}, e.g., in \citet{Driem1987A-grammar, Doornenbal2009A-grammar, Ebert2003Kiranti, Bickel2003Belhare}. Alternative analyses have been proposed to explain this process: \isi{recursive inflection} in \citet{Bickeletal2007Free} and radically underspecified segments in \citet{Zimmermann2012_Affix}.
Yakkha \isi{nasal copying} is illustrated by \Next. Suffixes that consist of \isi{nasals} or that contain \isi{nasals} occur more than once under certain conditions, and without any semantic consequences. There are no contrasting forms that lack the copied suffixes. It is morphologically most economical to assume regressive copying, with the last nasal suffix serving as base. A comparison of the inflected forms in \Next below supports this reasoning, because the slots after the suffixes \emph{-meʔ} and \emph{-u} are filled with varying material.\footnote{Note that the glosses \rede{1sg.A} and \rede{\textsc{excl}} refer to the same morpheme, if the structure of the whole paradigm is taken into account. It is defined by the property [non-\isi{inclusive}]. This collapse of markers is also found in the intransitive forms of the Belhare verbal inflection \citep{Bickel1995In-the-vestibule}. For the sake of the readability of the glosses, the morphological analysis as well as the \isi{alignment} patterns of particular morphemes are kept out of the glosses as far as possible.} What is remarkable about the \isi{nasal copying} is that the value of the underspecified nasal is determined by non-adjacent segments.
\ex.\a.\glll piŋ.ciŋ.ha\\
/piʔ-a-u-N-ci-ŋ=ha/\\
give\textsc{-pst-3.P-[N]-3nsg.P-1sg.A=nmlz.nsg}\\
\rede{I gave it to them.}
\b.\glll tun.dum.cim.ŋha\\
/tund-a-u-N-ci-m-ŋ=ha/\\
understand\textsc{-pst-3.P-[N]-3nsg.P-1pl.A-excl=nmlz.nsg} \\
\rede{We understand them.}
\b.\glll ndum.men.cun.ci.ga.nha\\
/n-tund-meʔ-N-ci-u-N-ci-ga-n=ha/\\
\textsc{neg-}Σ\textsc{-npst-[N]-du.A-3.P-[N]-3nsg.P-2.A-neg=nmlz.nsg}\\
\rede{You (dual) do not understand them.}
The motivation for this copying process might be a phonological repair operation to yield closed syllables.\footnote{Cf. \citet[22]{Schikowski2012_Morphology} for the same explanation on \ili{Chintang} suffix copying, although on p. 25 he points out that this explanation is not watertight, since some copying processes may even create open syllables.} Repair operations involving \isi{nasals} would not be uncommon for Yakkha, as I have pointed out in \sectref{nas-strat}. An obvious shortcoming of this explanation is that \isi{nasals} are not copied to all syllables that one would expect in light of a purely phonological condition (compare \Next[a] and \Next[b]).
\ex.\ag.ŋ-khy-a-ma-ga-n=na (*ŋkhyanmanganna).\\
{\sc neg-}go\textsc{-pst-prf-2-neg=nmlz.sg}\\
\rede{You have not come.}
\bg.ŋ-khy-a-ma-n-ci-ga-n=ha.\\
{\sc neg-}go\textsc{-pst-prf-[N]-du-2-neg=nmlz.nsg}\\
\rede{You (dual) have not come.}
An alternative analysis has been proposed by \citet{Zimmermann2012_Affix}, resulting from a \isi{comparison} of several Kiranti languages. In her approach, the copying is a morpheme-specific process, happening only in the vicinity of certain suffixes. In line with her observations, all instances of copied \isi{nasals} in Yakkha directly precede the suffix \emph{-ci} (with the two morphological values \rede{dual} and \rede{{\sc 3nsg.P}}, see the \isi{paradigm tables} in \sectref{paradigmtables}). Hence, it is the suffix \emph{-ci} that licenses the \isi{nasal copying} in Yakkha. The process as such and the phonological content of the copies are morphologically informed; they are based upon the presence of certain morphological markers. In the absence of \emph{-ci} nothing gets copied, and the same holds for inflectional forms in which no \isi{nasals} are available to serve as base. Hence, \isi{nasal copying} is not just the blind fulfillment of a phonological constraint, as epenthesizing any nasal material would be. On the other hand, since no semantic content is added by the nasal copies, the operation is not purely morphological either, but located at the boundary between phonology and morphology.
A further observation is that the nasal suffixes compete with regard to which suffix serves as the base for the copying. If we compare \Next[a] and \Next[b], we can see that here, the preferred choice is /n/, instantiated by the \isi{negation} marker, although the closest available base in \Next[b] would be the velar nasal from the suffix \emph{-ŋ}. This shows that the choice is not determined by the linear succession of the available \isi{nasals}. The \isi{negation} is the only morphological contrast between the two verb forms, and the nasal that is copied changes from /ŋ/ to /n/, compared to \Next[a]. In \Next[c], there is a competition between /n/ and /m/ as bases, which is won by /m/. This selection principle holds throughout the inflectional paradigm, so that the hierarchy for the choice of the base must be /m/ > /n/ > /ŋ/.
\ex. \a.\glll tum.meŋ.cuŋ.ci.ŋha\\
/tund-meʔ-N-ci-u-N-ci-ŋ=ha/\\
Σ\textsc{-npst-[N]-du.A-3.P-[N]-3nsg.P-excl=nmlz.nsg}\\
\rede{We (dual, excl.) understand them.}
\b.\glll ndum.men.cun.ci.ŋa.nha\\
/n-tund-meʔ-N-ci-u-N-ci-ŋ(a)-n=ha/\\
{\sc neg-}Σ\textsc{-npst-[N]-du.A-3.P-[N]-3nsg.P-excl-neg=nmlz.nsg}\\
\rede{We (dual, excl.) do not understand them.}
\b.\glll ndun.dwam.cim.ŋa.nha\\
/n-tund-wa-u-N-ci-m-ŋ(a)-n=ha/\\
{\sc neg-}Σ\textsc{-npst-3.P-[N]-3nsg.P-1pl.A-excl-neg=nmlz.nsg}\\
\rede{We (plural) do not understand them.}
We compared two different base classification models to analyze their ability to learn from these small handshape datasets: Prototypical Networks \cite{protonet} and DenseNet \cite{densenet}. Prototypical Networks is a model that was designed explicitly to deal with small sample sizes. DenseNet, on the other hand, is currently a state-of-the-art model for image classification with convolutional neural networks; while it has not been explicitly designed for small datasets, it has shown exceptional performance in many different tasks.
In the case of DenseNet, we also experiment with Model-Agnostic Meta-Learning (MAML) \cite{DBLP:journals/corr/FinnAL17} and Transfer Learning \cite{tan2018} training techniques, in addition to the traditional training process.
Transfer Learning is a well-known technique to jump-start the training of a neural network for a problem A using datasets from a similar problem B. The weights of a network trained on B are used as the initial weights in the training of the network for A. Retraining the network for A is called finetuning, and it may retrain only a subset of the weights of the network. However, it may still require large amounts of data for the finetuning phase.
Finally, MAML is a meta-learning technique for few-shot learning that involves learning subtasks. In this context, each subtask corresponds to a subset of the training data with its own training and meta-training samples (see section \ref{sec:models:maml}).
In the following subsections we describe each of these models in more detail.
\subsubsection{Wide-DenseNet}
We selected a DenseNet based architecture as it is a state-of-the-art model in many domains and can handle small datasets with a low error rate \cite{pmlr-v80-pham18a}.
DenseNet \cite{densenet} works by concatenating the feature-maps of a convolutional block with the feature-maps of all the previous convolutional blocks and using the resulting set of feature-maps as input for the next convolutional block. In this way, each convolutional block receives the collective knowledge of all the previous layers, and a global state of the network is maintained that every block can access.
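For illustration, the following minimal PyTorch sketch shows this connectivity pattern, in which every layer receives the concatenation of all earlier feature-maps; the growth rate and number of layers are placeholder values chosen for the example rather than the configuration used in our experiments.
\begin{verbatim}
# Minimal sketch of DenseNet-style connectivity (illustrative values only).
import torch
import torch.nn as nn

class DenseBlock(nn.Module):
    def __init__(self, in_channels, growth_rate=12, n_layers=4):
        super().__init__()
        self.layers = nn.ModuleList([
            nn.Sequential(
                nn.BatchNorm2d(in_channels + i * growth_rate),
                nn.ReLU(inplace=True),
                nn.Conv2d(in_channels + i * growth_rate, growth_rate,
                          kernel_size=3, padding=1, bias=False))
            for i in range(n_layers)])

    def forward(self, x):
        features = [x]
        for layer in self.layers:
            # each layer sees the concatenation of all previous feature-maps
            features.append(layer(torch.cat(features, dim=1)))
        return torch.cat(features, dim=1)
\end{verbatim}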
We employed a variation on DenseNet called Wide-DenseNet, which follows the strategy used by wide residual networks \cite{He2015DeepRL}. Wide-DenseNet decreases the depth of the network and increases the width of the layers. This way the model can be trained faster, by optimizing feature reuse, and can reach higher accuracy.
Additionally, we use Squeeze and Excitation blocks (SE blocks) \cite{Hu2017SqueezeandExcitationN} to improve the performance of the DenseNet model. Convolutional networks construct informative features by fusing both spatial and channel-wise information within local receptive fields at each layer. SE blocks focus on channel-wise information, improving the quality of the representations produced by the network by modeling the interdependencies between channels to perform feature recalibration. SE blocks can be included in any model that uses convolutional layers to improve its performance at a low computational cost. We use SE blocks between dense and transition blocks, see figure \ref{fig:densenet}.
\begin{figure*}[!ht]
\centerline{\includegraphics[width=0.85\textwidth]{images/background/densenet.png}}
\caption{DenseNet using 2 dense blocks and SE blocks.}
\label{fig:densenet}
\end{figure*}
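To make the role of the SE blocks concrete, the sketch below shows one possible PyTorch formulation of the squeeze-and-excitation operation; the reduction ratio is an assumption for illustration and not necessarily the value used in our model.
\begin{verbatim}
# Minimal sketch of a Squeeze-and-Excitation block (reduction ratio assumed).
import torch.nn as nn

class SEBlock(nn.Module):
    def __init__(self, channels, reduction=16):
        super().__init__()
        self.pool = nn.AdaptiveAvgPool2d(1)   # "squeeze": global average pool
        self.fc = nn.Sequential(              # "excitation": per-channel weights
            nn.Linear(channels, channels // reduction),
            nn.ReLU(inplace=True),
            nn.Linear(channels // reduction, channels),
            nn.Sigmoid())

    def forward(self, x):
        b, c, _, _ = x.shape
        w = self.fc(self.pool(x).view(b, c)).view(b, c, 1, 1)
        return x * w                          # channel-wise recalibration
\end{verbatim}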
\subsubsection{Transfer Learning} \label{models:tl}
Gathering new training data for deep neural networks can be an expensive and time-consuming task. Transfer learning provides a way to utilize already available data from a source domain and transfer the acquired knowledge from this source domain to a target domain. By doing transfer learning we can obtain much better initializations of the parameters of the model before training on the target domain.
In the past, transfer learning has been used for handshape, sign language and gesture recognition \cite{farhadi2007}\cite{quiroga2017study}\cite{allard2017}, demonstrating the advantages of this technique.
In this work, we use network-based transfer learning \cite{tan2018}, in which a part of the network pretrained on the source domain is reused for the training in the target domain. The objective is for the neural network to acquire abstract information from the source domain and transfer this knowledge to the target task.
To obtain good performance from transfer learning, the source dataset commonly needs to be larger than the target dataset. Since the information extracted from the target dataset has a higher value than the information from the source dataset, the data from the target dataset will be more helpful in fitting the target task. In addition to this, it is important for the source domain to be closely related to the target domain. If the two domains differ too much, negative transfer can occur, which can diminish the benefit of transfer learning to the point where applying it actually reduces performance \cite{weiss2016}.
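The snippet below sketches how such network-based transfer could look in PyTorch; the checkpoint file name, the number of target classes and the decision to freeze the feature extractor are assumptions made purely for illustration.
\begin{verbatim}
# Sketch of network-based transfer learning (file name, class count and
# frozen layers are placeholders).
import torch
import torch.nn as nn
from torchvision import models

model = models.densenet121(pretrained=False)
# initialise with weights learned on the (larger) source dataset
model.load_state_dict(torch.load("source_domain_weights.pt"))

# replace the classification head for the target task
model.classifier = nn.Linear(model.classifier.in_features, 16)

# optionally freeze the early feature extractor and finetune only the rest
for p in model.features.parameters():
    p.requires_grad = False

optimizer = torch.optim.Adam(
    (p for p in model.parameters() if p.requires_grad), lr=1e-4)
\end{verbatim}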
\subsubsection{Model-Agnostic Meta-Learning} \label{sec:models:maml}
Like Prototypical Networks, Model-Agnostic Meta-Learning (MAML) \cite{DBLP:journals/corr/FinnAL17} is a technique designed to tackle the problem of few-shot learning. MAML learns how to improve a model so that it can learn a new task in only a few steps, by training on many different tasks; this is commonly phrased as learning to learn. MAML does this by training over multiple tasks and updating the parameters of the model based on the improvement obtained after training on each task.
More formally, each task in a set of tasks $T$ consists of a loss function $L$ and a set of elements with their corresponding labels, and MAML requires a distribution over tasks $p(T)$ that we want to adapt to. Given this distribution, we proceed with two steps, task training and meta training. In the task training we sample $K$ tasks. For each task $T_i$, the model is trained on a set of elements extracted from the task distribution using the loss function $L_i$ belonging to $T_i$. With the updated parameters, the model is then tested on new data from $T_i$. Once every task has been processed, the losses obtained from these tests are added and used as the loss of the initial model in the meta training, yielding a new initial model whose parameters grant a bigger improvement on each task in fewer steps.
We made some modifications to the original MAML to work with bigger datasets in a supervised way. Each task $T_i$ is split into two subsets, a training subset $Tt$ and a meta training subset $Tm$. The subsets are composed of pairs $D=\{x,y\}$ where $x$ is an image and $y$ the label of that image. Each subset has an equal size $b$ and their labels are mirrored, $y\in Tt \iff y\in Tm$. A dataset of size $n$ thus yields $n/2b$ tasks $T$. We consider our model as a function $f_\theta$ with parameters $\theta$. In each training iteration $\theta$ changes to $\theta'$. Each iteration consists of two steps, a training step and a meta training step. In the training step we start by storing the value of $\theta$ in $\theta'$; then $\theta$ is updated to fit $Tt_i$. In the meta training step the new $\theta$ is used to calculate the gradients on $Tm_i$: $\nabla L_i( f_\theta(Tm_i))$, and these gradients are applied to $\theta'$, finishing the iteration.
\begin{algorithm}[!h]
\caption{Model-Agnostic Meta-Learning for Few-Shot Supervised Learning}
\SetAlgoLined
\KwIn{A set of tasks T }
Initialize $\theta$\;
\While{not done}{
\For{$T_i$ in $T$}{
Save the parameters $\theta$ in $\theta'$\;
Train $\theta$ using the training subset: $\theta = \theta - \alpha \nabla L_i( f_\theta(Tt_i))$ \;
Get the gradients using the meta training subset: $\nabla L_i( f_\theta(Tm_i))$ \;
Apply the gradients to $\theta'$: $\theta' = \theta' - \alpha \nabla L_i( f_\theta(Tm_i))$\;
}
}
Train $f_\theta$ on the target dataset, which has fewer samples
\end{algorithm}
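A compact rendering of this loop in PyTorch is sketched below, treating the meta gradients in the same first-order fashion as the listing above; the model, loss function and task iterator are placeholders rather than our actual training code.
\begin{verbatim}
# Sketch of the modified MAML iteration (model, loss_fn and tasks are
# placeholders; alpha is the step size).
import copy
import torch

def maml_epoch(model, tasks, loss_fn, alpha=0.01):
    for (x_t, y_t), (x_m, y_m) in tasks:             # (Tt_i, Tm_i) pairs
        params = list(model.parameters())
        theta_prime = copy.deepcopy(model.state_dict())  # save theta in theta'
        # training step: adapt theta to the training subset Tt_i
        grads = torch.autograd.grad(loss_fn(model(x_t), y_t), params)
        with torch.no_grad():
            for p, g in zip(params, grads):
                p -= alpha * g
        # meta training step: gradients on the meta subset Tm_i
        meta_grads = torch.autograd.grad(loss_fn(model(x_m), y_m), params)
        with torch.no_grad():
            model.load_state_dict(theta_prime)       # restore theta'
            for p, g in zip(params, meta_grads):     # and apply the meta gradients
                p -= alpha * g
\end{verbatim}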
\subsubsection{Prototypical Networks}
\label{models:protonet}
Prototypical Networks \cite{protonet} is a meta-learning model for the problem of few-shot classification, where a classifier must generalize to new classes not seen in the training set, given only a small number of examples of each new class. The ability of an algorithm to perform few-shot learning is typically measured by its performance on $n$-shot, $k$-way classification tasks. First, the model is given a set of query samples $Q$ belonging to new, previously unseen classes. Then, it receives a support set $S$, consisting of $n$ examples for each of $k$ different unseen classes. Finally, the algorithm has to determine the classes of the samples in $Q$, given the samples in $S$, see figure \ref{fig:protonet:handshape}.
\begin{figure}[!ht]
\centerline{\includegraphics[width=1.0\columnwidth]{images/background/protonets.png}}
\caption{Prototypical Networks given a set of query samples and support set}
\label{fig:protonet:handshape}
\end{figure}
Prototypical Networks apply an inductive bias in the form of class prototypes to achieve impressive few-shot performance. The key assumption is the existence of an embedding in which the samples of each class cluster around a single prototypical representation, which is simply the mean of the embedded samples of that class. This idea streamlines $n$-shot classification in the case of $n > 1$, as classification reduces to selecting the label of the closest class prototype, see figure \ref{fig:protonet}.
Schemes for few-shot classification tasks like Prototypical Networks can also be of use when training on small datasets.
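The core computation can be sketched in a few lines of PyTorch, assuming an embedding network \texttt{f} and already-batched support and query tensors; the names below are placeholders.
\begin{verbatim}
# Sketch of nearest-prototype classification (names are placeholders).
import torch

def classify_queries(f, support, support_labels, queries, n_classes):
    z_s, z_q = f(support), f(queries)          # embed support and query samples
    # class prototype = mean embedding of that class's support samples
    prototypes = torch.stack([z_s[support_labels == c].mean(dim=0)
                              for c in range(n_classes)])
    dists = torch.cdist(z_q, prototypes) ** 2  # squared Euclidean distances
    probs = torch.softmax(-dists, dim=1)       # softmax over negative distances
    return probs.argmax(dim=1), probs
\end{verbatim}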
\begin{figure}[!ht]
\centerline{\includegraphics[width=1.15\columnwidth]{images/background/prototypical-networks.png}}
\caption{Prototypical networks in the few-shot and zero-shot scenarios. \textbf{Left}: Few-shot prototypes
$\mathbf{c}_k$ are computed as the mean of embedded support examples for each class. \textbf{Right}: Zero-shot prototypes $\mathbf{c}_k$ are produced by embedding class meta-data $\mathbf{v}_k$. In either case, embedded query points are classified via a softmax over distances to class prototypes.}
\label{fig:protonet}
\end{figure}
\part{Introduction}
\chapter{Introduction}
\par
The {\bf SPOOLES} package is used to solve two types of
real or complex linear systems:
\begin{itemize}
\item
$AX = Y$ or $(A + \sigma B)X = Y$ where $A$ and $B$ are square.
$A$ and $B$ can be real or complex,
symmetric, Hermitian or nonsymmetric.
The factorization can proceed with or without pivoting for
numerical stability.
The factor matrices can be stored with or without dropping small
entries.
\item
Minimize $\|AX_{*,j} - Y_{*,j}\|_2$ for each column of the solution
matrix $X$ and right hand side matrix $Y$.
This is done by computing a $QR$ factorization of $A$ and then
solving the normal equations $R^T R X = A^T Y$ or $R^H R X = A^H Y$
(since $Q$ has orthonormal columns, $A^TA = R^TR$ in the real case
and $A^HA = R^HR$ in the complex case).
\end{itemize}
In both cases, the linear systems can be permuted to reduce the
fill in the factor matrices.
\par
The {\bf SPOOLES} software is written in an object oriented fashion
in the C language.
Parts of the software run in serial mode, in multithreaded mode
using Solaris or POSIX threads, and in parallel using MPI.
\par
The software objects are naturally partitioned into three families of
objects.
\par
\begin{center}
\begin{tabular}{|l|l|}
\multicolumn{2}{c}{\bf Utility objects} \\ \hline
{\tt A2} & dense two dimensional array \\
{\tt Coords} & object to hold coordinates in any number of dimensions \\
{\tt DV} & double precision vector \\
{\tt Drand} & random number generator \\
{\tt I2Ohash} & hash table for the factor submatrices \\
{\tt IIheap} & simple heap object \\
{\tt IV} & int vector \\
{\tt IVL} & int list object \\
{\tt Ideq} & simple dequeue object \\
{\tt Lock} & abstract mutual exclusion lock \\
{\tt Perm} & permutation vector object \\
{\tt Utilities} & various vector and linked list utility methods \\
{\tt ZV} & double precision complex vector \\
\hline
\end{tabular}
\end{center}
\par
\begin{center}
\begin{tabular}{|l|l|}
\multicolumn{2}{c}{\bf Ordering objects} \\ \hline
{\tt BKL} & Block Kernighan-Lin algorithm object \\
{\tt BPG} & bipartite graph object \\
{\tt DSTree} & domain/separator tree object \\
{\tt EGraph} & element graph object \\
{\tt ETree} & front tree object \\
{\tt GPart} & graph partitioning algorithm object \\
{\tt Graph} & graph object \\
{\tt MSMD} & multi-stage minimum degree algorithm object \\
{\tt Network} & network object for solving max flow problems \\
{\tt SolveMap} & map of submatrices to processes for solves \\
{\tt Tree} & tree object \\
\hline
\end{tabular}
\end{center}
\par
\begin{center}
\begin{tabular}{|l|l|}
\multicolumn{2}{c}{\bf Numeric objects} \\ \hline
{\tt Chv} & block chevron object for fronts \\
{\tt ChvList} & object to hold lists of {\tt Chv} objects \\
{\tt ChvManager} & object to manage instances of {\tt Chv} objects \\
{\tt DenseMtx} & dense matrix object \\
{\tt FrontMtx} & front matrix object \\
{\tt ILUMtx} & simple preconditioner matrix object \\
{\tt InpMtx} & sparse matrix object \\
{\tt Iter} & Krylov methods for iterative solves \\
{\tt PatchAndGoInfo} &
modified factors in the presence of zero or small pivots \\
{\tt Pencil} & object to contain $A + \sigma B$ \\
{\tt SemiImplMtx} & semi-implicit factorization matrix object \\
{\tt SubMtx} & object for dense or sparse submatrices \\
{\tt SubMtxList} & object to hold lists of {\tt SubMtx} objects \\
{\tt SubMtxManager} &
object to manage instances of {\tt SubMtx} objects \\
{\tt SymbFac} & algorithm object to compute a symbolic factorization \\
\hline
\end{tabular}
\end{center}
The {\tt MT} directory contains all the multithreaded methods
and driver programs.
The {\tt MPI} directory contains all the {\tt MPI} methods and drivers.
The {\tt misc} directory contains miscellaneous methods and drivers.
\par
Each of the following objects that hold numeric entries ---
{\tt A2}, {\tt Chv}, {\tt DenseMtx}, {\tt FrontMtx},
{\tt ILUMtx},
{\tt InpMtx},
{\tt Pencil},
{\tt SemiImplMtx} and
{\tt SubMtx} --- can hold real or complex entries.
An object knows its {\tt type},
{\tt 1} for real (define'd constant {\tt SPOOLES\_REAL})
or
{\tt 2} for complex (define'd constant {\tt SPOOLES\_COMPLEX}).
Since C does not yet have a standard structure for complex numbers,
we have followed the FORTRAN convention of storing the real and
imaginary parts of a complex number in consecutive memory locations.
Internally, we unroll the complex arithmetic into real arithmetic.
The user need not be burdened by this process if (s)he uses the
input/output methods for the different objects.
For example,
{\tt DenseMtx\_setRealEntry()} sets an entry of a real dense matrix,
while {\tt DenseMtx\_setComplexEntry()} sets an entry of a complex
dense matrix.
\par
All the heavily used computational tasks have been expanded where
possible into BLAS2 or BLAS3 kernels, for both the real and complex
cases.
There are a multitude of driver programs that test the
functionality of the objects.
A common output of a driver program is a file that can be input
into Matlab to check the errors of the computations.
This convention inspires confidence in the correctness of the
kernel computations.
\par
\section{Software Design}
\label{chapter:softwareDesign}
\par
The {\bf SPOOLES} library is written in the C language and uses
object oriented design.
There are some routines that manipulate native C data types such as
vectors, but the vast bulk of the code is centered around objects,
data objects and algorithm objects.
By necessity, the implementation of an object is through the C {\tt
struct} data type.
We use the following naming convention --- a method (i.e., function)
associated with an object of type {\tt Object} has the form
\centerline{{\it (return value type)}
{\tt Object\_}{\it methodName}{\tt (Object * obj}, $\ldots${\tt )};}
The method's name begins with the name of the object it is
associated with and the first parameter in the calling sequence is
a pointer to the instance of the object.
Virtually the only exception to this rule is the {\it constructor}
method.
\centerline{\tt Object * Object\_new(void) ;}
\noindent
Two objects, the {\tt Chv} and {\tt DenseMtx} objects, have
methods that return the number of bytes needed to hold their data,
e.g.,
\centerline{\tt
int Chv\_nbytesNeeded(int nD, int nL, int nU, int type, int symflag) ;}
\par
Scan the directory structure of the source code and you will notice
a number of subdirectories --- each deals with an object.
For example,
the {\tt Graph} directory holds code and documentation for an
object that represents a graph:
its {\tt doc} subdirectory holds \LaTeX files with documentation;
its {\tt src} subdirectory holds C files that contain methods
associated with the object ;
% (each method is a C function of the form {\tt Graph\_*});
and its {\tt driver} subdirectory holds driver programs to test or
validate some behavior of the object.
\par
The directory structure is fairly flat --- no object directory
contains another --- because the C language does not support
inheritance.
This can be inelegant at times.
For example, a bipartite graph (a {\tt BPG} object)
{\it is--a} graph (a {\tt Graph} object), but instead of {\tt BPG}
inheriting from {\tt Graph} data fields and methods from {\tt Graph},
we must use the {\it has--a} relation.
A {\tt BPG} object contains a pointer to a {\tt Graph} object
that represents the adjacency structure.
The situation is even more cumbersome for the objects that deal
with trees of one form or another: an elimination tree {\tt ETree}
and a domain/separator tree {\tt DSTree} each contain a pointer to
a generic tree object {\tt Tree} in their structure.
\par
Predecessors to this library were written in C++ and
Objective-C.\footnote{The knowledgeable reader is encouraged to
peruse the source to discover the prejudices both pro and con
towards these two languages.}
The port to the present C library was painless, almost mechanical.
We expect the port back to C++ and/or Objective-C to be simple.
\par
Objects are one of two types:
{\it data objects} whose primary function is to store data
and
{\it algorithm objects} whose function is to manipulate some data
object(s) and return new information.
Naturally this distinction can be fuzzy --- algorithm objects have
their own data that may be persistent and data objects can execute
some simple functionality --- but it holds in general.
To be more explicit, data objects have the following properties:
\begin{itemize}
\item
There is a delicate balance between encapsulation and openness.
The C language does not support any private or protected data
fields, so the C {\tt struct} that holds the data for an object
is completely open.
As an example, the {\tt Graph} object has a function to return the
size of and pointers to a vector that contains an adjacency list,
namely
\begin{verbatim}
void Graph_adjAndSize(Graph *g, int v, int *psize, int **padj)
\end{verbatim}
where the pointers {\tt psize} and {\tt padj} are filled with the
size of the adjacency structure and a pointer to its vector.
One can get this same information by chasing pointers as follows.
\begin{verbatim}
vsize = g->adjIVL->sizes[v] ;
vadj = g->adjIVL->p_ind[v] ;
\end{verbatim}
One can do the latter but we encourage the former.
As an experiment we replaced every instance of
{\tt Graph\_adjAndSize()} with the appropriate pointer chasing
(and a similar operation for the {\tt IVL} object) and achieved
around a ten per cent reduction in the ordering time.
For a production code, this savings might drive the change in code,
but for our research code we kept the function call.
\item
Persistent storage needs to be supported.
% Currently this means file storage, but in the future we expect to
% need to ``bundle'' a data object into a block of storage to be
% passed or communicated to a different processor.
Each data object has eight different methods to deal with file I/O.
Two methods deal with reading from and writing to a file whose
suffix is associated with the object name, e.g., {\tt *graph\{f,b\}}
for a formatted or binary file to hold a {\tt Graph} object.
Four methods deal with reading and writing objects from and to a
file that is already opened and positioned, necessary for composite
objects (e.g., a {\tt Graph} object contains an {\tt IVL} object).
Two methods deal with writing the objects to a formatted file to be
examined by the user.
We strongly encourage any new data object added to the library to
supply this functionality.
\item
Some data objects need to have compact storage requirements.
Two examples are our {\tt Chv} and {\tt SubMtx} objects.
Both objects need to be communicated between processes
in the MPI implementation, the former during the factorization,
the latter during the solve.
Each has a workspace buffer that contains all the information
needed to {\it regenerate} the object upon reception by another
process.
\item
By and large, data objects have simple methods.
A {\tt Graph} object does {\bf not} have methods to find a good
bisector; this is a sufficiently sophisticated function that it
should be implemented by an algorithm object.
The major exception to this rule is that our {\tt FrontMtx} object
{\it contains} the factorization data but also {\it performs} the
factorization, forward and backsolves.
In the future we intend to separate these two functionalities.
For example, one can implement an alternative forward and backsolve
by using methods to {\it access} the factor data stored in the {\tt
FrontMtx} object.
As a second example, massive changes to the storage format,
e.g., in an out-of-core implementation, can be encapsulated in the
access methods for the data, and any changes to the factorization
or solve functions could be minimal.
\end{itemize}
Algorithm objects have these properties.
\begin{itemize}
\item
Algorithm objects use data objects.
Some data objects are created within an algorithm object's methods;
these are owned by the algorithm object and free'd by that object.
Data objects that are passed to algorithm objects can be queried
or {\it temporarily} changed.
\item
They do not destroy or free data objects that are passed to them.
Any side effects on the data objects should be innocent, e.g.,
when a {\tt Graph} object is passed to the graph partitioning
object ({\tt GPart}) or the multistage minimum degree object
({\tt MSMD}), on return the adjacency lists may not be in the input
order, but they contain the values they had on input.
\item
Algorithm objects should support diagnostic, logfile and debug output.
This convention is not entirely thought out or supported at present.
The rationale is that an algorithm object should be able to respond
to its environment to a greater degree than a data object.
\end{itemize}
\par
Data and algorithm objects share two common properties.
\begin{itemize}
\item
Each object has four basic methods: to allocate storage for an
object, set the default fields of an object,
clear the data fields of an object,
and free the storage occupied and owned by an object.
\item
Ownership of data is very rigidly defined.
In most cases,
an object owns all data that is allocated inside one of its
methods,
and when this does not hold it is very plainly documented.
For example, the bipartite graph object {\tt BPG} has a data field
that points to a {\tt Graph} object.
One of its initialization methods has a {\tt Graph} pointer in its
calling sequence.
The {\tt BPG} object then owns the {\tt Graph} object and when it
is free'd or has its data cleared, the {\tt Graph} object is free'd
by a call to its appropriate method.
\end{itemize}
\par
By and large these conventions hold throughout the library.
There are fuzzy areas and objects still ``under construction''.
Here are two examples.
\begin{itemize}
\item
We have an {\tt IIheap} object that maintains integer
$\langle$ key, value $\rangle$ pairs in a priority heap.
Normally we think of a heap as a data structure, but another
perspective is that of a continuously running algorithm that
supports insert, delete and identification of a minimum pair.
\item
Our {\tt BPG} bipartite graph object is a data object,
but it has a method to find the Dulmage-Mendelsohn decomposition,
a fairly involved algorithm used to refine a separator of a graph.
At present, we are not willing to create a new algorithm object
just to find the Dulmage-Mendelsohn decomposition, so we leave
this method to the domain of the data object.
The desired functionality, identifying minimal
weight separators for a region of a graph, can be modeled using
max flow techniques from network optimization.
We also provide a {\tt BPG} method that finds this
Dulmage-Mendelsohn decomposition by solving a max flow problem on
a bipartite network.
Both these methods have been superseded by the {\tt Network} object
that contains a method to find a max flow and one or more min-cuts
of a network (not necessarily bipartite).
\end{itemize}
\par
The {\bf SPOOLES} software library is continuously evolving in an
almost organic fashion.
Growth and change are to be expected, and welcomed, but some
discipline is required to keep the complexity, both code and human
understanding, under control.
The guidelines we have just presented have two purposes:
to let the user and researcher get a better understanding of
the design of the library, and to point out some conventions
to be used in extending the library.
\par
\section{Changes from Release 1.0}
\label{section:intro:changes-1.0}
\par
There are two major changes from the first release of the {\bf
SPOOLES} package: we now support complex linear systems, and the
storage format of the sparse factor matrices has changed from a
one-dimensional data decomposition to a two-dimensional
decomposition.
The factors are now submatrix based, and thus allow a parallel
solve to be much faster than in Release 1.0.
\par
In the first release, all numeric objects had a {\tt `D`} as the
leading letter in their name, e.g., {\tt DA2}, {\tt DChv}, etc.
A natural way to implement complex data types would be to write
``parallel'' objects, e.g., {\tt ZA2}, {\tt ZChv}, etc, as is done
in LINPACK and LAPACK for subroutine names.
However, a {\tt DA2} and {\tt ZA2} object share so much common code
that it is a better decision to combine the real and complex
functionality into one object.
This is even more pronounced for the {\tt FrontMtx} object where
there is virtually no code that is dependent on whether or not the
matrix is real or complex.
\par
Virtually no new work has been done on the ordering objects and methods.
Their algorithms were state of the art two years ago, but a recent
comparison with the {\bf EXTREME} \cite{hr98-msndtalk}
and {\bf METIS} \cite{karypis98metis} packages
on a large collection
of finite element problems shows that the {\bf SPOOLES} orderings
are still competitive.
\par
The serial, multithreaded and MPI code has been modified to force
greater sharing of code between the environments.
``What'' is done is identical in the three cases.
The multithreaded and MPI codes share the same ``choreography'',
in other words, who does what and how.
The main differences between multithreaded and MPI
are that the data structures are global versus local,
and that explicit message passing is done in the latter.
This common structure of the codes has a nonzero impact on the speed
and efficiency of the individual codes, but the gains from a common
code base are well worth the cost.
\par
The MPI methods have been extensively reworked since the first
release.
A number of bugs and logic errors have been detected and fixed.
The code appears to be more robust than the first release.
\par
\section{Changes from Release 2.0}
\label{section:intro:changes-2.0}
\par
Release 2.2 is partly a maintenance release.
Some bugs were found and fixed in the MPI factors and solves.
Some minor new methods were added to the
{\tt DenseMtx}, {\tt FrontMtx}, {\tt InpMtx}
and {\tt Utilities} directories.
The multithreaded methods and drivers have been removed from the
{\tt FrontMtx} directory and placed in a new {\tt MT} directory,
much like the {\tt MPI} methods have their own directory.
\par
Some new functionality has been added.
\begin{itemize}
\item
There are now multithreaded
and distributed matrix-matrix multiply methods.
See the {\tt MT} and {\tt MPI} directories.
\item
The {\tt FrontMtx} object now supports more robust reporting of
errors encountered during the factorization.
There is one additional parameter in the factorization calling
sequences, an error return that signals that the factorization has
failed.
\item
In response to customer requirements, we have added some
``patch-and-go'' functionality to the sparse $LU$ and $U^TDU$
factorizations without pivoting.
There are applications in optimization and structural analysis
where pivoting is not necessary for stabilty, but where the
location of small or zero pivots on the diagonal is meaningful.
Normally the factorization would be ustable or stop, but special
action is taken, the factors are ``patched'' and the factorization
continues.
\par
There is a new {\tt PatchAndGoInfo} object that encapsulates the
``patch-and-go'' strategy and gathers optional statistics about the
action that was taken during the factorization.
This object is attached to the {\tt FrontMtx} object which passes
it unchanged to the {\tt Chv} object that performs the
factorization of each front.
If the user does not need this functionality, no changes are
necessary to their code, i.e., no calling sequences are affected.
\item
New MPI broadcast methods for the {\tt Graph}, {\tt IVL} and
{\tt ETree} objects have been added to the library.
\item
The {\tt Iter} directory contains the following Krylov accelerators
for the iterative solution of linear systems:
Block GMRES, BiCGStab, conjugate gradient and transpose-free QMR.
Each is available in both left- and right-preconditioned forms.
The preconditioner that these methods use is a {\tt FrontMtx}
object that contains a drop tolerance approximate factorization.
The {\tt ILUMtx} object contains a simple vector-based drop
tolerance factorization object.
(The {\tt FrontMtx} approximate factorization is submatrix-based in
both its data structures and computational kernels, and supports
pivoting for numerical stability, which the {\tt ILUMtx} object
does not.)
We have not written Krylov methods that use the {\tt ILUMtx}
object, but it would be simple to replace the {\tt FrontMtx}
preconditioner with the {\tt ILUMtx} preconditioner.
\item
The {\tt SemiImplMtx} object contains
a {\it semi-implicit} factorization,
a technique that can require less storage and solve operations than
the present explicit factorization. It is based on the equation
$$
\left \lbrack \begin{array}{cc}
A_{0,0} & A_{0,1} \cr
A_{1,0} & A_{1,1}
\end{array} \right \rbrack
=
\left \lbrack \begin{array}{cc}
L_{0,0} & 0 \cr
L_{1,0} & L_{1,1}
\end{array} \right \rbrack
\left \lbrack \begin{array}{cc}
U_{0,0} & U_{0,1} \cr
0 & U_{1,1}
\end{array} \right \rbrack,
=
\left \lbrack \begin{array}{cc}
L_{0,0} & 0 \cr
A_{1,0}U_{0,0}^{-1} & L_{1,1}
\end{array} \right \rbrack
\left \lbrack \begin{array}{cc}
U_{0,0} & L_{0,0}^{-1}U_{0,1} \cr
0 & U_{1,1}
\end{array} \right \rbrack.
$$
A solve of $AX = B$
with the explicit factorization does the following steps
\begin{itemize}
\item solve $L_{0,0} Y_0 = B_0$
\item solve $L_{1,1} U_{1,1} X_1 = B_1 - L_{1,0} Y_0$
\item solve $U_{0,0} X_0 = Y_0 - U_{0,1} X_1$
\end{itemize}
while an implicit factorization has the following form.
\begin{itemize}
\item solve $L_{0,0} U_{0,0} Z_0 = B_0$
\item solve $L_{1,1} U_{1,1} X_1 = B_1 - A_{1,0} Z_0$
\item solve $L_{0,0} U_{0,0} X_0 = B_0 - A_{0,1} X_1$
\end{itemize}
The difference is that the semi-implicit factorization stores
and computes with
$A_{1,0}$ and $A_{0,1}$ instead of $L_{1,0}$ and $U_{0,1}$,
(this can be a modest savings in storage and operation count),
and performs two solves with $L_{0,0}$ and $U_{0,0}$
instead of one.
This technique works with either a direct or approximate
factorization of $A$.
The semi-implicit factorization is constructed via a
post-processing of any factorization computed by the
{\tt FrontMtx} object.
(A short check that the two solve sequences produce the same
solution is given just after this list.)
\end{itemize}
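The equivalence of the two solve sequences can be checked directly from the
factorization above.
Since $L_{1,0} = A_{1,0}U_{0,0}^{-1}$, $U_{0,1} = L_{0,0}^{-1}A_{0,1}$
and $Y_0 = L_{0,0}^{-1}B_0 = U_{0,0}Z_0$, we have
$$
B_1 - L_{1,0} Y_0 = B_1 - A_{1,0}U_{0,0}^{-1}L_{0,0}^{-1}B_0
                  = B_1 - A_{1,0} Z_0
\quad \mbox{and} \quad
Y_0 - U_{0,1} X_1 = L_{0,0}^{-1}\left( B_0 - A_{0,1} X_1 \right),
$$
so solving $U_{0,0}X_0 = Y_0 - U_{0,1}X_1$ in the explicit scheme yields the
same $X_0$ as solving $L_{0,0}U_{0,0}X_0 = B_0 - A_{0,1}X_1$ in the
semi-implicit scheme.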
\subsection{Topological continuity}\label{subsec:topological_continuity}
\begin{definition}\label{def:local_continuity}
We say that the function \( f: X \to Y \) between topological spaces is \term{continuous} at the point \( x_0 \in X \) if \( f(x_0) \) is a limit \hyperref[def:local_convergence]{point} of \( f \) at \( x_0 \).
If the limit point is unique (e.g. in \hyperref[def:separation_axioms/T2]{Hausdorff spaces}), this condition can be formulated by \enquote{interchanging} \( \lim \) and \( f \) as follows:
\begin{equation*}
f(x_0) = f\left( \lim_{x \to x_0} x \right) = \lim_{x \to x_0} f(x).
\end{equation*}
\end{definition}
\begin{definition}\label{def:global_continuity}
We say that the function \( f: X \to Y \) between topological spaces is \term{everywhere continuous} or simply \term{continuous} if any of the following equivalent conditions holds:
\begin{thmenum}
\thmitem{def:global_continuity/limits} \( f \) is continuous at every point of \( X \) in the sense of \fullref{def:local_continuity}.
\thmitem{def:global_continuity/open} For every open set \( V \in \mscrT_Y \), the \hyperref[thm:def:function/preimage]{preimage} \( f^{-1}(V) \) is open.
\thmitem{def:global_continuity/closed} For every closed set \( F \in F_{\mscrT_Y} \), the preimage \( f^{-1}(F) \) is closed.
\thmitem{def:global_continuity/base} There exists a \hyperref[def:topological_base]{base} \( \mscrB_{\mscrT_Y} \subseteq \mscrT_Y \), such that for every \( V \in \mscrB_{\mscrT_Y} \), the preimage \( f^{-1}(V) \) is open.
\thmitem{def:global_continuity/subbase} There exists a \hyperref[def:topological_subbase]{subbase} \( P_{\mscrT_Y} \subseteq \mscrT_Y \), such that for every \( V \in P_{\mscrT_Y} \), the preimage \( f^{-1}(V) \) is open.
\thmitem{def:global_continuity/closure} For every set \( A \subseteq X \), \( f(\cl(A)) \subseteq \cl(f(A)) \).
\end{thmenum}
We denote the set of all continuous functions from \( X \) to \( Y \) by \( C(X, Y) \).
\end{definition}
\begin{proof}
\ImplicationSubProof{def:global_continuity/limits}{def:global_continuity/open} Follows from \fullref{def:local_convergence/neighborhoods}.
\ImplicationSubProof{def:global_continuity/open}{def:global_continuity/closed} If \( F \in F_{\mscrT_Y} \) is a closed set, \( Y \setminus F \) is open, therefore \( f^{-1}(Y \setminus F) = X \setminus f^{-1}(F) \) is also open. Hence, \( f^{-1}(F) \) is closed.
\ImplicationSubProof{def:global_continuity/open}{def:global_continuity/base} \( \mscrT \) is a base of itself.
\ImplicationSubProof{def:global_continuity/base}{def:global_continuity/subbase} Every base is also a subbase.
\ImplicationSubProof{def:global_continuity/subbase}{def:global_continuity/limits} Follows from the equivalences in \fullref{def:local_convergence}.
\ImplicationSubProof{def:global_continuity/closed}{def:global_continuity/closure} Note that
\begin{equation*}
A
\reloset {\ref{thm:function_image_preimage_composition/preimage_of_image}} \subseteq
f^{-1}(f(A))
\reloset {\ref{thm:def:function_preimage/monotonicity}} \subseteq
f^{-1}(\cl(f(A))).
\end{equation*}
Apply \( f \circ \cl \) to the above chain of inclusions to obtain
\begin{equation*}
f(\cl(A))
\subseteq
f(\underbrace{\cl}_{\ref{def:global_continuity/closed}}(f^{-1}(\cl(f(A)))))
\reloset {\ref{thm:function_image_preimage_composition/image_of_preimage}} \subseteq
\cl(f(A)),
\end{equation*}
which proves the implication.
\ImplicationSubProof{def:global_continuity/closure}{def:global_continuity/closed} Fix a closed set \( F \subseteq Y \). Then
\begin{equation}\label{def:global_continuity/closure_implies_closed_right}
f(\cl(f^{-1}(F)))
\reloset {\ref{def:global_continuity/closure}} \subseteq
\cl(f(f^{-1}(F)))
\reloset {\ref{thm:function_image_preimage_composition/image_of_preimage}} \subseteq
\cl(F)
=
F.
\end{equation}
Since \( \cl \) is monotone, we have
\begin{equation}\label{def:global_continuity/closure_implies_closed_left}
f(\cl(f^{-1}(F)))
\supseteq
f(f^{-1}(F))
\reloset {\ref{thm:function_image_preimage_composition/preimage_of_image}} \supseteq
F.
\end{equation}
From \eqref{def:global_continuity/closure_implies_closed_right} and \eqref{def:global_continuity/closure_implies_closed_left} it follows that
\begin{equation*}
F = f(\cl(f^{-1}(F))).
\end{equation*}
By taking the preimage, we obtain
\begin{equation*}
f^{-1}(F)
=
f^{-1}(f(\cl(f^{-1}(F))))
\reloset {\ref{thm:function_image_preimage_composition/image_of_preimage}} \supseteq
\cl(f^{-1}(F)).
\end{equation*}
Therefore, \( f^{-1}(F) \) is closed.
\end{proof}
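As a simple illustration of \fullref{def:global_continuity/open}, consider \( \BbbR \) with its usual topology and the step function
\begin{equation*}
  f: \BbbR \to \BbbR,
  \qquad
  f(x) \coloneqq \begin{cases} 1, & x \geq 0, \\ 0, & x < 0, \end{cases}
  \qquad
  f^{-1}\left( \left( \tfrac{1}{2}, \tfrac{3}{2} \right) \right) = [0, +\infty).
\end{equation*}
The preimage of the open interval \( (\tfrac{1}{2}, \tfrac{3}{2}) \) is not open, hence \( f \) is not continuous; it fails to be continuous exactly at \( 0 \).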
\begin{definition}\label{def:homeomorphism}
We say that the continuous function \( f: X \to Y \) is \term{open} (resp. \term{closed}), if the image \( f(U) \) of an open (resp. closed) in \( \mscrT_X \) set is open (resp. closed) in \( \mscrT_Y \).
If \( f \) is an open bijection, we say that \( f \) is a \term{homeomorphism}. If \( f \) is only an open injection, we say that \( f \) is a \term{homeomorphic embedding}.
\end{definition}
\begin{definition}\label{def:parametric_curve}
Let \( I \) be an interval (of any type) in \( \BbbR \) with endpoints \( a < b \), not necessarily finite. Depending on the use case, we define a \term{parametric curve} on \( I \) by any of the non-equivalent definitions
\begin{thmenum}
\thmitem{def:parametric_curve/function} A continuous function \( \gamma: I \to X \) is called a parametric curve.
\thmitem{def:parametric_curve/image} The image \( \img(\gamma) \) of a parametric curve \( \gamma \) is also called a parametric curve.
\thmitem{def:parametric_curve/equivalence_class} The equivalence class of all continuous functions from \( I \) to \( X \) with
\begin{equation*}
\gamma \cong \beta \iff \img(\gamma) = \img(\beta) \text{ and the endpoints of } \gamma \text{ and } \beta \text{ coincide}
\end{equation*}
is also called a parametric curve.
\end{thmenum}
The points \( \gamma(a) \) and \( \gamma(b) \) are called the \term{endpoints} of the curve, \( \gamma(a) \) is the \term{start} and \( \gamma(b) \) is the \term{end}. We say that \( \gamma \) \term{connects} \( a \) and \( b \).
Parametric curves on \( I = [0, 1] \) are also called \term{paths}.
We define some fundamental types of curves:
\begin{thmenum}
\thmitem{def:parametric_curve/closed} The curve \( \gamma \) is called \term{closed} if its endpoints coincide, i.e. \( \gamma(a) = \gamma(b) \).
\thmitem{def:parametric_curve/simple} The curve \( \gamma \) is called \term{simple} if the function \( \gamma: I \to X \) is injective, with the possible exception of the endpoints (in which case we speak of \term{simple closed curves}).
\end{thmenum}
If \( \gamma: I \to X \) is a parametric curve, related curves are:
\begin{thmenum}
\thmitem{def:parametric_curve/function_graph}\mcite[def. 1.20]{ИвановТужилин2017}The \hyperref[def:multi_valued_function/graph]{graph} \( \gph(\gamma) \) of \( \gamma \) is the image of the curve \( \overline{\gamma}(t) \coloneqq (t, \gamma(t)) \) in the topological space \( I \times X \).
\thmitem{def:parametric_curve/implicit}\mcite[def. 1.24]{ИвановТужилин2017}If \( M \) is a subset of \( X \) and if there exists a curve \( \gamma: I \to X \) such that \( \img(\gamma) = M \), we call \( M \) an \term{implicit parametric curve}.
\end{thmenum}
\end{definition}
\begin{definition}\label{def:parametric_hypersurface}
In analogy to \fullref{def:parametric_curve} (and with the caveats of \fullref{def:parametric_curve}), we define \term{parametric hypersurfaces} as follows:
Let \( \xi \) be a potentially infinite cardinal number, let \( \card \mscrK = \xi \) and let \( \{ I_\alpha \}_{\alpha \in \mscrK} \) be a family of intervals in \( \BbbR \). We define a parametric hypersurface to be a continuous function from the \hyperref[def:topological_product]{product space} \( \prod_{\alpha \in \mscrK} I_\alpha \) to \( Y \).
We call \( \xi \) the \term{dimension} of the hypersurface.
\end{definition}
\begin{definition}\label{def:fundamental_groupoid}
\todo{Define fundamental groupoids}.
\end{definition}
\chapter{External protocols}
\section{Packages}
\subsection{Package for ordinary reader features}
\label{sec:package-ordinary-reader-features}
The package for ordinary reader features is named
\texttt{eclector.reader}. To use features of this package, we
recommend the use of explicit package prefixes, simply because this
package shadows and exports names that are also exported from the
\texttt{common-lisp} package. Importing this package will likely
cause conflicts with the \texttt{common-lisp} package otherwise.
\subsection{Package for readtable features}
\label{sec:package-readtable-features}
The package for readtable-related features is named
\texttt{eclector.readtable}. To use features of this package, we
recommend the use of explicit package prefixes, simply because this
package shadows and exports names that are also exported from the
\texttt{common-lisp} package. Importing this package will likely
cause conflicts with the \texttt{common-lisp} package otherwise.
\subsection{Package for parse result construction features}
\label{sec:package-parse-result}
The package for features related to the creation of client-defined
parse results is named \texttt{eclector.parse-result}. Although this
package does not shadow any symbol in the \texttt{common-lisp}
package, we still recommend the use of explicit package prefixes to
refer to symbols in this package.
\subsection{Package for CST features}
\label{sec:package-cst-features}
The package for features related to the creation of concrete syntax
trees is named \texttt{eclector.concrete-syntax-tree}. Although this
package does not shadow any symbol in the \texttt{common-lisp}
package, we still recommend the use of explicit package prefixes to
refer to symbols in this package.
\section{Ordinary reader features}
In this section, symbols written without package marker are in the
\texttt{eclector.reader} package
\seesec{sec:package-ordinary-reader-features}
\Defun {read} {\optional (input-stream \texttt{*standard-input*})\\
(eof-error-p \texttt{t})
(eof-value \texttt{nil})
(recursive-p \texttt{nil})}
This function is the main entry point for the ordinary reader. It is
entirely compatible with the standard \commonlisp{} function with the
same name.
\Defvar {*client*}
This variable is used by several generic functions called by
\texttt{read}. The default value of the variable is \texttt{nil}.
Client code that wants to override or extend the default behavior of
some generic function of \sysname{} should bind this variable to some
standard object and provide a method on that generic function,
specialized to the class of that standard object.
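As a minimal sketch of this pattern (the class \texttt{my-client} is a hypothetical example, not part of \sysname{}):
\begin{verbatim}
;;; A hypothetical client class; any standard object will do.
(defclass my-client () ())

;;; Bind *CLIENT* around the call to READ so that methods
;;; specialized to MY-CLIENT become applicable.
(let ((eclector.reader:*client* (make-instance 'my-client)))
  (with-input-from-string (stream "(1 2 3)")
    (eclector.reader:read stream)))
\end{verbatim}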
\Defgeneric {read-common} {client input-stream eof-error-p eof-value}
This generic function is called by \texttt{read}, passing it the value
of the variable \texttt{*client*} and the corresponding parameters.
Client code can add methods on this function, specializing them to the
client class of its choice.  The actions that \texttt{read} needs to
take for different values of the parameter \texttt{recursive-p} have
already been taken before \texttt{read} calls this generic function.
\Defgeneric {note-skipped-input} {client input-stream reason}
This generic function is called whenever the reader skips some input
such as a comment or a form that must be skipped because of a reader
conditional. It is called with the value of the variable
\texttt{*client*}, the input stream from which the input is being read
and an object indicating the reason for skipping the input. The
default method on this generic function does nothing. Client code can
supply a method that specializes to the client class of its choice.
When this function is called, the stream is positioned immediately
\emph{after} the skipped input. Client code that wants to know both
the beginning and the end of the skipped input must remember the
stream position before the call to \texttt{read} was made as well as
the stream position when the call to this function is made.
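For example, a method like the following (specialized to the hypothetical \texttt{my-client} class introduced above) simply reports each skipped range:
\begin{verbatim}
(defmethod eclector.reader:note-skipped-input
    ((client my-client) input-stream reason)
  (declare (ignore input-stream))
  (format t "~&Skipped some input; reason: ~S~%" reason))
\end{verbatim}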
\Defvar {*skip-reason*}
This variable is used by the reader to determine why a range of input
characters has been skipped. To this end, internal functions of the
reader as well as reader macros can set this variable to a suitable
value before skipping over some input. Then, after the input has been
skipped, the generic function \texttt{note-skipped-input} is called
with the value of the variable as its \textit{reason} argument.
As an example, the method on \texttt{note-skipped-input} specialized
to\\
\texttt{eclector.concrete-syntax-tree:cst-client} relays the reason
and position information to the client by calling the\\
\texttt{eclector.concrete-syntax-tree:record-skipped-input} generic
function \seesec{sec:cst-reader-features}
\Defgeneric {read-token} {client input-stream eof-error-p eof-value}
This generic function is called by \texttt{read-common} when it has
been detected that a token should be read. This function is
responsible for accumulating the characters of the token and then
calling \texttt{interpret-token} (see below) in order to create and
return a token.
\Defgeneric {interpret-token} {client input-stream token escape-ranges}
This generic function is called by \texttt{read-token} in order to
create a token from accumulated token characters. The parameter
\textit{token} is a string containing the characters that make up the
token. The parameter \textit{escape-ranges} indicates ranges of
characters read from \textit{input-stream} and preceded by a character
with single-escape syntax or delimited by characters with
multiple-escape syntax. Values of \textit{escape-ranges} are lists of
elements of the form \texttt{(\textit{start}\ .\ \textit{end})} where
\texttt{\textit{start}} is the index of the first escaped character
and \texttt{\textit{end}} is the index \textit{following} the last
escaped character. Note that \texttt{\textit{start}} and
\texttt{\textit{end}} can be identical indicating no escaped
characters. This can happen in cases like \texttt{a||b}. The
information conveyed by the \textit{escape-ranges} parameter is used
to convert the characters in \textit{token} according to the
\emph{readtable case} of the current readtable before a token is
constructed.
\Defgeneric {interpret-symbol-token} {client input-stream token \\
position-package-marker-1 position-package-marker-2}
This generic function is called by the default method on
\texttt{interpret-token} when the syntax of the token corresponds to
that of a valid symbol. The parameter \textit{input-stream} is the
input stream from which the characters were read. The parameter
\textit{token} is a string that contains all the characters of the
token. The parameter \textit{position-package-marker-1} contains the
index into \textit{token} of the first package marker, or \texttt{nil}
if the token contains no package markers. The parameter
\textit{position-package-marker-2} contains the index into
\textit{token} of the second package marker, or \texttt{nil} if the
token contains no package markers or only a single package marker.
The default method on this generic function calls
\texttt{interpret-symbol} (see below) with a symbol name string and a
package indicator.
\Defgeneric {interpret-symbol} {client input-stream package-indicator \\
symbol-name internp}
This generic function is called by the default method on
\texttt{interpret-symbol-token} as well as the default
\texttt{\#:}~reader macro function to resolve a symbol name string and
a package indicator to a representation of the designated symbol. The
parameter \textit{input-stream} is the input stream from which
\textit{package-indicator} and \textit{symbol-name} were read. The
parameter \textit{package-indicator} is either
\begin{itemize}
\item a string designating the package of that name
\item the keyword \texttt{:current} designating the current package
\item the keyword \texttt{:keyword} designating the keyword package
\item \texttt{nil} to indicate that an uninterned symbol should be
created
\end{itemize}
The \textit{symbol-name} is the name of the desired symbol.
The default method uses \texttt{cl:find-package} (or
\texttt{cl:*package*} when \textit{package-indicator} is
\texttt{:current}) to resolve \textit{package-indicator} followed by
\texttt{cl:find-symbol} or \texttt{cl:intern}, depending on
\textit{internp}, to resolve \textit{symbol-name}.
A second method which is specialized on \textit{package-indicator}
being \texttt{nil} uses \texttt{cl:make-symbol} to create uninterned
symbols.
\Defgeneric {call-reader-macro} {client input-stream char readtable}
This generic function is called when the reader has determined that
some character is associated with a reader macro. The parameter
\textit{char} has to be used in conjunction with the
\textit{readtable} parameter to obtain the macro function that is
associated with the macro character. The parameter
\textit{input-stream} is the input stream from which the reader macro
function will read additional input to accomplish its task.
The default method on this generic function simply obtains the reader
macro function for \textit{char} from \textit{readtable} and calls it,
passing \textit{input-stream} and \textit{char} as arguments. The
default method therefore does the same thing that the standard
\commonlisp{} reader does.
\Defgeneric {find-character} {client name}
This generic function is called by the default
\texttt{\#\textbackslash}~reader macro function to find a character by
name.  \textit{name} is the name that has been read, converted to upper
case. The function has to either return the character designated by
\textit{name} or \texttt{nil} if no such character exists.
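As a sketch, a client could consult a table of additional character names before falling back to \texttt{cl:name-char}; the table and its contents are purely illustrative and assume a Unicode-capable implementation:
\begin{verbatim}
(defvar *extra-character-names*
  (let ((table (make-hash-table :test #'equal)))
    (setf (gethash "BULLET" table) (code-char 8226)) ; U+2022
    table))

(defmethod eclector.reader:find-character ((client my-client) name)
  ;; NAME has already been converted to upper case by the caller.
  (or (gethash name *extra-character-names*)
      (name-char name)))
\end{verbatim}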
\Defgeneric {make-structure-instance} {client name initargs}
This generic function is called by the default \texttt{\#S}~reader
macro function to construct structure instances. \textit{name} is a
symbol naming the structure type of which an instance should be
constructed. \textit{initargs} is a list the elements of which
alternate between symbols naming structure slots and values for those
slots.
There is no default method on this generic function since there is no
portable way to construct structure instances given only the name of
the structure type.
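A client can nevertheless provide a method based on implementation-specific or conventional knowledge.  The following sketch assumes that each structure type has a default keyword constructor named \texttt{MAKE-\textit{name}} in the same package as \textit{name}:
\begin{verbatim}
(defmethod eclector.reader:make-structure-instance
    ((client my-client) name initargs)
  (let ((constructor (find-symbol
                      (concatenate 'string "MAKE-" (symbol-name name))
                      (symbol-package name))))
    ;; Convert slot names to keywords and call the constructor.
    (apply constructor
           (loop for (slot value) on initargs by #'cddr
                 collect (intern (symbol-name slot) :keyword)
                 collect value))))
\end{verbatim}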
\Defgeneric {evaluate-expression} {client expression}
This generic function is called by the default \texttt{\#.}~reader
macro function to perform read-time evaluation. \textit{expression}
is the expression that should be evaluated as it was returned by a
recursive \texttt{read} call and potentially influenced by
\textit{client}. The function has to either return the result of
evaluating \textit{expression} or signal an error.
The default method on this generic function simply returns the result
of \texttt{(cl:eval expression)}.
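A client that wants to disable read-time evaluation altogether could instead provide a method that signals an error (again using the hypothetical \texttt{my-client} class):
\begin{verbatim}
(defmethod eclector.reader:evaluate-expression ((client my-client) expression)
  (error "Read-time evaluation of ~S is not allowed." expression))
\end{verbatim}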
\Defgeneric {check-feature-expression} {client feature-expression}
This generic function is called by the default \texttt{\#+}- and
\texttt{\#-}-reader macro functions to check the well-formedness of
\textit{feature-expression} which has been read from the input stream
before evaluating it. For compound expressions, only the outermost
expression is checked regarding the atom in operator position and its
shape -- child expressions are not checked. The function returns an
unspecified value if \textit{feature-expression} is well-formed and
signals an error otherwise.
The default method on this generic function accepts standard
\commonlisp{} feature expressions, i.e.\ expressions recursively
composed of symbols, \texttt{:not}-expressions,
\texttt{:and}-expressions and \texttt{:or}-expressions.
\Defgeneric {evaluate-feature-expression} {client feature-expression}
This generic function is called by the default \texttt{\#+}- and
\texttt{\#-}-reader macro functions to evaluate
\textit{feature-expression} which has been read from the input stream.
The function returns either true or false if
\textit{feature-expression} is well-formed and signals an error
otherwise.
For compound feature expressions, the well-formedness of child
expressions is not checked immediately but lazily, just before the
child expression in question is evaluated in a subsequent
\texttt{evaluate-feature-expression} call. This allows feature
expressions like \texttt{\#+(and my-cl-implementation (special-feature
a b))} to succeed when the \texttt{:my-cl-implementation} feature is
absent.
The default method on this generic function first calls
\texttt{check-feature-expression} to check the well-formedness of
\textit{feature-expression}. It then evaluates
\textit{feature-expression} according to standard \commonlisp{}
semantics for feature expressions.
\Defgeneric {fixup} {client object seen-objects mapping}
This generic function is potentially called to apply
circularity-related changes to the object constructed by the reader
before it is returned to the caller. \textit{object} is the object
that should be modified.  \textit{seen-objects} is an \texttt{eq}-hash
table used to track already processed objects (see below).
\textit{mapping} is a hash table of substitutions, mapping marker
objects to replacement objects. A method specialized on a class,
instances of which consist of parts, should modify \textit{object} by
scanning its parts for marker objects, replacing found markers with the
corresponding replacement objects and recursively calling \texttt{fixup} for all
parts. \texttt{fixup} is called for side effects -- its return value
is ignored.
Default methods specializing on the \textit{object} parameter for
\texttt{cons}, \texttt{array}, \texttt{standard-object} and
\texttt{hash-table} process instances of those classes in the obvious
way.
An unspecialized \texttt{:around} method queries and updates
\textit{seen-objects} to ensure that each object is processed exactly
once.
\Defgeneric {wrap-in-quote} {client material}
This generic function is called by the default \texttt{'}-reader macro
function to construct a quotation form in which \textit{material} is
the quoted material.
The default method on this generic function returns a result
equivalent to \texttt{(list 'common-lisp:quote material)}.
\Defgeneric {wrap-in-quasiquote} {client form}
This generic function is called by the default \texttt{`}-reader macro
function to construct a quasiquotation form in which \textit{form} is
the quasiquoted material.
The default method on this generic function returns a result
equivalent to \texttt{(list 'eclector.reader:quasiquote form)}.
\Defgeneric {wrap-in-unquote} {client form}
This generic function is called by the default \texttt{,}-reader macro
function to construct an unquote form in which \textit{form} is the
unquoted material.
The default method on this generic function returns a result
equivalent to \texttt{(list 'eclector.reader:unquote form)}.
\Defgeneric {wrap-in-unquote-splicing} {client form}
This generic function is called by the default \texttt{,@}-reader
macro function to construct a splicing unquote form in which
\textit{form} is the unquoted material.
The default method on this generic function returns a result
equivalent to \texttt{(list 'eclector.reader:unquote-splicing form)}.
\subsubsection{Readtable Initialization}
\label{sec:readtable-initialization}
The standard syntax types and macro character associations used by the
ordinary reader can be set up for any readtable object implementing
the readtable protocol \seesec{sec:readtable-features} The following
functions are provided for this purpose:
\Defun {set-standard-syntax-types} (readtable)
This function sets the standard syntax types in \textit{readtable}
(See HyperSpec section 2.1.4.)
\Defun {set-standard-macro-characters} (readtable)
This function sets the standard macro characters in \textit{readtable}
(See HyperSpec section 2.4.)
\Defun {set-standard-dispatch-macro-characters} (readtable)
This function sets the standard dispatch macro characters, that is,
sharpsign and its sub-characters, in \textit{readtable} (See HyperSpec
section 2.4.8.)
\Defun {set-standard-syntax-and-macros} (readtable)
This function sets the standard syntax types and macro characters in
\textit{readtable} by calling the above three functions.
\section{Readtable Features}
\label{sec:readtable-features}
In this section, symbols written without package marker are in the
\texttt{eclector.readtable} package
\seesec{sec:package-readtable-features}
TODO
\section{Parse result construction features}
\label{sec:parse-result-construction-features}
In this section, symbols written without package marker are in the
\texttt{eclector.parse-result} package
\seesec{sec:package-parse-result}
This package provides clients with a reader that behaves similarly to
\texttt{cl:read} but returns custom parse result objects controlled by
the client. Some parse results correspond to things like symbols,
numbers and lists that \texttt{cl:read} would return, while others, if
the client chooses, represent comments and other kind of input that
\texttt{cl:read} would discard. Furthermore, clients can associate
source location information with parse results.
Clients using this package must bind the special variable
\texttt{eclector.reader:*client*} around calls to \texttt{read} to an
instance for which methods on the generic functions described below
are applicable. Suitable client classes can be constructed by using
\texttt{parse-result-client} as a superclass and at least defining a
method on the generic function \texttt{make-expression-result}.
\Defun {read} {client \optional (input-stream \texttt{*standard-input*})\\
(eof-error-p \texttt{t})
(eof-value \texttt{nil})}
This function is the main entry point for this variant of the reader.
It is in many ways similar to the standard \commonlisp{} function
\texttt{read}. The differences are:
\begin{itemize}
\item A client instance must be supplied as the first argument.
\item The first return value, unless \textit{eof-value} is returned,
is an arbitrary parse result object created by the client, not
generally the read object.
\item The second return value, unless \textit{eof-value} is returned,
is a list of ``orphan'' results. These results are return values of
\texttt{make-skipped-input-result} and arise when skipping input at
the toplevel such as comments which are not lexically contained in
lists: \texttt{\#|orphan|\# (\#|not orphan|\#)}.
\item The function does not accept a \textit{recursive} parameter
since it sets up a dynamic environment in which calls to
\texttt{eclector.reader:read} behave suitably.
\end{itemize}
\Defclass {parse-result-client}
This class should generally be used as a superclass for client classes
using this package.
\Defgeneric {source-position} {client stream}
This generic function is called in order to determine the current
position in \textit{stream}. The default method calls
\texttt{cl:file-position}.
\Defgeneric {make-source-range} {client start end}
This generic function is called in order to turn the source positions
\textit{start} and \textit{end} into a range representation suitable
for \textit{client}. The returned representation designates the range
of input characters from and including the character at position
\textit{start} to but not including the character at position
\textit{end}. The default method returns \texttt{(cons start end)}.
\Defgeneric {make-expression-result} {client result children source}
This generic function is called in order to construct a parse result
object. The value of the \textit{result} parameter is the raw object
read. The value of the \textit{children} parameter is a list of
already constructed parse result objects representing objects read by
recursive \texttt{read} calls. The value of the \textit{source}
parameter is a source range, as returned by \texttt{make-source-range}
and \texttt{source-position} delimiting the range of characters from
which \textit{result} has been read.
This generic function does not have a default method since the purpose
of the package is the construction of \emph{custom} parse results.
Thus, a client must define a method on this generic function.
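As an illustration, the following sketch (the class name \texttt{sketch-client} is ours) represents every expression as a property list:
\begin{verbatim}
(defclass sketch-client (eclector.parse-result:parse-result-client) ())

(defmethod eclector.parse-result:make-expression-result
    ((client sketch-client) result children source)
  (list :raw result :children children :source source))

;;; Use the client: pass it to READ and, as described above, also bind
;;; ECLECTOR.READER:*CLIENT* to it around the call.
(let* ((client (make-instance 'sketch-client))
       (eclector.reader:*client* client))
  (with-input-from-string (stream "(+ 1 2)")
    (eclector.parse-result:read client stream)))
\end{verbatim}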
\Defgeneric {make-skipped-input-result} {client stream reason source}
This generic function is called after the reader skipped over a range
of characters in \textit{stream}. It returns either \texttt{nil} if
the skipped input should not be represented or a client-specific
representation of the skipped input. The value of the \textit{source}
parameter designates the skipped range using a source range
representation obtained via \texttt{make-source-range} and
\texttt{source-position}.
Reasons for skipping input include comments, the \texttt{\#+} and
\texttt{\#-} reader macros and \texttt{*read-suppress*}. The
aforementioned reasons are reflected by the value of the
\textit{reason} parameter as follows:
\begin{tabular}{ll}
Input & Value of the \textit{reason} parameter\\
\hline
Comment starting with \texttt{;} & \texttt{(:line-comment . 1)}\\
Comment starting with \texttt{;;} & \texttt{(:line-comment . 2)}\\
Comment starting with $n$ \texttt{;} & \texttt{(:line-comment . $n$)}\\
Comment delimited by \texttt{\#|} \texttt{|\#} & \texttt{:block-comment}\\
\texttt{\#+\textit{false-feature-expression}} & \texttt{:reader-macro}\\
\texttt{\#-\textit{true-feature-expression}} & \texttt{:reader-macro}\\
\texttt{*read-suppress*} is true & \texttt{*read-suppress*}
\end{tabular}
The default method returns \texttt{nil}, that is, the skipped input is
not represented as a parse result.
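Continuing the hypothetical \texttt{sketch-client} example, skipped input could be turned into simple parse results like this:
\begin{verbatim}
(defmethod eclector.parse-result:make-skipped-input-result
    ((client sketch-client) stream reason source)
  (declare (ignore stream))
  (list :skipped reason :source source))
\end{verbatim}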
\section{CST reader features}
\label{sec:cst-reader-features}
In this section, symbols written without package marker are in the
\texttt{eclector.concrete-syntax-tree} package
\seesec{sec:package-cst-features}
\Defun {cst-read} {\optional (input-stream \texttt{*standard-input*})\\
(eof-error-p \texttt{t})
(eof-value \texttt{nil})}
This function is the main entry point for the CST reader. It is
mostly compatible with the standard \commonlisp{} function
\texttt{read}. The differences are:
\begin{itemize}
\item The return value, unless \textit{eof-value} is returned, is an
instance of a subclass of \texttt{concrete-syntax-tree:cst}.
\item The function does not accept a \textit{recursive} parameter
since it sets up a dynamic environment in which calls to
\texttt{eclector.reader:read} behave suitably.
\end{itemize}
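A minimal usage sketch; it assumes the \texttt{concrete-syntax-tree} library's \texttt{raw} reader for recovering the underlying expression:
\begin{verbatim}
(with-input-from-string (stream "(list 1 2)")
  (let ((cst (eclector.concrete-syntax-tree:cst-read stream)))
    ;; CST is an instance of a subclass of CONCRETE-SYNTAX-TREE:CST;
    ;; RAW returns the underlying expression, here (LIST 1 2).
    (values cst (concrete-syntax-tree:raw cst))))
\end{verbatim}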
%
%\eucommentary{Please provide the following:
%\begin{compactitem}
%\item
%a table showing number of person/months required (table 3.4a)
%\item
%a table showing 'other direct costs' (table 3.4b) for participants where
%those costs exceed 15\% of the personnel costs (according to the budget
%table in section 3 of the administrative proposal forms)
%\end{compactitem}}
%
\TheProject applies for a total budget of \textbf{\euro 5,956,255.00} as the amount
required to achieve the objectives. The total budget is described in the subsequent
sections together with the staff effort necessary to implement the action (Table 3.4.1).
The necessary physical resources, the quantities of each and when they would be
needed have been carefully determined on the basis of the following criteria:
\begin{enumerate}
\item \textbf{Historical information}: the partners involved are well experienced in
Jupyter development, software for education, processing and scale-up; past experience
from each partner has been taken into consideration to evaluate what resources, and in
what quantities, will be needed in the project.
\item \textbf{Work plan structure}: the deliverables and milestones identified in the project work plan.
\item \textbf{The inputs necessary to resource planning}: the timetable of activities has helped to identify when each resource will be needed in the project.
\item \textbf{Resource pool description}: the resources available in the consortium have
been carefully analysed in order to avoid any duplication of existing resources
and to allocate the necessary resources efficiently and effectively.
\item \textbf{Cost estimating}: the approximate costs of the resources needed to
complete the project activities successfully have been estimated on the basis
of: a) Resource planning results, b) Activity duration estimation (Gantt and effort form),
c) Commercially available data on durables and consumables needed,
d) Preliminary Risk Assessments.
The financial allocation among the 11 partners reflects the tasks committed to by each partner
and the collaborative nature of the project itself. On the whole, the financial allocation is
well-balanced and homogeneous.
\end{enumerate}
\subsubsection{Management Level Description of Resources and Budget}
\label{sect:budget-details}
\paragraph{Staff efforts}
\eucommentary{Please indicate the number of person/months over the whole
duration of the planned work, for each work package, for each participant.
Identify the work-package leader for each WP by showing the relevant
person-month figure in bold.}
The \TheProject project gathers sites with core developers from
the Jupyter project and a history of open source software development,
and brings them together with domain specialists from a range of
fields. The major investment of the project is in software
development, which is realised through person time and is displayed in table~3.4.1.
\ifgrantagreement.\else{} %
\wpfig[label=fig:staffeffort,caption=Summary of Staff Efforts]
\fi
\paragraph{Travel, dissemination, and outreach}
The nature of this proposal -- of providing a framework that allows
design and deployment of innovative services -- means that the project
has the potential to have high impact for EOSC. At the same time, it
requires input from and engagement with a significant number of
stakeholders, including potential users of the services such as
scientists, developers of other services and other EOSC-funded
projects, other open science software projects, and the developing
EOSC itself. Consequently, requirements capture, networking, feedback,
training and education workshops and outreach activities are all
important, and the second highest cost for this project.
\subparagraph{Guidelines for travel and dissemination}
\label{sect:budget-details-travel}
We use the following guidelines for expected travel expenses:
\euro{2500} for attendance of a typical one week international
conference outside Europe (including travel, subsistence,
accommodation and registration), \euro{1250} for a corresponding
conference in Europe, \euro{750} for a one-week visit of a project
partner, for instance for coding sprints and one-to-one
research visits. We expect a similar cost per week while hosting
visitors. For the half-yearly project meetings, we expect on average a
cost of \euro{500} for travel, accommodation and subsistence.
Anticipated activities:
\begin{enumerate}
\item \emph{Project meetings}: For the 9 project meetings that take place every 6 months, we expect
the PI from each site to attend all of them (cost of 9 * 500 = \euro{4500}). For
a researcher, we also expect that they attend all such project meetings
(\euro{4500}).
\footnote{PI and researcher roles are defined for each site in section \ref{sec:participants}}
We include in this item local expenses for the
organization and catering of meetings, travel, accommodation and
subsistence of attendants.
\item \emph{Hosting visitors}: We expect that the site spends \euro{2000} per year to host
external visitors contributing to the project (total \euro{8000}).
\item \emph{Site visits}: We expect the researcher to carry out 3 one-week visits to other sites
  (each at \euro{750}) every year, totalling 3 * 4 * 750 = \euro{9000}
  over 4 years.
\item
\emph{Conference dissemination}: We expect the researcher to attend on average 1
international conference and 1 European meeting per year (cost of 4 *
2500 + 4 * 1250 = \euro{15000}) and the investigator to attend the
equivalent of one international or two
  European gatherings (a total of \euro{10000}).
\item \emph{Advisory board}: For organisation, catering and attendance
of 5 advisory board members to 5 meetings (kickoff and then
annually), we budget \euro{750} per person and meeting, and allocate
the total of \euro{18750} to \site{SRL}'s travel budget only.
%\item
%\emph{Dissemination material}: We expect that each site spends an
%average of \euro{2000} to participate to the production of
%dissemination material for the project such as website, logos, flyers,
%posters, goodies, explainer comics or videos, hiring professionals
%when needed.
\end{enumerate}
Where there are multiple investigators per site, they will share the
travel and associated costs outlined above. Where there are multiple
researchers, or researchers not employed for the full 48 months, the
travel budget is adapted accordingly.
\subparagraph{Guidelines for outreach costs}
\label{sect:budget-outreach-publication-charges}
\emph{Publication charges}: We also request \euro{3000} per partner to pay for open
access publication charges. (Some partners have other means to pay
these costs, and for them these are not needed.)
\label{sect:budget-outreach-workshops}
\emph{Workshops}: We request funds for dissemination and outreach
activities such as workshops that facilitate community building,
provide training, disseminate best practice and encourage
sustained contributions from the community to the project beyond the
lifetime of the funding. For a one-week workshop that we organise,
we will typically use meeting rooms of the project partners to minimise
cost, and assume a cost of \euro{400} per participant to provide location
and subsidise accommodation and catering for attendees. A workshop for 10 people
will thus cost about \euro{4000}. Participants
donate their time and need to fund their travel from other sources. By
partially contributing to the attendance cost, we hope to enable PhD
students to engage with the project and expect positive effects on the
sustainability of the activities, by embedding the tools and knowledge
with the next generation of scientists.
To support dissemination of the project, we also expect modest
expenses for websites and logo design, flyers, posters, goodies, explainer
comics or hiring professional support when needed.
Details are given in the tables in section \ref{resources.summary} below
and in the work packages.
\bigskip
\subsubsection{Resource summaries for consortium member sites}
\label{resources.summary}
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% Guidelines for completion of partner specific resource summary:
%
%
% Please explain how many person months for each person are
% requested. Say who is the local lead. Say anything that helps to
% understand why people are recruited as you plan, in particular if
% this deviates from having one research for 48 months. We can also
% use this bit of the proposal (and the table, see below) to address
% any other unusual arrangements.
%
%
% The table should contain all non-staff costs (the EU requests that
% this table must be present if the non-staff costs exceed
% 15% of the total cost, but it is good practice and will show
% openness and transparency that we show the data for all partners).
%
% Link back from the table to the work packages and tasks for which
% the expenses are required. Add information that makes it easier to
% understand why the expenses are justified.
%
% To refer to a task in a work package, use "\taskref{WP-ID}{TASK-ID}" where
% WP-ID is the ID of the work package:
% WP#: WP-ID - full title
% ----------------------
% WP1: 'management' - Management
% WP2: 'community' - Community Building and Engagement
% WP3: 'component-architecture' - Component Architecture
% WP4: 'UI' - User interfaces
% WP5: 'hpc' - High Performance Computing
% WP6: 'dksbases' - Data/Knowledge/Software-Bases
% WP7: 'social-aspects' - Social Aspects
% WP8: 'dissem' - Dissemination
%
%
% and "TASK-ID" is the ID of the task. You can set this using
%
% \begin{task}[id=TASK-ID,title=Math Search Engine,lead=JU,PM=10,lead=JU]
%
% To refer to deliverables, use "\delivref{WP-ID}{DELIV-ID}" where DELIV-ID is
% the ID of the deliverable that can be set like this:
%
% \begin{wpdeliv}[due=36,id=DELIV-ID,dissem=PU,nature=DEM]
% {Exploratory support for semantic-aware interactive widgets providing views on objects
% represented and or in databases}
% \end{wpdeliv}
%
%
% The table is pre-populated with entries most sites are likely
% to need. If a line does not apply to you, just delete it. If you need
% an extra line, then add it. Use common sense: the number of rows should not
% be very big, but at the same time it is useful to give some breakdown/explanation
% of costs.
%
%
% Eventually, try to create you entry similar in style to the others.
% (The Southampton entry is fully populated, so use this as guidance
% if in doubt.)
%
%
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
In this section we briefly describe the requested resources. See the
participant descriptions in the description of the consortium for the
specific role of each member.
Some partners do not require all the costs outlined above in
section~\ref{sect:budget-details} and have accordingly reduced their
requirements below. \bigskip
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
\paragraph{Resources Simula Research Laboratory}
% See line 122 in
% https://docs.google.com/spreadsheets/d/1vXm5Z2pIWIf6UCWWkXDQ4Fti8PM6xBnpXNhehUAKmuk/edit#gid=322027044 for details
%
% Travel
% Assume PI (50%) and 2 FTE researchers
%
% Project meetings:
% 3 * 9000 = 27,000
% Hosting visitors = 8000
% Site visits 2 * 9000 = 18000
% Conference dissemination = 2 * 15000 + 1 * 10000 = 40000
%
% Publication costs: 3000
% Workshops: 4 x 10 people = 16,000
%
%
%
% Cloud computing
% financial audit
% laptops
\site{SRL} requests 124 person months to provide the effort required.
\bigskip
\begin{table}[H]
\begin{tabular}{|r|r|p{8.5cm}|}
\hline
\textbf{\site{SRL}} & \textbf{Cost (\euro)} & \textbf{Justification} \\\hline
\textbf{Travel} & 98250 & Travel for PI and 2 researchers and the advisory board (see
\ref{sect:budget-details-travel})\\\hline
\textbf{Workshops} & 16000 & Workshops (40 attendees in total) (see \ref{sect:budget-details-travel})\\\hline
\textbf{Publication charges}
& 3000 & Open access publication charges (see \ref{sect:budget-outreach-publication-charges})\\\hline
%%\textbf{Equipment}
%% & 0 & \\\hline %\taskref{WP-ID}{TASK-ID}
\textbf{Other goods and services}
& 3500 & Financial audit \\\hline
& 7500 & Consumables (3 High Performance laptops for workshops,
sprints, dissemination ) \\\hline
\textbf{Total}
& 128250\\\cline{1-2}
\end{tabular}
\caption{Overview: Non-staff resources to be committed at Simula
Research Laboratory
(all in \texteuro)}\vspace*{-1em}
\end{table}
%% %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%% \paragraph{Resources Facility}
%%
%% \site{...} requests
%% X person months for
%%
%% \taskref{wpid}{taskid}
%%
%%
%% \bigskip
%% \begin{table}[H]
%% \begin{tabular}{|r|r|p{8.5cm}|}
%% \hline
%% \textbf{2: \site{SRL}} & \textbf{Cost (\euro)} & \textbf{Justification} \\\hline
%% \textbf{Travel}
%% & XXX & Travel (see \ref{sect:budget-details-travel})\\\hline
%% \textbf{Publication charges}
%% & XXX & Open access publication charges (see \ref{sect:budget-outreach-publication-charges})\\\hline
%% %%\textbf{Equipment}
%% %% & 0 & \\\hline %\taskref{WP-ID}{TASK-ID}
%% \textbf{Other goods and services}
%% & XXX &
%% \\\hline %\taskref{WP-ID}{TASK-ID} \delivref{WP-ID}{DELIV-ID}
%% \textbf{Total}
%% & XXX\\\cline{1-2}
%% \end{tabular}
%% \caption{Overview: Non-staff resources to be committed at CNRS (all in \texteuro)}\vspace*{-1em}
%% \end{table}
%%
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
\paragraph{Resources CNRS Observatoire Astronomique de Strasbourg}
\site{CDS} requests 18 person months to provide the effort required.
\bigskip
\begin{table}[H]
\begin{tabular}{|r|r|p{8.5cm}|}
\hline
\textbf{\site{CDS}} & \textbf{Cost (\euro)} & \textbf{Justification} \\\hline
\textbf{Travel} & 13750 & Travel for PI and $\approx$0.4 researchers (see
\ref{sect:budget-details-travel})\\\hline
\textbf{Workshops} & 4000 & Workshop (10 attendees in total) (see \ref{sect:budget-details-travel})\\\hline
\textbf{Publication charges}
& 3000 & Open access publication charges (see \ref{sect:budget-outreach-publication-charges})\\\hline
%%\textbf{Equipment}
%% & 0 & \\\hline %\taskref{WP-ID}{TASK-ID}
\textbf{Total}
& 20750 \\\cline{1-2}
\end{tabular}
\caption{Overview: Non-staff resources to be committed at CNRS-ObAS (all in \texteuro)}\vspace*{-1em}
\end{table}
\paragraph{Resources \'Ecole Polytechnique}
\site{EP} requests 52 person months to provide the effort required.
\bigskip
\begin{table}[H]
\begin{tabular}{|r|r|p{8.5cm}|}
\hline
\textbf{\site{EP}} & \textbf{Cost (\euro)} & \textbf{Justification} \\\hline
\textbf{Travel} & 45000 & Travel for PI and 1 researcher (see
\ref{sect:budget-details-travel})\\\hline
\textbf{Workshops} & 8000 & Workshop (20 attendees in total) (see \ref{sect:budget-details-travel})\\\hline
\textbf{Publication charges}
& 3000 & Open access publication charges (see \ref{sect:budget-outreach-publication-charges})\\\hline
%%\textbf{Equipment}
%% & 0 & \\\hline %\taskref{WP-ID}{TASK-ID}
\textbf{Other goods and services}
& 5000 & Financial audit \\\hline
\textbf{Total}
& 61000 \\\cline{1-2}
\end{tabular}
\caption{Overview: Non-staff resources to be committed at \'Ecole Polytechnique
(all in \texteuro)}\vspace*{-1em}
\end{table}
\paragraph{Resources EGI}
\site{EGI} requests 24 person months to provide the effort required.
\bigskip
\begin{table}[H]
\begin{tabular}{|r|r|p{8.5cm}|}
\hline
\textbf{\site{EGI}} & \textbf{Cost (\euro)} & \textbf{Justification} \\\hline
\textbf{Travel} & 20750 & Travel for PI and 0.5 researchers (see
\ref{sect:budget-details-travel})\\\hline
%%\textbf{Equipment}
%% & 0 & \\\hline %\taskref{WP-ID}{TASK-ID}
\textbf{Other goods and services}
& 4500 & Financial audit \\\hline
& 180000 & Cloud computing (180k; 5k/month) for
operation of services including \taskref{eosc}{eu-binder}.
\\\hline
\textbf{Total}
& 205250 \\\cline{1-2}
\end{tabular}
\caption{Overview: Non-staff resources to be committed at EGI (all in \texteuro)}\vspace*{-1em}
\end{table}
\paragraph{Resources European XFEL}
\site{XFEL} requests 123 person months to provide the effort required.
\bigskip
\begin{table}[H]
\begin{tabular}{|r|r|p{8.5cm}|}
\hline
\textbf{\site{XFEL}} & \textbf{Cost (\euro)} & \textbf{Justification} \\\hline
\textbf{Travel} & 79500 & Travel for PI and 2 researchers (see
\ref{sect:budget-details-travel})\\\hline
\textbf{Workshops} & 16000 & Workshops (40 attendees in total) (see \ref{sect:budget-details-travel})\\\hline
\textbf{Publication charges}
& 3000 & Open access publication charges (see \ref{sect:budget-outreach-publication-charges})\\\hline
%%\textbf{Equipment}
%% & 0 & \\\hline %\taskref{WP-ID}{TASK-ID}
\textbf{Other goods and services}
& 6500 & Financial audit \\\hline
& 7500 & Consumables (3 High Performance laptops for workshops,
sprints, dissemination ) \\\hline
\textbf{Total}
& 112500 \\\cline{1-2}
\end{tabular}
\caption{Overview: Non-staff resources to be committed at European XFEL (all in \texteuro)}\vspace*{-1em}
\end{table}
\paragraph{Resources INSERM}
\site{INSERM} requests 39 person months to provide the effort required.
\bigskip
\begin{table}[H]
\begin{tabular}{|r|r|p{8.5cm}|}
\hline
\textbf{\site{INSERM}} & \textbf{Cost (\euro)} & \textbf{Justification} \\\hline
\textbf{Travel} & 37300 & Travel for PI and $\approx$0.8 researchers (see
\ref{sect:budget-details-travel})\\\hline
\textbf{Workshops} & 4000 & Workshop (10 attendees in total) (see \ref{sect:budget-details-travel})\\\hline
\textbf{Publication charges}
& 3000 & Open access publication charges (see \ref{sect:budget-outreach-publication-charges})\\\hline
%%\textbf{Equipment}
%% & 0 & \\\hline %\taskref{WP-ID}{TASK-ID}
\textbf{Other goods and services}
% & 3500 & Financial audit \\\hline
& 2500 & Consumables (1 High Performance laptop for workshops,
sprints, dissemination) \\\hline
\textbf{Total}
& 46800 \\\cline{1-2}
\end{tabular}
\caption{Overview: Non-staff resources to be committed at INSERM
(all in \texteuro)}\vspace*{-1em}
\end{table}
\paragraph{Resources QuantStack}
\site{QS} requests 48 person months to provide the effort required.
\bigskip
\begin{table}[H]
\begin{tabular}{|r|r|p{8.5cm}|}
\hline
\textbf{\site{QS}} & \textbf{Cost (\euro)} & \textbf{Justification} \\\hline
\textbf{Travel} & 51000 & Travel for PI and 1 researcher (see
\ref{sect:budget-details-travel})\\\hline
\textbf{Workshops} & 8000 & Workshops (40 attendees in total) (see \ref{sect:budget-details-travel})\\\hline
\textbf{Publication charges}
& 3000 & Open access publication charges (see \ref{sect:budget-outreach-publication-charges})\\\hline
%%\textbf{Equipment}
%% & 0 & \\\hline %\taskref{WP-ID}{TASK-ID}
\textbf{Other goods and services}
& 4000 & Financial audit \\\hline
& 2500 & Consumables (1 High Performance laptop for workshops,
sprints, dissemination ) \\\hline
\textbf{Total}
& 68500 \\\cline{1-2}
\end{tabular}
\caption{Overview: Non-staff resources to be committed at QuantStack (all in \texteuro)}\vspace*{-1em}
\end{table}
\paragraph{Resources University of Oslo}
\site{UIO} requests 27 person months to provide the effort required.
\bigskip
\begin{table}[H]
\begin{tabular}{|r|r|p{8.5cm}|}
\hline
\textbf{\site{UIO}} & \textbf{Cost (\euro)} & \textbf{Justification} \\\hline
\textbf{Travel} & 28060 & Travel for PI and $\approx$0.5 researchers (see
\ref{sect:budget-details-travel})\\\hline
\textbf{Workshops} & 8000 & Workshop (20 attendees in total) (see \ref{sect:budget-details-travel})\\\hline
\textbf{Publication charges}
& 3000 & Open access publication charges (see \ref{sect:budget-outreach-publication-charges})\\\hline
%%\textbf{Equipment}
%% & 0 & \\\hline %\taskref{WP-ID}{TASK-ID}
\textbf{Other goods and services}
% & 3500 & Financial audit \\\hline
& 2500 & Consumables (1 High Performance laptop for workshops,
sprints, dissemination) \\\hline
\textbf{Total}
& 41560 \\\cline{1-2}
\end{tabular}
\caption{Overview: Non-staff resources to be committed at University
of Oslo
(all in \texteuro)}\vspace*{-1em}
\end{table}
\paragraph{Resources University Paris-Sud}
\site{UPSUD} requests 42 person months to provide the effort required.
\bigskip
\begin{table}[H]
\begin{tabular}{|r|r|p{8.5cm}|}
\hline
\textbf{\site{UPSUD}} & \textbf{Cost (\euro)} & \textbf{Justification} \\\hline
\textbf{Travel} & 35375 & Travel for PI and $\approx$0.75 researchers (see
\ref{sect:budget-details-travel})\\\hline
\textbf{Workshops} & 13500 & 2 workshops (34 attendees in total) (see the guidelines \ref{sect:budget-details-travel})\\\hline
% \textbf{Publication charges}
% & 3000 & Open access publication charges (see \ref{sect:budget-outreach-publication-charges})\\\hline
%%\textbf{Equipment}
%% & 0 & \\\hline %\taskref{WP-ID}{TASK-ID}
% \textbf{Other goods and services}
% & 3500 & Financial audit \\\hline % Done internally for this kind of expenses at Paris-Sud
% & 2500 & Consumables (1 High Performance laptop for workshops,
% sprints, dissemination) \\\hline
\textbf{Total}
& 48875 \\\cline{1-2}
\end{tabular}
\caption{Overview: Non-staff resources to be committed at University
Paris Sud
(all in \texteuro)}\vspace*{-1em}
\end{table}
\paragraph{Resources University of Silesia}
\site{SIL} requests 24 person months to provide the effort required.
\bigskip
\begin{table}[H]
\begin{tabular}{|r|r|p{8.5cm}|}
\hline
\textbf{\site{SIL}} & \textbf{Cost (\euro)} & \textbf{Justification} \\\hline
\textbf{Travel} & 25750 & Travel for PI and 0.5 researchers (see
\ref{sect:budget-details-travel})\\\hline
%\textbf{Workshops} & 8000 & Workshop (20 attendees in total) (see \ref{sect:budget-details-travel})\\\hline
\textbf{Publication charges}
& 3000 & Open access publication charges (see \ref{sect:budget-outreach-publication-charges})\\\hline
%%\textbf{Equipment}
%% & 0 & \\\hline %\taskref{WP-ID}{TASK-ID}
\textbf{Other goods and services}
% & 3500 & Financial audit \\\hline
& 52000 & Contracted development \\\hline
& 2500 & Consumables (1 High Performance laptop for workshops,
sprints, dissemination) \\\hline
\textbf{Total}
& 83250 \\\cline{1-2}
\end{tabular}
\caption{Overview: Non-staff resources to be committed at University of Silesia
(all in \texteuro)}\vspace*{-1em}
\end{table}
\paragraph{Resources Wild Tree Tech}
\site{WTT} requests 36 person months to provide the effort required.
\bigskip
\begin{table}[H]
\begin{tabular}{|r|r|p{8.5cm}|}
\hline
\textbf{\site{WTT}} & \textbf{Cost (\euro)} & \textbf{Justification} \\\hline
\textbf{Travel} & 36145 & Travel for PI and 0.75 researchers (see
\ref{sect:budget-details-travel})\\\hline
%%\textbf{Equipment}
%% & 0 & \\\hline %\taskref{WP-ID}{TASK-ID}
\textbf{Other goods and services}
& 4000 & Financial audit \\\hline
& 2500 & Consumables (1 High Performance laptop for workshops,
sprints, dissemination) \\\hline
\textbf{Total}
& 42645 \\\cline{1-2}
\end{tabular}
\caption{Overview: Non-staff resources to be committed at Wild Tree Tech
(all in \texteuro)}\vspace*{-1em}
\end{table}
\documentclass[]{article}
\usepackage{lmodern}
\usepackage{amssymb,amsmath}
\usepackage{ifxetex,ifluatex}
\usepackage{fixltx2e} % provides \textsubscript
\ifnum 0\ifxetex 1\fi\ifluatex 1\fi=0 % if pdftex
\usepackage[T1]{fontenc}
\usepackage[utf8]{inputenc}
\else % if luatex or xelatex
\ifxetex
\usepackage{mathspec}
\else
\usepackage{fontspec}
\fi
\defaultfontfeatures{Ligatures=TeX,Scale=MatchLowercase}
\fi
% use upquote if available, for straight quotes in verbatim environments
\IfFileExists{upquote.sty}{\usepackage{upquote}}{}
% use microtype if available
\IfFileExists{microtype.sty}{%
\usepackage{microtype}
\UseMicrotypeSet[protrusion]{basicmath} % disable protrusion for tt fonts
}{}
\usepackage[margin=1in]{geometry}
\usepackage{hyperref}
\hypersetup{unicode=true,
pdfborder={0 0 0},
breaklinks=true}
\urlstyle{same} % don't use monospace font for urls
\usepackage{color}
\usepackage{fancyvrb}
\newcommand{\VerbBar}{|}
\newcommand{\VERB}{\Verb[commandchars=\\\{\}]}
\DefineVerbatimEnvironment{Highlighting}{Verbatim}{commandchars=\\\{\}}
% Add ',fontsize=\small' for more characters per line
\usepackage{framed}
\definecolor{shadecolor}{RGB}{248,248,248}
\newenvironment{Shaded}{\begin{snugshade}}{\end{snugshade}}
\newcommand{\AlertTok}[1]{\textcolor[rgb]{0.94,0.16,0.16}{#1}}
\newcommand{\AnnotationTok}[1]{\textcolor[rgb]{0.56,0.35,0.01}{\textbf{\textit{#1}}}}
\newcommand{\AttributeTok}[1]{\textcolor[rgb]{0.77,0.63,0.00}{#1}}
\newcommand{\BaseNTok}[1]{\textcolor[rgb]{0.00,0.00,0.81}{#1}}
\newcommand{\BuiltInTok}[1]{#1}
\newcommand{\CharTok}[1]{\textcolor[rgb]{0.31,0.60,0.02}{#1}}
\newcommand{\CommentTok}[1]{\textcolor[rgb]{0.56,0.35,0.01}{\textit{#1}}}
\newcommand{\CommentVarTok}[1]{\textcolor[rgb]{0.56,0.35,0.01}{\textbf{\textit{#1}}}}
\newcommand{\ConstantTok}[1]{\textcolor[rgb]{0.00,0.00,0.00}{#1}}
\newcommand{\ControlFlowTok}[1]{\textcolor[rgb]{0.13,0.29,0.53}{\textbf{#1}}}
\newcommand{\DataTypeTok}[1]{\textcolor[rgb]{0.13,0.29,0.53}{#1}}
\newcommand{\DecValTok}[1]{\textcolor[rgb]{0.00,0.00,0.81}{#1}}
\newcommand{\DocumentationTok}[1]{\textcolor[rgb]{0.56,0.35,0.01}{\textbf{\textit{#1}}}}
\newcommand{\ErrorTok}[1]{\textcolor[rgb]{0.64,0.00,0.00}{\textbf{#1}}}
\newcommand{\ExtensionTok}[1]{#1}
\newcommand{\FloatTok}[1]{\textcolor[rgb]{0.00,0.00,0.81}{#1}}
\newcommand{\FunctionTok}[1]{\textcolor[rgb]{0.00,0.00,0.00}{#1}}
\newcommand{\ImportTok}[1]{#1}
\newcommand{\InformationTok}[1]{\textcolor[rgb]{0.56,0.35,0.01}{\textbf{\textit{#1}}}}
\newcommand{\KeywordTok}[1]{\textcolor[rgb]{0.13,0.29,0.53}{\textbf{#1}}}
\newcommand{\NormalTok}[1]{#1}
\newcommand{\OperatorTok}[1]{\textcolor[rgb]{0.81,0.36,0.00}{\textbf{#1}}}
\newcommand{\OtherTok}[1]{\textcolor[rgb]{0.56,0.35,0.01}{#1}}
\newcommand{\PreprocessorTok}[1]{\textcolor[rgb]{0.56,0.35,0.01}{\textit{#1}}}
\newcommand{\RegionMarkerTok}[1]{#1}
\newcommand{\SpecialCharTok}[1]{\textcolor[rgb]{0.00,0.00,0.00}{#1}}
\newcommand{\SpecialStringTok}[1]{\textcolor[rgb]{0.31,0.60,0.02}{#1}}
\newcommand{\StringTok}[1]{\textcolor[rgb]{0.31,0.60,0.02}{#1}}
\newcommand{\VariableTok}[1]{\textcolor[rgb]{0.00,0.00,0.00}{#1}}
\newcommand{\VerbatimStringTok}[1]{\textcolor[rgb]{0.31,0.60,0.02}{#1}}
\newcommand{\WarningTok}[1]{\textcolor[rgb]{0.56,0.35,0.01}{\textbf{\textit{#1}}}}
\usepackage{graphicx,grffile}
\makeatletter
\def\maxwidth{\ifdim\Gin@nat@width>\linewidth\linewidth\else\Gin@nat@width\fi}
\def\maxheight{\ifdim\Gin@nat@height>\textheight\textheight\else\Gin@nat@height\fi}
\makeatother
% Scale images if necessary, so that they will not overflow the page
% margins by default, and it is still possible to overwrite the defaults
% using explicit options in \includegraphics[width, height, ...]{}
\setkeys{Gin}{width=\maxwidth,height=\maxheight,keepaspectratio}
\IfFileExists{parskip.sty}{%
\usepackage{parskip}
}{% else
\setlength{\parindent}{0pt}
\setlength{\parskip}{6pt plus 2pt minus 1pt}
}
\setlength{\emergencystretch}{3em} % prevent overfull lines
\providecommand{\tightlist}{%
\setlength{\itemsep}{0pt}\setlength{\parskip}{0pt}}
\setcounter{secnumdepth}{0}
% Redefines (sub)paragraphs to behave more like sections
\ifx\paragraph\undefined\else
\let\oldparagraph\paragraph
\renewcommand{\paragraph}[1]{\oldparagraph{#1}\mbox{}}
\fi
\ifx\subparagraph\undefined\else
\let\oldsubparagraph\subparagraph
\renewcommand{\subparagraph}[1]{\oldsubparagraph{#1}\mbox{}}
\fi
%%% Use protect on footnotes to avoid problems with footnotes in titles
\let\rmarkdownfootnote\footnote%
\def\footnote{\protect\rmarkdownfootnote}
%%% Change title format to be more compact
\usepackage{titling}
% Create subtitle command for use in maketitle
\providecommand{\subtitle}[1]{
\posttitle{
\begin{center}\large#1\end{center}
}
}
\setlength{\droptitle}{-2em}
\title{}
\pretitle{\vspace{\droptitle}}
\posttitle{}
\author{}
\preauthor{}\postauthor{}
\date{}
\predate{}\postdate{}
\begin{document}
\hypertarget{data-visualization-with-ggplot2}{%
\section{Data Visualization with
ggplot2}\label{data-visualization-with-ggplot2}}
Visualizing data is an area where R really shines. There are many ways
to plot data with R, including base R, \texttt{lattice}, \texttt{grid}
and \texttt{ggplot2}. The only one we will work with is
\texttt{ggplot2}, which is now (I have no data to back this up) the
de-facto standard for visualizing data in R. Given that
\texttt{ggplot2} is a general package for creating essentially ALL
types of visualizations, it can seem quite complex (and it is). What I hope
you will get out of this section is a basic understanding of how to
create a figure and, most importantly, how to find help and examples
that you can build off of for your own visualizations. If you want to
read more about why some people choose base plotting vs
\texttt{ggplot2}, the twitter/blogosphere ``argument'' between
\href{https://simplystatistics.org/2016/02/11/why-i-dont-use-ggplot2/}{Jeff
Leek} and \href{http://varianceexplained.org/r/why-I-use-ggplot2/}{David
Robinson} is worth some time.
\hypertarget{lesson-outline}{%
\subsection{Lesson Outline}\label{lesson-outline}}
\begin{itemize}
\tightlist
\item
\protect\hyperlink{examples-of-greatness}{Examples of greatness}
\item
\protect\hyperlink{basics-of-ggplot2}{Basics of \texttt{ggplot2}}
\item
\protect\hyperlink{example-explained}{Example explained}
\end{itemize}
\hypertarget{exercise}{%
\subsection{Exercise}\label{exercise}}
\begin{itemize}
\tightlist
\item
\protect\hyperlink{exercise-41}{Exercise 4.1}
\end{itemize}
\hypertarget{examples-of-what-is-possible}{%
\subsection{Examples of what is
possible}\label{examples-of-what-is-possible}}
Before we get started, I do like to show what is possible. Here are a
couple of geospatial examples of maps created in R:
\begin{figure}
\centering
\includegraphics{figures/ecs21321-fig-0011-m.jpg}
\caption{Trophic State Modeling Results}
\end{figure}
\begin{figure}
\centering
\includegraphics{figures/bike_ggplot.png}
\caption{London Bike Hires}
\end{figure}
\begin{figure}
\centering
\includegraphics{figures/FbMap.jpg}
\caption{Facebook Users}
\end{figure}
More examples from Jeff's work
\includegraphics{figures/fig-2-full.png} from: Raposa et al.~(2018).
Top-down and bottom-up controls on overabundant New England salt marsh
crab populations. PeerJ. \url{https://doi.org/10.7717/peerj.4876}
\begin{figure}
\centering
\includegraphics{figures/water-10-00604-g006.jpg}
\caption{heatmaps}
\end{figure}
from: Kuhn et al.~(2018) Performance of national maps of watershed
integrity at watershed scales. Water.
\url{https://doi.org/10.3390/w10050604}
And some cool examples using \texttt{ggplot2} with \texttt{plotly}.
\url{http://blog.revolutionanalytics.com/2014/11/3-d-plots-with-plotly.html}
Lastly, so that you know that there are many (often cool) mistakes that
lead up to a final visualization, there is
\href{http://accidental-art.tumblr.com/}{Accidental aRt}. For a
specific example, the map I showed earlier of the trophic state
probability had as one of its early iterations this
\href{http://accidental-art.tumblr.com/post/96720455195/was-trying-to-mess-with-projections-in-ggplot}{``psychadelic
doughnut''}
\begin{figure}
\centering
\includegraphics{figures/tumblr_nbfye5hrjR1smu039o1_500.jpg}
\caption{pd}
\end{figure}
(\textbf{ht to Anne Kuhn, my office mate, for the name})
A few other great links that I have recently found are also useful for
inspiration. First, is a repository on GitHub that has most (all?) of
the currently available color palettes in R:
\url{https://github.com/EmilHvitfeldt/r-color-palettes}. Second, the
\href{https://www.r-graph-gallery.com/}{R graph gallery} is a fantastic
resource for seeing all that is possible for visualization in R and the
code on how to do it!!
Now that we are sufficiently motivated, let's take a step back to the
very basics.
\hypertarget{introduction-to-ggplot2-scatterplot}{%
\subsection{\texorpdfstring{Introduction to \texttt{ggplot2}:
scatterplot}{Introduction to ggplot2: scatterplot}}\label{introduction-to-ggplot2-scatterplot}}
When you first get a dataset and are just starting to explore it, you
want to be able to quickly visualize different bits and pieces about the
data. I tend to do this, initially, with base R. But since our time is
short, we are going to focus our efforts just on \texttt{ggplot2}.
A lot has been written and discussed about \texttt{ggplot2}. In
particular see \href{http://ggplot2.org/}{here},
\href{http://docs.ggplot2.org/current/}{here} and
\href{https://github.com/karthikram/ggplot-lecture}{here}. The gist of
all this is that \texttt{ggplot2} is an implementation of something
known as the ``grammar of graphics.'' This separates the basic
components of a graphic into distinct parts (e.g.~like the parts of
speech in a sentence). You add these parts together and get a figure.
Before we start developing some graphics, we need to do a bit of package
maintenance. If \texttt{ggplot2} has not been installed (it should be by
now), install it and make sure to load up the package with
\texttt{library()}.
\begin{Shaded}
\begin{Highlighting}[]
\KeywordTok{install.packages}\NormalTok{(}\StringTok{"ggplot2"}\NormalTok{)}
\KeywordTok{library}\NormalTok{(}\StringTok{"ggplot2"}\NormalTok{)}
\end{Highlighting}
\end{Shaded}
With that finished, we can now use \texttt{ggplot2}. First thing we need
to do is to create our ggplot object. Everything will build off of this
object. The bare minimum for this is the data (handily,
\texttt{ggplot()} is expecting a data frame) and \texttt{aes()}, or the
aesthetics layers. Oddly (at least to me), this is the main place you
specify your x and y data values.
\begin{Shaded}
\begin{Highlighting}[]
\CommentTok{# aes() are the "aesthetics" mappings. When you simply add the x and y}
\CommentTok{# that can seem a bit of a confusing term. You also use aes() to }
\CommentTok{# change color, shape, size etc. of some items }
\NormalTok{iris_gg <-}\StringTok{ }\KeywordTok{ggplot}\NormalTok{(iris,}\KeywordTok{aes}\NormalTok{(}\DataTypeTok{x=}\NormalTok{Petal.Length,}\DataTypeTok{y=}\NormalTok{Petal.Width))}
\NormalTok{iris_gg}
\end{Highlighting}
\end{Shaded}
\includegraphics{figures/unnamed-chunk-2-1.pdf}
Great, nothing plotted\ldots{} All we did at this point is create an
object that contains our data and what we want on the x and y axes. We
haven't said anything about what type of plot we want to make. That
comes next with the use of geometries or \texttt{geom\_}'s.
So if we want to simply plot points we can add that geometry to the
ggplot object.
A side note on syntax. You will notice that we add new ``things'' to a
ggplot object by adding new functions. In concept this is somewhat
similar to the piping we talked about earlier. Essentially it takes the
output from the first function as the input to the second. So to add
points and create the plot, we would do:
\begin{Shaded}
\begin{Highlighting}[]
\CommentTok{#Different syntax than you are used to}
\NormalTok{iris_gg }\OperatorTok{+}\StringTok{ }
\StringTok{ }\KeywordTok{geom_point}\NormalTok{()}
\end{Highlighting}
\end{Shaded}
\includegraphics{figures/unnamed-chunk-3-1.pdf}
It is usually preferable to save this to an object.
\begin{Shaded}
\begin{Highlighting}[]
\CommentTok{#This too can be saved to an object}
\NormalTok{iris_scatter <-}\StringTok{ }\NormalTok{iris_gg }\OperatorTok{+}
\StringTok{ }\KeywordTok{geom_point}\NormalTok{()}
\CommentTok{#Call it to show the plot}
\NormalTok{iris_scatter}
\end{Highlighting}
\end{Shaded}
\includegraphics{figures/unnamed-chunk-4-1.pdf}
Not appreciably better than base, in my opinion. But what if we want to
add some stuff\ldots{}
First a title and some axis labels. These are part of \texttt{labs()}.
\begin{Shaded}
\begin{Highlighting}[]
\CommentTok{#Getting fancy to show italics and greek symbols}
\NormalTok{iris_scatter <-}\StringTok{ }\NormalTok{iris_scatter }\OperatorTok{+}
\StringTok{ }\KeywordTok{labs}\NormalTok{(}\DataTypeTok{title=}\StringTok{"Association Between Iris Petal measurements"}\NormalTok{,}
\DataTypeTok{x=}\StringTok{"Petal Length"}\NormalTok{, }\DataTypeTok{y=}\StringTok{"Petal Width"}\NormalTok{)}
\NormalTok{iris_scatter}
\end{Highlighting}
\end{Shaded}
\includegraphics{figures/unnamed-chunk-5-1.pdf}
Now to add some colors, shapes, etc.\ to the points. Look at the
\texttt{geom\_point()} documentation for this.
\begin{Shaded}
\begin{Highlighting}[]
\NormalTok{iris_scatter <-}\StringTok{ }\NormalTok{iris_scatter }\OperatorTok{+}
\StringTok{ }\KeywordTok{geom_point}\NormalTok{(}\KeywordTok{aes}\NormalTok{(}\DataTypeTok{color=}\NormalTok{Species, }\DataTypeTok{shape=}\NormalTok{Species),}\DataTypeTok{size=}\DecValTok{2}\NormalTok{)}
\NormalTok{iris_scatter}
\end{Highlighting}
\end{Shaded}
\includegraphics{figures/unnamed-chunk-6-1.pdf}
You'll notice we used \texttt{aes()} again, but this time inside of the
geometry. This tells \texttt{ggplot2} that this \texttt{aes()} only applies
to the points. Other geometries will not be affected by this.
In short, this is much easier than using base. Now \texttt{ggplot2}
really shines when you want to add stats (regression lines, intervals,
etc.).
Let's add a loess line with 95\% confidence intervals.
\begin{Shaded}
\begin{Highlighting}[]
\NormalTok{iris_scatter_loess <-}\StringTok{ }\NormalTok{iris_scatter }\OperatorTok{+}
\StringTok{ }\KeywordTok{geom_smooth}\NormalTok{(}\DataTypeTok{method =} \StringTok{"loess"}\NormalTok{)}
\NormalTok{iris_scatter_loess}
\end{Highlighting}
\end{Shaded}
\includegraphics{figures/unnamed-chunk-7-1.pdf}
Try that in \texttt{base} with so little code!
Or we could add a linear regression line with:
\begin{Shaded}
\begin{Highlighting}[]
\NormalTok{iris_scatter_lm <-}\StringTok{ }\NormalTok{iris_scatter }\OperatorTok{+}
\StringTok{ }\KeywordTok{geom_smooth}\NormalTok{(}\DataTypeTok{method=}\StringTok{"lm"}\NormalTok{)}
\NormalTok{iris_scatter_lm}
\end{Highlighting}
\end{Shaded}
\includegraphics{figures/unnamed-chunk-8-1.pdf}
And if we are interested in the regressions by group we could do it this
way.
\begin{Shaded}
\begin{Highlighting}[]
\NormalTok{iris_scatter_lm_group <-}\StringTok{ }\NormalTok{iris_scatter }\OperatorTok{+}
\StringTok{ }\KeywordTok{geom_smooth}\NormalTok{(}\DataTypeTok{method=}\StringTok{"lm"}\NormalTok{, }\KeywordTok{aes}\NormalTok{(}\DataTypeTok{group=}\NormalTok{Species))}
\NormalTok{iris_scatter_lm_group}
\end{Highlighting}
\end{Shaded}
\includegraphics{figures/unnamed-chunk-9-1.pdf}
Or, if we wanted our regression lines to match the color.
\begin{Shaded}
\begin{Highlighting}[]
\NormalTok{iris_scatter_lm_color <-}\StringTok{ }\NormalTok{iris_scatter }\OperatorTok{+}\StringTok{ }
\StringTok{ }\KeywordTok{geom_smooth}\NormalTok{(}\DataTypeTok{method=}\StringTok{"lm"}\NormalTok{, }\KeywordTok{aes}\NormalTok{(}\DataTypeTok{color=}\NormalTok{Species))}
\NormalTok{iris_scatter_lm_color}
\end{Highlighting}
\end{Shaded}
\includegraphics{figures/unnamed-chunk-10-1.pdf}
Notice that we specified the \texttt{aes()} again, but for
\texttt{geom\_smooth()}. We only specified the x and y in the original
\texttt{ggplot} object, so if we want to do something different in the
subsequent functions we need to overwrite it for the function in which
we want a different mapping (i.e.~groups).
In short, some of the initial setup for ggplot is a bit more verbose
than base R, but when we want to do some more complex plots it is much
easier in \texttt{ggplot2}.
Before we get into another exercise, let's look at some of the other
geometries. The best place to do this is the excellent \texttt{ggplot2}
documentation of the \href{http://docs.ggplot2.org/current/}{geom
functions}.
\hypertarget{example-explained}{%
\subsection{Example explained}\label{example-explained}}
Now that we have the basics of \texttt{ggplot2} down, let's take a
closer look at our example in \texttt{nla\_analysis.R}.
\hypertarget{exercise-41}{%
\subsection{Exercise 4.1}\label{exercise-41}}
For this exercise we will work on creating a new plot from scratch. One
of the concepts I hope to get across is that creating a plot is as much
about knowing data manipulation as it is about knowing the details of your
plotting system (\texttt{ggplot2} in our case). Add some new code at the
end of our \texttt{nla\_analysis.R} that does the following:
\begin{enumerate}
\def\labelenumi{\arabic{enumi}.}
\tightlist
\item
Create a new data frame with the state by state average of total
nitrogen, total phosphorus, and chlorophyll \emph{a}
\item
Using this newly created data frame, plot mean total nitrogen on the
x-axis, mean total phosphorus on the y-axis, and size and color the
points based on the chlorophyll (extra credit if you log transform
these data)
\item
Try to use \texttt{ggplotly} from the \texttt{plotly} package to
create an interactive version of this plot.
\item
Don't forget to comment your code
\end{enumerate}
This will be a challenging exercise as it includes nearly all of the
tidyverse components we have talked about.
\end{document}
| {
"alphanum_fraction": 0.7538393791,
"avg_line_length": 38.3270042194,
"ext": "tex",
"hexsha": "78c9ec52b7125710a00d4af34fb0213e6be62974",
"lang": "TeX",
"max_forks_count": null,
"max_forks_repo_forks_event_max_datetime": null,
"max_forks_repo_forks_event_min_datetime": null,
"max_forks_repo_head_hexsha": "f4e5b4af98da45d4ddeeb331a1b241a2ed041a15",
"max_forks_repo_licenses": [
"CC0-1.0"
],
"max_forks_repo_name": "jhollist/kdhe_region7_r",
"max_forks_repo_path": "lessons/04_data_viz_with_ggplot2.tex",
"max_issues_count": null,
"max_issues_repo_head_hexsha": "f4e5b4af98da45d4ddeeb331a1b241a2ed041a15",
"max_issues_repo_issues_event_max_datetime": null,
"max_issues_repo_issues_event_min_datetime": null,
"max_issues_repo_licenses": [
"CC0-1.0"
],
"max_issues_repo_name": "jhollist/kdhe_region7_r",
"max_issues_repo_path": "lessons/04_data_viz_with_ggplot2.tex",
"max_line_length": 206,
"max_stars_count": null,
"max_stars_repo_head_hexsha": "f4e5b4af98da45d4ddeeb331a1b241a2ed041a15",
"max_stars_repo_licenses": [
"CC0-1.0"
],
"max_stars_repo_name": "jhollist/kdhe_region7_r",
"max_stars_repo_path": "lessons/04_data_viz_with_ggplot2.tex",
"max_stars_repo_stars_event_max_datetime": null,
"max_stars_repo_stars_event_min_datetime": null,
"num_tokens": 5655,
"size": 18167
} |
\section{Conclusions and Future Work} %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
\label{sec:cc}
Type switching is an open alternative to the visitor design pattern that overcomes
the restrictions, inconveniences, and difficulties in teaching and using
visitors. Our implementation significantly
outperforms the visitor design pattern in most cases and roughly equals it otherwise.
This is the case even though we use a library implementation and highly optimized
production-quality compilers. An important benefit of our solution is that it does not
require any changes to the \Cpp{} object model or any computations at load
time.
To provide a complete solution, we use the same syntax for closed sets of types, where our
performance roughly equals that of the equivalent built-in features in functional languages,
such as Haskell and OCaml.
We prove the uniqueness of vtbl-pointers in the presence of RTTI. This is
potentially useful in other compiler optimizations that depend on the
identity of subobjects. Our memoization device can also become valuable in
optimizations that require mapping run-time values to execution paths,
and is especially useful in a library setting.
%We describe three techniques that can be used to implement type switching, type
%testing, pattern matching, predicate dispatching, and other facilities that
%depend on the run-time type of an argument as well as demonstrate their efficiency.
%
%The \emph{Memoization Device} is an optimization technique that maps run-time values
%to execution paths, allowing to take shortcuts on subsequent runs with the same
%value. The technique does not require code duplication and in typical cases adds
%only a single indirect assignment to each of the execution paths. It can be
%combined with other compiler optimizations and is particularly suitable for use
%in a library setting.
%
%The \emph{Vtable Pointer Memoization} is a technique based on memoization device that
%employs uniqueness of virtual table pointers to not only speed up execution, but
%also properly uncover the dynamic type of an object. This technique is a
%backbone of our fast type switch as well as memoized dynamic cast optimization.
%
%The \emph{TPL Dispatcher} is yet another technique that can be used to
%implement best-fit type switching on tagged classes. The technique has its pros
%and cons in comparison to vtable pointer memoization, which we discuss in the paper.
%
%These techniques can be used in a compiler and library setting, and support well
%separate compilation and dynamic linking. They are open to class extensions and
%interact well with other \Cpp{} facilities such as multiple inheritance and
%templates. The techniques are not specific to \Cpp{} and can be adopted in other
%languages for similar purposes.
%
%Using these techniques, we implemented a library for efficient type switching
%in \Cpp{}. We used it to rewrite a code that relied heavily on
%visitors, and discovered that the resulting code became much shorter, simpler,
%and easier to maintain and comprehend.
| {
"alphanum_fraction": 0.7751592357,
"avg_line_length": 59.2452830189,
"ext": "tex",
"hexsha": "3a1cfbf2d2c8d84b5a90df0b44af221df41a113a",
"lang": "TeX",
"max_forks_count": 108,
"max_forks_repo_forks_event_max_datetime": "2021-11-18T11:06:59.000Z",
"max_forks_repo_forks_event_min_datetime": "2015-02-13T17:39:07.000Z",
"max_forks_repo_head_hexsha": "eef288eb9fe59712ff153dd70791365391b7b118",
"max_forks_repo_licenses": [
"BSD-3-Clause"
],
"max_forks_repo_name": "akrzemi1/Mach7",
"max_forks_repo_path": "media/papers/TypeSwitch/WG21/sec-conclusions.tex",
"max_issues_count": 62,
"max_issues_repo_head_hexsha": "eef288eb9fe59712ff153dd70791365391b7b118",
"max_issues_repo_issues_event_max_datetime": "2021-11-14T22:02:14.000Z",
"max_issues_repo_issues_event_min_datetime": "2015-01-12T07:59:17.000Z",
"max_issues_repo_licenses": [
"BSD-3-Clause"
],
"max_issues_repo_name": "akrzemi1/Mach7",
"max_issues_repo_path": "media/papers/TypeSwitch/WG21/sec-conclusions.tex",
"max_line_length": 97,
"max_stars_count": 1310,
"max_stars_repo_head_hexsha": "eef288eb9fe59712ff153dd70791365391b7b118",
"max_stars_repo_licenses": [
"BSD-3-Clause"
],
"max_stars_repo_name": "akrzemi1/Mach7",
"max_stars_repo_path": "media/papers/TypeSwitch/WG21/sec-conclusions.tex",
"max_stars_repo_stars_event_max_datetime": "2022-03-18T04:44:01.000Z",
"max_stars_repo_stars_event_min_datetime": "2015-01-04T03:44:04.000Z",
"num_tokens": 626,
"size": 3140
} |
\documentclass{article}
\usepackage[fancyhdr,pdf]{latex2man}
\input{common.tex}
\begin{document}
\begin{Name}{3}{unw\_init\_remote}{David Mosberger-Tang}{Programming Library}{unw\_init\_remote}unw\_init\_remote -- initialize cursor for remote unwinding
\end{Name}
\section{Synopsis}
\File{\#include $<$libunwind.h$>$}\\
\Type{int} \Func{unw\_init\_remote}(\Type{unw\_cursor\_t~*}\Var{c}, \Type{unw\_addr\_space\_t~}\Var{as}, \Type{void~*}\Var{arg});\\
\section{Description}
The \Func{unw\_init\_remote}() routine initializes the unwind cursor
pointed to by \Var{c} for unwinding in the address space identified by
\Var{as}. The \Var{as} argument can either be set to
\Var{unw\_local\_addr\_space} (local address space) or to an arbitrary
address space created with \Func{unw\_create\_addr\_space}().
The \Var{arg} void-pointer tells the address space exactly what entity
should be unwound. For example, if \Var{unw\_local\_addr\_space} is
passed in \Var{as}, then \Var{arg} needs to be a pointer to a context
structure containing the machine-state of the initial stack frame.
However, other address-spaces may instead expect a process-id, a
thread-id, or a pointer to an arbitrary structure which identifies the
stack-frame chain to be unwound. In other words, the interpretation
of \Var{arg} is entirely dependent on the address-space in use;
\Prog{libunwind} never interprets the argument in any way on its own.
Note that \Func{unw\_init\_remote}() can be used to initiate unwinding
in \emph{any} process, including the local process in which the
unwinder itself is running. However, for local unwinding, it is
generally preferable to use \Func{unw\_init\_local}() instead, because
it is easier to use and because it may perform better.
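As an illustration only (this is a sketch, not part of the interface
specification), a typical remote-unwinding loop for a ptrace-attached
process might use the \Prog{libunwind-ptrace} helpers
(\Func{\_UPT\_create}(), \Var{\_UPT\_accessors}, etc.) roughly as
follows, where \Var{pid} identifies a target that is already attached
and stopped:
\begin{verbatim}
#include <libunwind-ptrace.h>

unw_addr_space_t as = unw_create_addr_space (&_UPT_accessors, 0);
void *ui = _UPT_create (pid);
unw_cursor_t cursor;

if (unw_init_remote (&cursor, as, ui) == 0)
  while (unw_step (&cursor) > 0)
    {
      unw_word_t ip;
      unw_get_reg (&cursor, UNW_REG_IP, &ip);
      /* ... examine or print the frame ... */
    }

_UPT_destroy (ui);
unw_destroy_addr_space (as);
\end{verbatim}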
\section{Return Value}
On successful completion, \Func{unw\_init\_remote}() returns 0.
Otherwise the negative value of one of the error-codes below is
returned.
\section{Thread and Signal Safety}
\Func{unw\_init\_remote}() is thread-safe. If the local address-space
is passed in argument \Var{as}, this routine is also safe to use from
a signal handler.
\section{Errors}
\begin{Description}
\item[\Const{UNW\_EINVAL}] \Func{unw\_init\_remote}() was called in a
version of \Prog{libunwind} which supports local unwinding only
(this normally happens when defining \Const{UNW\_LOCAL\_ONLY} before
including \File{$<$libunwind.h$>$} and then calling
\Func{unw\_init\_remote}()).
\item[\Const{UNW\_EUNSPEC}] An unspecified error occurred.
\item[\Const{UNW\_EBADREG}] A register needed by \Func{unw\_init\_remote}()
wasn't accessible.
\end{Description}
\section{See Also}
\SeeAlso{libunwind(3)}, \SeeAlso{unw\_create\_addr\_space(3)},
\SeeAlso{unw\_init\_local(3)}
\section{Author}
\noindent
David Mosberger-Tang\\
Email: \Email{[email protected]}\\
WWW: \URL{http://www.nongnu.org/libunwind/}.
\LatexManEnd
\end{document}
| {
"alphanum_fraction": 0.7564766839,
"avg_line_length": 36.1875,
"ext": "tex",
"hexsha": "9b4dc7997ae61fa9e46b69095a0949fd136cde90",
"lang": "TeX",
"max_forks_count": 3629,
"max_forks_repo_forks_event_max_datetime": "2022-03-31T21:52:28.000Z",
"max_forks_repo_forks_event_min_datetime": "2019-11-25T23:29:16.000Z",
"max_forks_repo_head_hexsha": "72bee25ab532a4d0636118ec2ed3eabf3fd55245",
"max_forks_repo_licenses": [
"MIT"
],
"max_forks_repo_name": "pyracanda/runtime",
"max_forks_repo_path": "src/coreclr/pal/src/libunwind/doc/unw_init_remote.tex",
"max_issues_count": 37522,
"max_issues_repo_head_hexsha": "72bee25ab532a4d0636118ec2ed3eabf3fd55245",
"max_issues_repo_issues_event_max_datetime": "2022-03-31T23:58:30.000Z",
"max_issues_repo_issues_event_min_datetime": "2019-11-25T23:30:32.000Z",
"max_issues_repo_licenses": [
"MIT"
],
"max_issues_repo_name": "pyracanda/runtime",
"max_issues_repo_path": "src/coreclr/pal/src/libunwind/doc/unw_init_remote.tex",
"max_line_length": 155,
"max_stars_count": 12278,
"max_stars_repo_head_hexsha": "72bee25ab532a4d0636118ec2ed3eabf3fd55245",
"max_stars_repo_licenses": [
"MIT"
],
"max_stars_repo_name": "pyracanda/runtime",
"max_stars_repo_path": "src/coreclr/pal/src/libunwind/doc/unw_init_remote.tex",
"max_stars_repo_stars_event_max_datetime": "2022-03-31T21:12:00.000Z",
"max_stars_repo_stars_event_min_datetime": "2015-01-29T17:11:33.000Z",
"num_tokens": 846,
"size": 2895
} |
\chapter{Appendix Numerical Software}
Computers are getting more powerful over time, but the size of the problems we're solving scales with the increased performance. Tools for acquiring and storing data are improving at an even faster pace than processors. It turns out that communication is the real bottleneck to scaling many algorithms. The capacity of fast memory close to the computing resource (cache) grows very slowly in time. Commodity hardware is now being used to deploy large distributed computing systems running Apache Hadoop and Spark. The possibility of computing on ever larger data sets is tantalizing for the machine learning community.
These notes discuss a number of numerical computing libraries the author has used in various machine learning and image processing projects. There is a lot of effort underway to integrate and distribute some of the algorithms in these libraries into the Hadoop ecosystem.
Many of the libraries discussed here are available in Matlab. Intel MKL \& IPP, ARPACK, UMFPACK, and SDPA have been integrated into the author's klMatrix library; see the section on the KL Libraries below for specifics.
\section*{BLAS, MKL \& ATLAS}
\section*{Graphviz - Graph Visualization Software}
Graphviz ( http://www.graphviz.org ) is open source graph visualization software. Graph visualization is a way of representing structural information as diagrams of abstract graphs and networks. It has important applications in networking, bioinformatics, software engineering, database and web design, machine learning, and in visual interfaces for other technical domains.
Graphviz is used to generate collaboration, inheritance, and call diagrams in the KL documentation. There is an API that is used in the KL framework to facilitate graph visualization.
\section*{ARPACK}
ARPACK++ is an object-oriented version of the Fortran ARPACK package. ARPACK is designed to compute a few eigenvalues and eigenvectors of large scale sparse matrices and pencils via the Arnoldi process. These methods utilize Krylov subspace projections for an iterative solution that avoids direct matrix-matrix operations. ARPACK implements the implicitly restarted Arnoldi method, which reduces the storage requirements of the traditional Lanczos iteration for Hermitian matrices and the Arnoldi iteration for general matrices. The key to the Krylov method is to work in the order-$m$ Krylov subspace generated by a linear operator $A \in \mathbb{R}^{n \times n}$ and a vector $b \in \mathbb{R}^{n}$:
\[
\mathcal{K}_m(A,b) = \mathrm{span}\{b,\, Ab,\, A^{2}b,\, \ldots,\, A^{m-1}b\}.
\]
This avoids direct matrix-matrix operations when finding the first few eigenvector--eigenvalue pairs of a large system of linear
equations.
\section*{ATLAS}
Automatically Tuned Linear Algebra software.
\section*{METIS}
METIS is a software library for partitioning graphs and finite element meshes. It can also be used to compute fill-reducing orderings of
sparse matrices.
\section*{SDPA}
SDPA is a software library for solving SDPs using the Mehrotra-type predictor-corrector infeasible primal-dual interior-point method. It is implemented in C++ and utilizes machine-dependent BLAS implementations such as Intel MKL and ATLAS. LAPACK routines are used for matrix computations. Efficient methods to compute the search directions exploiting the sparsity of the data matrices are implemented. Sparse or dense Cholesky factorization for the Schur complement matrix is automatically selected. The calculation of the Schur complement
matrix is implemented in reentrant code. A sparse version of SDPA is available that uses the METIS and SPOOLES libraries for finding a proper sparse structure of the problem.
\section*{SPOOLES}
SPOOLES is a library for solving sparse real and complex linear systems of equations. SPOOLES can factor and solve square linear systems of equations with symmetric structure, and it can compute multiple minimum degree, generalized nested dissection and multisection orderings of matrices with symmetric structure. SPOOLES utilizes a variety of Krylov iterative methods. The preconditioner is a drop tolerance factorization.
\section*{SuperLU}
SuperLU ( http://crd-legacy.lbl.gov/~xiaoye/SuperLU/) is a general purpose library for the direct solution of large, sparse, nonsymmetric systems of linear equations on high performance machines. The library is written in C and is callable from either C or Fortran. The library routines will perform an LU decomposition with partial pivoting and triangular system solves through forward and back substitution. The LU factorization routines can handle non-square matrices but the triangular solves are performed only for square matrices. The matrix columns may be preordered (before factorization) either through library or user supplied routines. This pre-ordering for sparsity is completely separate from the factorization. Working precision iterative refinement subroutines are provided for improved backward stability. Routines are also provided to equilibrate the system, estimate the condition number, calculate the relative backward error, and estimate error bounds for the refined solutions.
\section*{SuiteSparse}
Tim Davis' ( http://www.cise.ufl.edu/~davis/welcome.html) collection of sparse matrix software. Tim is also the curator of The University of Florida Sparse Matrix Collection (http://www.cise.ufl.edu/research/sparse/matrices/), a must-see for anyone interested in sparse
matrices and visualization.
\begin{itemize}
\item AMD: symmetric approximate minimum degree
\item BTF: permutation to block triangular form
\item CAMD: constrained symmetric approximate minimum degree
\item CCOLAMD: constrained column approximate minimum degree
\item COLAMD: column approximate minimum degree
\item CHOLMOD: sparse supernodal Cholesky factorization and update/downdate
\item CSparse: a concise sparse matrix package
\item CXSparse: an extended version of CSparse
\item KLU: sparse $LU$ factorization, for circuit simulation
\item LDL: a simple $LDL^T$ factorization
\item UMFPACK: sparse multifrontal $LU$ factorization
\item RBio: MATLAB toolbox for reading/writing sparse matrices
\item UFconfig: common configuration for all but CSparse
\item SuiteSparseQR: multifrontal sparse $QR$
\end{itemize}
\section*{SuiteSparse AMD}
AMD is a set of routines for pre-ordering a sparse matrix prior to numerical factorization. It uses an approximate minimum degree ordering algorithm to find a permutation matrix $P$ so that the Cholesky factorization $PAP^\dag = LL^\dag$ has fewer (often much fewer) nonzero entries than the Cholesky factorization of $A$. The algorithm is typically much faster than other ordering methods and minimum degree ordering algorithms that compute an exact degree. Some methods, such as approximate deficiency [Rothberg and Eisenstat 1998] and graph-partitioning based methods [Hendrickson and Rothberg 1999; Karypis and Kumar 1998; Pellegrini et al. 2000; Schulze 2001], can produce better orderings, depending on the matrix. The algorithm starts with an undirected graph representation of a symmetric sparse matrix. Node $i$ in the graph corresponds to row and column $i$ of the matrix, and there is an edge $(i,j)$ in the graph if $a_{ij}$ is nonzero. The degree of a node is initialized to the number of off-diagonal non-zeros in row $i$, which is the size of the set of nodes adjacent to $i$ in the graph.
\section*{SuiteSparse UMFPACK}
UMFPACK is a set of routines for solving systems of linear equations, $Ax = b$, when $A$ is sparse and unsymmetric. It is based on the Unsymmetric-pattern MultiFrontal method. UMFPACK factorizes $PAQ$, $PRAQ$ and $PR^{-1}AQ$, into the product $LU$, where $L$ and $U$ are lower and upper triangular, respectively, $P$ and $Q$ are permutation matrices, and $R$ is a diagonal matrix of row scaling factors (or $R = I$ if row-scaling is not used). Both $P$ and $Q$ are chosen to reduce fill-in (new nonzeros in $L$ and $U$ that are not present in $A$). The permutation $P$ has the dual role of reducing fill-in and maintaining numerical accuracy (via relaxed partial pivoting and row interchanges). The sparse matrix $A$ can be square or rectangular, singular or non-singular, and real or complex (or any combination). Only square matrices $A$ can be used to solve $Ax = b$ or related systems. Rectangular matrices can only be factorized.
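A minimal sketch of the calling sequence (using the \texttt{int}/\texttt{double} \texttt{umfpack\_di\_*} interface; the compressed-column arrays \texttt{Ap}, \texttt{Ai}, \texttt{Ax} and the vectors \texttt{x}, \texttt{b} are assumed to be set up by the caller) looks like this:
\begin{verbatim}
#include <umfpack.h>

/* Solve A x = b, with A of order n stored in compressed-column form. */
void *Symbolic, *Numeric;
umfpack_di_symbolic (n, n, Ap, Ai, Ax, &Symbolic, NULL, NULL);
umfpack_di_numeric (Ap, Ai, Ax, Symbolic, &Numeric, NULL, NULL);
umfpack_di_free_symbolic (&Symbolic);
umfpack_di_solve (UMFPACK_A, Ap, Ai, Ax, x, b, Numeric, NULL, NULL);
umfpack_di_free_numeric (&Numeric);
\end{verbatim}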
\section*{fftw}
fftw is a highly optimized library for calculating the discrete Fourier transform. \cite{G_ieeetrans_FFTW}. Generally fftw performs better than Intel IPP. fftw can be sped up by compiling with the Intel Compiler.
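As a small sketch of typical FFTW3 usage (plan once, execute as often as needed), a one-dimensional complex transform looks roughly like this:
\begin{verbatim}
#include <fftw3.h>

int main(void) {
  const int n = 1024;
  /* fftw_malloc provides the alignment needed for the SIMD code paths */
  fftw_complex *in  = fftw_malloc(sizeof(fftw_complex) * n);
  fftw_complex *out = fftw_malloc(sizeof(fftw_complex) * n);

  fftw_plan p = fftw_plan_dft_1d(n, in, out, FFTW_FORWARD, FFTW_ESTIMATE);
  /* ... fill in[] with data ... */
  fftw_execute(p);   /* may be called repeatedly on new contents of in[] */

  fftw_destroy_plan(p);
  fftw_free(in);
  fftw_free(out);
  return 0;
}
\end{verbatim}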
\section*{Simulation and modeling with the kl Software Framework}
Class, interaction, and collaboration diagrams are presented below for a modeling framework implemented by the author. The framework is implemented in C++. The simulation of various univariate and multivariate random number generators, along with the distribution tests from the CDHC library, is included as well.
Features of this framework include:
\begin{itemize}
\item utilizing optimized BLAS libraries
\item up-to-date methods for univariate random number generation
\item wrappers for Intel performance primitives and GSL
\item multiple memory management facilities
\end{itemize}
We will use the term $EPDF_X$ to mean the empirical probability density function of $X$. There are a variety of univariate tests to help determine which parametric distribution your data belongs to. These fall under the category of goodness-of-fit testing. For a parametric family, the null hypothesis $H_0 : X =_d p(x|\theta)$ is tested against the alternative that $X$ does not belong to the family $p(x|\theta)$. There is also a family of tests to determine whether two $EPDF$'s come from the same distribution. | {
"alphanum_fraction": 0.8065753993,
"avg_line_length": 130.2972972973,
"ext": "tex",
"hexsha": "bc4be93a72aa180797848598b71b3a1b0a22f02b",
"lang": "TeX",
"max_forks_count": null,
"max_forks_repo_forks_event_max_datetime": null,
"max_forks_repo_forks_event_min_datetime": null,
"max_forks_repo_head_hexsha": "6c5229ef7b943455a4e890f0ec62764adf9a2c40",
"max_forks_repo_licenses": [
"MIT"
],
"max_forks_repo_name": "brucebcampbell/machine-learning-notes",
"max_forks_repo_path": "Software.tex",
"max_issues_count": null,
"max_issues_repo_head_hexsha": "6c5229ef7b943455a4e890f0ec62764adf9a2c40",
"max_issues_repo_issues_event_max_datetime": null,
"max_issues_repo_issues_event_min_datetime": null,
"max_issues_repo_licenses": [
"MIT"
],
"max_issues_repo_name": "brucebcampbell/machine-learning-notes",
"max_issues_repo_path": "Software.tex",
"max_line_length": 1098,
"max_stars_count": null,
"max_stars_repo_head_hexsha": "6c5229ef7b943455a4e890f0ec62764adf9a2c40",
"max_stars_repo_licenses": [
"MIT"
],
"max_stars_repo_name": "brucebcampbell/machine-learning-notes",
"max_stars_repo_path": "Software.tex",
"max_stars_repo_stars_event_max_datetime": null,
"max_stars_repo_stars_event_min_datetime": null,
"num_tokens": 2108,
"size": 9642
} |
\documentclass[12pt,letterpaper]{article}
\usepackage[utf8]{inputenc}
\usepackage{amsmath}
\usepackage{amsfonts}
\usepackage{amssymb}
\usepackage{pdfpages}
\usepackage[left=1in, right=1in, top=0.5in, bottom=1in]{geometry}
\title{ChessAce: Problem Statement}
\author{Team MIF(G18): Jerry Ke, Mengshan Cui, Harry Fu}
\date{}
\begin{document}
\maketitle
\section{Team meeting plan}
Team meetings shall happen every Tuesday and Wednesday in ITB 236, and Friday in Thode library. If necessary, an additional weekend meeting will be announced at least twenty-four hours ahead. The weekend meeting will be on either Saturday or Sunday, based on discussion.\\
One dedicated person shall record the discussion topics, ideas, conflicts, and decisions made during the meeting. One dedicated person shall prepare a detailed agenda of the items that need to be discussed, place the items in order of priority, and estimate the time required for each topic. Roles can switch among the team members via discussion.\\
\section{Rules of agendas}
$\bullet$ Record of the meeting shall be kept till the end of the project development.\\
$\bullet$ Late attendance is forgivable if it is within 15 minutes after the decided meeting time. A warning should be given after 15 minutes but before 30 minutes. The second warning shall be recorded. Any late attendance after 30 minutes or a complete absence shall be recorded as well. If the late attendance or absence is caused by force majeure, then the record may be erased after discussion.\\
$\bullet$ All members have the responsibility to be physically and mentally present.\\
$\bullet$ All members have the responsibility to immediately stop any irrational, emotional conflicts or personal attacks that happen during the meeting.\\
$\bullet$ Every meeting has to end with a written statement or decision. Leaving early must come with a valid reason.\\
$\bullet$ At the end of the meeting, everyone should understand their deliverable for the next meeting.\\
\section{Team member roles}
$\bullet$ Team leader: Jerry Ke\\
$\bullet$ Software Developer: Harry Fu, Jerry Ke, Mengshan Cui\\
$\bullet$ Negotiator: Harry Fu\\
$\bullet$ Documentation: Mengshan Cui\\
$\bullet$ Revisor \& Latex Formatting: Jerry Ke, Mengshan Cui, Harry Fu\\
$\bullet$ Git Management: Harry Fu\\
\section{Team communication plan}
On campus, we will communicate face to face at Thode library to work together. Off campus, we are going to use a social media application called “WeChat” to share all the work details and important files and to discuss meeting times. If any urgent situation arises, we will first talk on WeChat; if no one responds, we will use a phone call. All documentation will be posted and edited on Google Docs, and then a Latex version will be posted on Gitlab. All programming work will be posted and edited on Gitlab; team members can access Gitlab and create branches to work on.\\
\section{Git workflow plan}
The development of ChessMaster will happen on Gitlab. The origin/master branch will be the centralized branch, which aims at stable code releases. Right now, the origin/develop and origin/release branches are necessary for development. The develop branch should contain the evolving code that is not yet stable enough. The release branch should provide stable code ready to merge back to the master branch, but minor bug fixes may happen before the actual release. At this point, September 28th, Git management will not consider Feature and Hotfix branches, since the structure and number of features of the project are not as sophisticated as most other MMO or RPG games. Team MIF will use tags to mark major document or code updates, which correspond to the milestones established at the beginning of the semester.\\
\section{Proof of concept demonstration plan}
Even though chess is a relatively simple project to implement, there are still many risks that Team MIF has to overcome. The risks fall into two categories: implementation and testing. Implementation risk is mainly associated with “attack-range” detection, which prevents the king from moving into check. \\
Because there is no test implementation in the original ChessOOP project, we have to design all the test plans ourselves. The hard part of software testing is that we have to consider all expected outputs for our program. However, since chess involves a tremendous amount of variety, it is nearly impossible to include all the scenarios.\\
To show that the above risks can be overcome, Team MIF plans to implement a preliminary prototype with the fundamental chess functions and minimal bugs. A GUI may not be included in the first POC demonstration, but it will be added later on.\\
\section{Technology}
$\bullet$ Programming language: based on Java\\
$\bullet$ IDE: Microsoft Visual Studio/Eclipse/Atom\\
$\bullet$ Development platform: Gitlab\\
$\bullet$ Work flow chart: GanttProject\\
$\bullet$ Documentation: Latex and PDF file\\
\section{Coding style}
The project will refer to the Google Java Style with some changes. A tab character will be used instead of 2 or 4 spaces for indentation between continuation lines and the contents of a block. Multiple variables will be permitted in one variable declaration (int a, b), and local variables will be declared at the start of their containing block or block-like construct, not declared when needed. Identifier expressions may use special prefixes or suffixes, which is not allowed in Google Java Style.\\
\section{Project schedule}
Refer to ChessAce\_team18.gan/pdf under the same directory. Screenshots below.
\includepdf[pages=-]{ChessAce_team18.pdf}
\section{Project review (for Revision 1)}
\end{document} | {
"alphanum_fraction": 0.7966011683,
"avg_line_length": 94.15,
"ext": "tex",
"hexsha": "1ab0ffb19222b00255bde40620e579488d1fed26",
"lang": "TeX",
"max_forks_count": null,
"max_forks_repo_forks_event_max_datetime": null,
"max_forks_repo_forks_event_min_datetime": null,
"max_forks_repo_head_hexsha": "c16e3089ffa5635b1b37dd6f02000a9804b6b07b",
"max_forks_repo_licenses": [
"MIT"
],
"max_forks_repo_name": "JryXK/ChessAce",
"max_forks_repo_path": "Doc/Rev1/DevelopmentPlan/DevPlan.tex",
"max_issues_count": null,
"max_issues_repo_head_hexsha": "c16e3089ffa5635b1b37dd6f02000a9804b6b07b",
"max_issues_repo_issues_event_max_datetime": null,
"max_issues_repo_issues_event_min_datetime": null,
"max_issues_repo_licenses": [
"MIT"
],
"max_issues_repo_name": "JryXK/ChessAce",
"max_issues_repo_path": "Doc/Rev1/DevelopmentPlan/DevPlan.tex",
"max_line_length": 805,
"max_stars_count": 1,
"max_stars_repo_head_hexsha": "c16e3089ffa5635b1b37dd6f02000a9804b6b07b",
"max_stars_repo_licenses": [
"MIT"
],
"max_stars_repo_name": "JryXK/ChessAce",
"max_stars_repo_path": "Doc/Rev1/DevelopmentPlan/DevPlan.tex",
"max_stars_repo_stars_event_max_datetime": "2019-02-20T15:48:24.000Z",
"max_stars_repo_stars_event_min_datetime": "2019-02-20T15:48:24.000Z",
"num_tokens": 1232,
"size": 5649
} |
\chapter{Luca}
\restartlist{enumerate}
\liteversiondetermination{Exclude}{%
\begin{enumerate}
\item \sd, go right and up to the next screen, \cs[2:30]. Don't save.
\item \sd\ in locker room. Don't do the tutorial. \sd\ by mashing another button (like \textbf{R1}) at the same time as confirm, walk down, \sd
\item Walk down to next screen, \sd. Whistle \cs[0:30], walk right to next screen.
\item \sd, run to the cafe. \sd, \skippablefmv+\cs[1:20], \sd
\item Run left to next screen, then left to the docks.
\item Talk to O'aka.
\end{enumerate}
}
\liteversiondetermination{Include}{%
\begin{enumerate}
\item Talk to O'aka on the first docks screen, before going into the Machina Fights. Do the following shop:
\end{enumerate}
}
\begin{shop}{10890}
\begin{itemize}
\item Sell
\begin{itemize}
\item All Weapons and Armor, including longsword.
\end{itemize}
\item Buy
\begin{itemize}
\item Stunning Steel, Equip
\end{itemize}
\item If you have 1100 gil left over, lend O'aka 1100 gil.
\end{itemize}
\end{shop}
\begin{enumerate}[resume]
\item Walk up after finishing with O'aka and grab the Chest on the north side of the dock.
\item Run to the next screen.
\end{enumerate}
\begin{battle}{Machina}
\begin{itemize}
\item \textit{For the first two encounters:}
\begin{itemize}
\tidusf Defend
\kimahrif Defend
\luluf Thunder
\end{itemize}
\item \textit{For the third encounter:}
\begin{itemize}
\item \textit{First Wave}
\begin{itemize}
\tidusf Attack
\kimahrif Attack
\luluf Thunder a different Machina
\tidusf Attack
\kimahrif \od\ Seed Cannon \textit{if no crits else} Attack
\end{itemize}
\item \textit{Second Wave}
\begin{itemize}
\tidusf Defend
\kimahrif Defend
\luluf Thunder
\end{itemize}
\item \textit{Third Wave}
\begin{itemize}
\tidusf Attack
\kimahrif Attack or \od\ Seed Cannon
\luluf Thunder a different Machina
\end{itemize}
\end{itemize}
\end{itemize}
\end{battle}
\liteversiondetermination{Exclude}{%
\begin{enumerate}[resume]
\item If anyone is Critical HP, use Potions.
\item Run right.
\end{enumerate}
}
\begin{battle}[3000]{Oblitzerator}
\begin{itemize}
\kimahrif Defend
\tidusf Defend
\luluf Thunder Crane x3
\tidusf Use Crane after 3 Thunders
\kimahrif Defend
\luluf Thunder
\tidusf Attack
\end{itemize}
Check for \textbf{Lightning Steel, Thunder Ball}
\end{battle}
\liteversiondetermination{Exclude}{%
\begin{enumerate}[resume]
\item \cs[2:00], \sd\ during and after Blitzball game.
\end{enumerate}
}
\vfill
\begin{equip}
\begin{itemize}
\item \textit{If you got Lightning Steel}
\begin{itemize}
\tidusf Lightning Steel
\end{itemize}
\item \textit{If you got Thunder Ball}
\begin{itemize}
\wakkaf Thunder Ball
\end{itemize}
\end{itemize}
\end{equip}
\liteversiondetermination{Exclude}{%
\begin{enumerate}[resume]
\item Run South for the next two screens. \save. Go up the stairs to the locker room, \sd
\item Go back into locker room, speak to \wakka, \sd, \cs[1:20]. \sd\ after \lulu\ scene. \cs[1:40] on Auron Entrance.
\end{enumerate}
\vfill
\
\columnbreak
\ \newline
\begin{blitzball}
\begin{itemize}
\item \textbf{First Half:}
\begin{itemize}
\item \textit{If Luca wins the Blitzoff:}
\begin{itemize}
\item Triangle, switch the mode to \textbf{Mark Mode}, and then \textbf{Left Side}
\end{itemize}
\item \textit{When you get the ball:}
\begin{itemize}
\item Change to \textbf{Manual A} and \textbf{Normal Mode}
\item down some, pass the ball to \tidus
\tidusf Swim next to Jassu, pass to Jassu
\item Hide behind the Goalie
\item If you aggroed a Goer, Swim Around
\end{itemize}
\end{itemize}
\item \sd\ during half time
\item \textbf{Second Half:}
\begin{itemize}
\item \textit{If Luca wins the Blitzoff:}
\begin{itemize}
\item Triangle, switch the mode to \textbf{Mark Mode}, and then \textbf{Right Side}
\end{itemize}
\item \textit{When you get the ball:}
\item Pass to Jassu if he doesn't have it
\item Swim to the Bottom Middle
\item Wait until 2:20, if Abus Aggros then Break
\item Swim to the Left, aggro Balgerda (bottom player), then swim back some
\item Pass to \tidus\ before Balgerda gets in range to block
\tidusf Swim close to the Goal and Sphere Shot before anyone is close enough to block
\begin{itemize}
\item If 1 Defender and 2:49, Sphere Shot over the Defender
\item Otherwise, Break and Sphere Shot
\item If 2 Defenders, Break 1, Sphere Shot
\end{itemize}
\item \sd\ during \wakka\ \cs
\item If you need to Score or it's 1-1, then do the same as above with Jassu
\item Wait until 4:20 then aggro Balgerda, Pass to \wakka
\wakkaf swim close and Venom Shot, or Break, Venom Shot
\end{itemize}
\item Don't try to score in the First Half
\item If you're losing, Change to \textbf{Mark Mode} and lose the game.
\end{itemize}
\end{blitzball}
\begin{enumerate}[resume]
\item \sd, Don't Save, \cs[1:00]
\end{enumerate}
}
\begin{battle}{Sahagin Chief}
\begin{itemize}
\tidusf Attack
\wakkaf Attack
\wakkaf Hi-potion anyone who falls below 200 HP
\end{itemize}
\end{battle}
\liteversiondetermination{Exclude}{%
\begin{enumerate}[resume]
\item \sd, \skippablefmv
\end{enumerate}
}
\begin{battle}[1800]{Garuda}
\begin{itemize}
\tidusf Attack
\wakkaf Dark Attack
\auronf Attack
\wakkaf Attack
\tidusf Spiral Cut on 3rd turn, if available
\tidusf Attack
\end{itemize}
\end{battle}
\liteversiondetermination{Exclude}{%
\begin{enumerate}[resume]
\item \cs+\skippablefmv[1:30]. Don't save. \sd\ the Auroch scene (PS2 only)
\item \cs[4:50]. Run north to the hidden chests, \pickup{Magic and HP Sphere}
\item Run South and try to speak to \auron\ while he's walking away.
\item Follow red arrow to \yuna. \sd\ during guardian scene. Walk to \yuna, \cs[4:20]
\end{enumerate}
}
\liteversiondetermination{Include}{%
\begin{enumerate}[resume]
\item \pickup{Magic and HP Sphere}
\end{enumerate}
} | {
"alphanum_fraction": 0.635686625,
"avg_line_length": 34.2551020408,
"ext": "tex",
"hexsha": "e5ff8df8fddfeb5334801ea7a21dff5ee5775bfc",
"lang": "TeX",
"max_forks_count": 1,
"max_forks_repo_forks_event_max_datetime": "2021-07-28T03:02:16.000Z",
"max_forks_repo_forks_event_min_datetime": "2021-07-28T03:02:16.000Z",
"max_forks_repo_head_hexsha": "8045824bbe960721865ddb9c216fe4e2377a2aae",
"max_forks_repo_licenses": [
"Apache-2.0"
],
"max_forks_repo_name": "HannibalSnekter/Final-Fantasy-Speedruns",
"max_forks_repo_path": "Final Fantasy X/Chapters_NSG/007_luca.tex",
"max_issues_count": null,
"max_issues_repo_head_hexsha": "8045824bbe960721865ddb9c216fe4e2377a2aae",
"max_issues_repo_issues_event_max_datetime": null,
"max_issues_repo_issues_event_min_datetime": null,
"max_issues_repo_licenses": [
"Apache-2.0"
],
"max_issues_repo_name": "HannibalSnekter/Final-Fantasy-Speedruns",
"max_issues_repo_path": "Final Fantasy X/Chapters_NSG/007_luca.tex",
"max_line_length": 143,
"max_stars_count": 1,
"max_stars_repo_head_hexsha": "8045824bbe960721865ddb9c216fe4e2377a2aae",
"max_stars_repo_licenses": [
"Apache-2.0"
],
"max_stars_repo_name": "HannibalSnekter/Final-Fantasy-Speedruns",
"max_stars_repo_path": "Final Fantasy X/Chapters_NSG/007_luca.tex",
"max_stars_repo_stars_event_max_datetime": "2021-11-04T01:45:47.000Z",
"max_stars_repo_stars_event_min_datetime": "2021-11-04T01:45:47.000Z",
"num_tokens": 2061,
"size": 6714
} |
\section{PyLith Application (\protect\object{PyLithApp})}
The top-level object is the PyLith application with three facilities:
\begin{inventory}
\facilityitem{mesher}{Importer for the finite-element mesh;}
\facilityitem{problem}{Problem to run, such as the materials, boundary conditions, etc.; and}
\facilityitem{petsc}{PETSc settings}
\end{inventory}
\subsection{Mesh Information (\facility{mesher})}
Geometrical and topological information for the finite element mesh
may be provided by exporting an Exodus II format file from
CUBIT/Trelis, by exporting a GMV file and an accompanying Pset file
from LaGriT, or by specifying the information in PyLith mesh ASCII
format. See Chapter \vref{cha:examples} for examples.
PyLith supports linear cells in 2D (Figure \vref{fig:2D:cells}), and
3D (Figure \vref{fig:3D:cells}). The vertex ordering must follow the
convention shown in Figures \vref{fig:2D:cells}-\vref{fig:3D:cells}.
PyLith no longer supports use of quadratic cells using the PyLith
ASCII mesh format. In the next release, we plan to support higher
order discretizations via PETSc finite-element features from meshes
with linear cells as input.
The mesh information defines the vertex coordinates and specifies
the vertices composing each cell in the mesh. The mesh information
must also define at least one set of vertices for which displacement
(Dirichlet) boundary conditions will be provided. In most realistic
problems, there will be several vertex groups, each with a unique
identifying label. For example, one group might define a surface of
the mesh where displacement (Dirichlet) boundary conditions will be
applied, another might define a surface where traction (Neumann) boundary
conditions will be applied, while a third might specify a surface
that defines a fault. Similarly, the mesh information contains cell
labels that define the material type for each cell in the mesh. For
a mesh with a single material type, there will only be a single label
for every cell in the mesh. See Chapters \vref{cha:material:models}
and \vref{cha:boundary:interface:conditions} for more detailed discussions
of setting the materials and boundary conditions.
\begin{figure}[htbp]
\includegraphics[scale=0.6]{runpylith/figs/tri3}\hspace*{0.5in}%
\includegraphics[scale=0.6]{runpylith/figs/quad4}
\caption{Linear cells available for 2D problems are the triangle
(left) and the quadrilateral (right).}
\label{fig:2D:cells}
\end{figure}
\begin{figure}[htbp]
\includegraphics[scale=0.6]{runpylith/figs/tet4}\hspace*{0.5in}%
\includegraphics[scale=0.6]{runpylith/figs/hex8}
\caption{Linear cells available for 3D problems are the tetrahedron (left)
and the hexahedron (right).}
\label{fig:3D:cells}
\end{figure}
\subsubsection{\object{Mesh Importer}}
The default mesher component is \object{MeshImporter}, which provides
the capabilities of reading the mesh from files. The \object{MeshImporter} has
several properties and facilities:
\begin{inventory}
\propertyitem{reorder\_mesh}{Reorder the vertices and cells using the
reverse Cuthill-McKee algorithm (default is False)}
\facilityitem{reader}{Reader for a given type of mesh (default is
\object{MeshIOAscii}).}
\facilityitem{distributor}{Handles
distribution of the mesh among processors.}
\facilityitem{refiner}{Perform global uniform mesh refinement after
distribution among processors (default is no refinement).}
\end{inventory}
Reordering the mesh so that vertices and cells connected topologically
also reside close together in memory improves overall performance
and can improve solver performance as well.
\userwarning{The coordinate system associated with the mesh must be a
Cartesian coordinate system, such as a generic Cartesian coordinate
system or a geographic projection.}
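For example, a minimal set of \object{MeshImporter} settings in a
\filename{cfg} file (the reader components are described below) might
look like:
\begin{cfg}[\object{MeshImporter} parameters in a \filename{cfg} file]
<h>[pylithapp.mesh_generator]</h>
<p>reorder_mesh</p> = True
<f>reader</f> = pylith.meshio.MeshIOCubit
\end{cfg}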
\subsubsection{\object{MeshIOAscii}}
The \object{MeshIOAscii} object is intended for reading small, simple
ASCII files containing a mesh constructed by hand. We use this file
format extensively in the examples. Appendix \vref{sec:format:MeshIOAscii}
describes the format of the files. The properties and facilities of
the \object{MeshIOAscii} object include:
\begin{inventory}
\propertyitem{filename}{Name of the mesh file.}
\facilityitem{coordsys}{Coordinate system associated with the mesh.}
\end{inventory}
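For example (the filename and spatial dimension here are only
placeholders for your own mesh):
\begin{cfg}[\object{MeshIOAscii} parameters in a \filename{cfg} file]
<h>[pylithapp.mesh_generator.reader]</h>
<p>filename</p> = mesh/mesh_tri3.txt
<p>coordsys.space_dim</p> = 2
\end{cfg}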
\subsubsection{\object{MeshIOCubit}}
\label{sec:MeshIOCubit}
The \object{MeshIOCubit} object reads the NetCDF Exodus II files output from
CUBIT/Trelis. Beginning with CUBIT 11.0, the names of the nodesets are included
in the Exodus II files and PyLith can use these nodeset names or revert
to using the nodeset ids. The properties and facilities associated
with the \object{MeshIOCubit} object are:
\begin{inventory}
\propertyitem{filename}{Name of the Exodus II file.}
\propertyitem{use\_nodeset\_names}{Identify nodesets by name rather than id
(default is True).}
\facilityitem{coordsys}{Coordinate system associated with the mesh.}
\end{inventory}
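For example (the filename is a placeholder for your own Exodus II
file):
\begin{cfg}[\object{MeshIOCubit} parameters in a \filename{cfg} file]
<h>[pylithapp.mesh_generator]</h>
<f>reader</f> = pylith.meshio.MeshIOCubit
<h>[pylithapp.mesh_generator.reader]</h>
<p>filename</p> = mesh/mesh_tet.exo
<p>use_nodeset_names</p> = True
\end{cfg}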
\subsubsection{\object{MeshIOLagrit}}
\label{sec:MeshIOLagrit}
The \object{MeshIOLagrit} object is used to read ASCII and binary GMV and PSET
files output from LaGriT. PyLith will automatically detect whether
the files are ASCII or binary. We attempt to provide support for experimental
64-bit versions of LaGriT via flags indicating whether the FORTRAN
code is using 32-bit or 64-bit integers. The \object{MeshIOLagrit} properties
and facilities are:
\begin{inventory}
\propertyitem{filename\_gmv}{Name of GMV file.}
\propertyitem{filename\_pset}{Name of the PSET file.}
\propertyitem{flip\_endian}{Flip the endian of values when reading
binary files (default is False).}
\propertyitem{io\_int32}{Flag
indicating that PSET files use 32-bit integers (default is True).}
\propertyitem{record\_header\_32bit}{Flag indicating FORTRAN record header is
32-bit (default is True).}
\facilityitem{coordsys}{Coordinate system associated with mesh.}
\end{inventory}
\userwarning{The PyLith developers have not used LaGriT since around 2008
and the most recent release appears to have been in 2010.}
\subsubsection{\object{Distributor}}
The distributor uses a partitioner to compute which cells should be
placed on each processor, computes the overlap among the processors,
and then distributes the mesh among the processors. The type of
partitioner is set via PETSc settings. The properties and facilities
of the \object{Distributor} include:
\begin{inventory}
\propertyitem{partitioner}{Name of mesh partitioner ['chaco','parmetis'].}
\propertyitem{write\_partition}{Flag indicating that the partition information
should be written to a file (default is False).}
\facilityitem{data\_writer}{Writer for partition information (default
is \object{DataWriterVTK} for VTK output).}
\end{inventory}
\begin{cfg}[\object{Distributor} parameters in a \filename{cfg} file]
<h>[pylithapp.mesh_generator.distributor]</h>
<p>partitioner</p> = chaco ; Options are 'chaco' (default) and 'parmetis'.
\end{cfg}
METIS/ParMETIS are not included in the PyLith binaries due to licensing
issues.
\subsubsection{\object{Refiner}}
The refiner is used to decrease node spacing by a power of two by
recursively subdividing each cell by a factor of two. In a 2D triangular
mesh a node is inserted at the midpoint of each edge, splitting each
cell into four cells (see Figure \vref{fig:uniform:refinement:2x}).
In a 2D quadrilateral mesh a node is inserted at the midpoint of each
edge and at the centroid of the cell, splitting each cell into four
cells. In a 3D tetrahedral mesh a node is inserted at the midpoint
of each edge, splitting each cell into eight cells. In a 3D hexahedral
mesh a node is inserted at the midpoint of each edge, the centroid
of each face, and at the centroid of the cell, splitting each cell
into eight cells.
\begin{figure}[htbp]
\includegraphics[scale=0.6]{runpylith/figs/refinement2x}
\caption{Global uniform mesh refinement of 2D and 3D linear
cells. The blue lines and orange circles identify the edges and
vertices in the original cells. The purple lines and green circles
identify the new edges and vertices added to the original cells to
refine the mesh by a factor of two.}
\label{fig:uniform:refinement:2x}
\end{figure}
Refinement occurs after distribution of the mesh among processors.
This allows one to run much larger simulations by (1) permitting the
mesh generator to construct a mesh with a node spacing larger than
that needed in the simulation and (2) allowing operations performed in
serial during the simulation setup phase, such as adjusting the
topology to insert cohesive cells and distributing the mesh among
processors, to use this much smaller coarse mesh. For 2D problems the global mesh
refinement increases the maximum problem size by a factor of $4^{n}$,
and for 3D problems it increases the maximum problem size by a factor
of $8^{n}$, where $n$ is the number of recursive refinement levels.
For a tetrahedral mesh, the element quality decreases with refinement
so $n$ should be limited to 1-2.
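Global uniform refinement is enabled by setting the \facility{refiner}
facility on the mesh generator; a minimal example using the
\object{RefineUniform} component is:
\begin{cfg}[Uniform refinement parameters in a \filename{cfg} file]
<h>[pylithapp.mesh_generator]</h>
<f>refiner</f> = pylith.topology.RefineUniform
\end{cfg}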
\subsection{Problem Specification (\protect\facility{problem})}
The problem component specifies the basic parameters of the simulation,
including the physical properties, the boundary conditions, and interface
conditions (faults). The current release of PyLith contains two types
of problems, \object{TimeDependent} for use in static, quasistatic,
and dynamic simulations and \object{GreensFns} for computing static
Green's functions. The general properties and facilities include:
\begin{inventory}
\propertyitem{solver}{Type of solver to use ({\tt linear} or {\tt nonlinear});}
\facilityitem{solution}{Solution field;}
\facilityitem{normalizer}{Scales used to nondimensionalize the
problem (default is \object{NondimElasticQuasistatic});}
\facilityitem{materials}{Array of materials comprising the domain
(default is [material]);}
\facilityitem{bc}{Array of boundary conditions (default is none);}
\facilityitem{interfaces}{Array of interface conditions, i.e., faults
(default is none); and}
\facilityitem{gravity\_field}{Gravity field used to construct body
forces (default=\object{NullComponent});}
\end{inventory}
\begin{cfg}[Problem parameters in a \filename{cfg} file]
<h>[pylithapp.timedependent]</h>
<p>solver</p> = linear
<f>solution</f> = pylith.problems.SolnDisp
<f>normalizer</f> = spatialdata.units.NondimElasticQuasistatic
<f>materials</f> = [elastic, viscoelastic]
<f>bc</f> = [boundary_east, boundary_bottom, boundary_west]
<f>interfaces</f> = [SanAndreas, SanJacinto]
<f>gravity_field</f> = spatialdata.spatialdb.GravityField
\end{cfg}
The following sections discuss the \facility{solution} and
\facility{normalizer}. Materials, boundary conditions, and interface
conditions are discussed in Chapter \vref{cha:physics} and the gravity
field spatial database is discussed in Section
\vref{sec:gravity:field}.
\subsubsection{Solution Field (\facility{solution})}
The \facility{solution} facility specifies the subfields of the solution
field along with their discretization. Table
\vref{tab:solution:containers} shows predefined containers for common
subfield collections. Users can create their own containers if they
add different material formulations.
\important{The order of the subfields within the solution field must
be consistent between the \object{Solution} field object and the
point-wise functions. The predefined containers are setup to help
ensure that this is true.}
\important{When a Lagrange multiplier for fault interfaces is
  included, it should always be the last solution subfield. It is a
  special case, because it is discretized only on the cohesive
  cells and not over the entire domain.}
\begin{table}[htbp]
\caption{Predefined containers for solution subfields.}
\label{tab:solution:containers}
\begin{tabular}{lll}
\toprule
\thead{Object} & \thead{Subfields} & \thead{Use Cases} \\
\midrule
\object{SolnDisp} & displacement & Elasticity w/o inertia and faults \\
\object{SolnDispVel} & displacement, velocity & Elasticity w/inertia and w/o faults \\
\object{SolnDispLagrange} & displacement, lagrange\_fault & Elasticity w/faults \\
\object{SolnDispVelLagrange} & displacement, velocity, lagrange\_fault & Elasticity w/inertia and w/faults \\
\object{SolnDispPres} & displacement, pressure & Incompressible elasticity w/o faults \\
\object{SolnDispPresLagrange} & displacement, pressure, lagrange\_fault & Incompressible elasticity w/faults \\
\bottomrule
\end{tabular}
\end{table}
Each subfield is a \object{SolutionSubfield} object with the following properties:
\begin{inventory}
\propertyitem{alias}{User-specified name for subfield to use in
output (default is the PyLith-specified name);}
\propertyitem{basis\_order}{Order for basis functions (default=1);}
\propertyitem{quadrature\_order}{Order of quadrature to use in
integration (default=1);}
\propertyitem{dimension}{Topological dimension
associated with subfield (default=-1 and should not be
changed); and}
\propertyitem{finite\_element\_space}{Finite-element space ({\tt
    polynomial} or {\tt point}; default={\tt polynomial}). Point space corresponds to delta
  functions at the quadrature points;}
\end{inventory}
\begin{cfg}[Setting discretization information for a \object{SolnDispLagrange} component in a \filename{cfg} file]
<h>[pylithapp.problem]</h>
<f>solution</f> = pylith.problems.SolnDispLagrange
<h>[pylithapp.problem.solution.subfields]</h>
<p>displacement.basis_order</p> = 1
<p>displacement.quadrature_order</p> = 1
<p>lagrange_fault.basis_order</p> = 1
<p>lagrange_fault.quadrature_order</p> = 1
\end{cfg}
\subsubsection{Nondimensionalization (\facility{normalizer})}
PyLith rescales all parameters provided by the user so that the
simulation solves the equations using nondimensional quantities. This
permits application of PyLith to problems across a vast range of
spatial and temporal scales. The scales used to nondimensionalize the
problem are length, pressure, density, and time. PyLith provides two
normalizer objects to make it easy to provide reasonable scales for
the nondimensionalization. The \object{NondimElasticQuasistatic}
normalizer (which is the default) has the following properties:
\begin{inventory}
\propertyitem{length\_scale}{Distance to nondimensionalize length
(default is 1.0 km).}
\propertyitem{shear\_modulus}{Shear modulus to nondimensionalize
pressure (default is 3.0e+10 Pa).}
\propertyitem{relaxation\_time}{Relaxation time to
nondimensionalize time (default is 1.0 year).}
\end{inventory}
\begin{cfg}[\object{NondimElasticQuasistatic} parameters in a \filename{cfg} file]
<h>[pylithapp.timedependent.normalizer]</h>
<p>length_scale</p> = 1.0*km
<p>shear_modulus</p> = 3.0e+10*Pa
<p>relaxation_time</p> = 1.0*yr
\end{cfg}
The \object{NondimElasticDynamic} normalizer has the following
properties:
\begin{inventory}
\propertyitem{shear\_wave\_speed}{Shear wave speed used to
nondimensionalize length and pressure (default is 3.0 km/s).}
\propertyitem{mass\_density}{Mass density to nondimensionalize
density and pressure (default is 3.0e+3 kg/m$^{3}$).}
\propertyitem{wave\_period}{Period of seismic waves used to
nondimensionalize time (default is 1.0 s).}
\end{inventory}
\begin{cfg}[\object{NondimElasticDynamic} parameters in a \filename{cfg} file]
<h>[pylithapp.timedependent.normalizer]</h>
<p>shear_wave_speed</p> = 3.0*km/s
<p>mass_density</p> = 3.0e+3*kg/m**3
<p>wave_period</p> = 1.0*s
\end{cfg}
\important{The default nondimensionalization is reasonable for many
problems; however, it may be necessary to change the default values
in some cases. When doing this, keep in mind that the
nondimensionalization generally applies to the minimum values
encountered for a problem. For example, in a quasistatic problem,
the \property{length\_scale} should be on the order of the minimum
cell size. Similarly, the \property{relaxation\_time} should be on
the order of the minimum relaxation time or time scale associated
with time-dependent boundary and interface conditions.}
\subsubsection{Solution Observers (\facility{solution\_observers})}
\label{sec:solution:observers}
The solution observers are notified of updates to the solution. Table
\vref{tab:solution:observers} lists the current implementations of
solution observers, which are used for output.
\begin{table}[htbp]
\caption{Solution observers.}
\label{tab:solution:observers}
\begin{tabular}{ll}
\toprule
\thead{Object} & \thead{Use Cases} \\
\midrule
\object{OutputSoln} & Output of the solution over the domain \\
\object{OutputSolnBoundary} & Output of the solution over an external boundary \\
\object{OutputSolnPoints} & Output of the solution at discrete points \\
\bottomrule
\end{tabular}
\end{table}
All of the solution observers have the following properties and facilities:
\begin{inventory}
  \propertyitem{data\_fields}{List of solution subfields to observe/output (default=all, which will output all of the subfields);}
\facilityitem{writer}{Writer for data (default=\object{DataWriterHDF5});}
\facilityitem{trigger}{Trigger defining how often output is written (default=\object{OutputTriggerStep}); and}
\facilityitem{field\_filter}{Filter for output fields (default=\object{FieldFilterNone}).}
\end{inventory}
\object{OutputSolnBoundary} adds a property:
\begin{inventory}
\propertyitem{label}{Label (name of nodeset/pset) identifier of boundary (required);}
\end{inventory}
See Section \vref{sec:output} for detailed information about the
available components for the \facility{writer}, \facility{trigger},
and \facility{field\_filter} facilities.
\begin{cfg}[Setting \object{OutputSolnBoundary} parameters in a \filename{cfg} file]
<h>[pylithapp.problem.solution_observers.boundary]</h>
<p>label</p> = boundary_groundsurf
<p>writer.filename</p> = output/step01-groundsurf.h5
\end{cfg}
\paragraph{Output at Arbitrary Points (\protect\object{OutputSolnPoints})}
\label{sec:output:points}
In many situations with recorded observations, one would like to
extract the solution at the same locations as the recorded
observation. Rather than forcing the finite-element discretization to
be consistent with the observation points, PyLith includes a
specialized solution observer, \object{OutputSolnPoints}, to interpolate
the solution to arbitrary points. The locations
are specified in a text file. The \object{OutputSolnPoints} observer
includes:
\begin{inventory}
  \propertyitem{data\_fields}{List of solution subfields to
    observe/output (default=all, which will output all of the
    subfields); and}
  \facilityitem{reader}{Reader for the points list (default
    is \object{PointsList}).}
\end{inventory}
\subsubsection{\object{PointsList} Reader}
This object corresponds to a simple text file containing a list of
points (one per line) where output is desired. See \vref{sec:format:PointsList}
for file format specifications. The points are specified in the coordinate
system specified by \object{OutputSolnPoints}. The coordinates will be transformed
into the coordinate system of the mesh prior to interpolation. The
properties available to customize the behavior of \object{PointsList}
are:
\begin{inventory}
  \propertyitem{filename}{Name of the file containing the list of points.}
\propertyitem{comment\_delimiter}{Delimiter at beginning of line to identify
comments (default is \#).}
\propertyitem{value\_delimiter}{Delimiter used to separate values (default is
whitespace).}
\end{inventory}
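As a hypothetical illustration (the observer alias \facility{points}, the
component path, and the file names are placeholders that must be adjusted to
match your simulation), the reader could be configured in a \filename{cfg}
file as follows:
\begin{cfg}[Possible \object{OutputSolnPoints} parameters in a \filename{cfg} file]
<h>[pylithapp.problem.solution_observers.points]</h>
<p>reader.filename</p> = output_points.txt
<p>writer.filename</p> = output/step01-points.h5
\end{cfg}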
\subsection{PETSc Settings (\protect\facility{petsc})}
\label{sec:petsc:options}
PyLith relies on PETSc for the finite-element data structures, linear
and nonlinear solvers, and time-stepping algorithms. PETSc has its own
object-oriented interface for specifying runtime options. Instead of
trying to maintain a Pyre interface to all of the PETSc options, we
use a single \facility{petsc} facility to collect all of the PETSc
options and pass them to PETSc.
PETSc time-stepping options are discussed in Section
\vref{sec:problems:timedependent}.
\subsubsection{Monitor/Logging Settings}
Table \vref{tab:petsc:options:monitor} shows the main monitoring
options offered by PETSc. Our recommended settings for all simulations
include:
\begin{cfg}[Recommended PETSc monitoring settings as set in a \filename{cfg} file.]
<h>[pylithapp.petsc]</h>
# Trigger errors if linear or nonlinear solver fails to converge.
<p>ksp_error_if_not_converged</p> = true
<p>snes_error_if_not_converged</p> = true
# Monitor converged reasons
<p>ksp_converged_reason</p> = true
<p>snes_converged_reason</p> = true
# Monitor time-stepping and nonlinear solver
<p>ts_monitor</p> = true
<p>snes_monitor</p> = true
<p>snes_linesearch_monitor</p> = true
\end{cfg}
When optimizing and troubleshooting solver settings, we usually turn on all the monitoring.
\begin{table}[htbp]
\caption{Description of PETSc monitoring settings.}
\label{tab:petsc:options:monitor}
\begin{tabular}{lp{4.0in}}
\toprule
\thead{Option} & \thead{Description} \\
\midrule
% log
\property{log\_view} & Show logging objects and events. \\
% TS
\property{ts\_monitor} & Show time-stepping progress. \\
% KSP
\property{ksp\_monitor} & Show preconditioned residual norm. \\
\property{ksp\_view} & Show linear solver parameters. \\
\property{ksp\_error\_if\_not\_converged} & Generate an error if linear solver does not converge. \\
\property{ksp\_converged\_reason} & Indicate why iterating stopped in linear solve. \\
% SNES
\property{snes\_monitor} & Show residual norm for each nonlinear solve iteration. \\
\property{snes\_view} & Show nonlinear solver parameters. \\
\property{snes\_error\_if\_not\_converged} & Generate an error if nonlinear solver does not converge. \\
\property{snes\_converged\_reason} & Indicate why iterating stopped in nonlinear solve. \\
\property{snes\_linesearch\_monitor} & Show line search information in nonlinear solve. \\
\bottomrule
\end{tabular}
\end{table}
\subsubsection{Solver Settings}
For most problems we use the GMRES method from Saad and Schultz for
the linear solver with solver tolerances around 1.0e-10. When running
large problems, we often raise the solver tolerances by one or two
orders of magnitude to reduce runtime while still achieving suitable
accuracy.
See
\href{http://www.mcs.anl.gov/petsc/petsc-as/documentation/linearsolvertable.html}{PETSc
linear solver table} for a list of PETSc options for linear solvers
and preconditioners.
\usertip{It is important to keep in mind the resolution of the model and
observations when setting solver tolerances. For example, matching
observations with an accuracy of 1.0\si{\milli\meter} does not
require solving the equations to an accuracy of
0.0001\si{\milli\meter}.}
\begin{table}[htbp]
\caption{Recommended starting point for PETSc solver tolerances.}
\label{tab:petsc:options:solver}
\begin{tabular}{lcp{4.5in}}
\toprule
\thead{Property} & \thead{Value} & \thead{Description} \\
\midrule
% KSP
\property{ksp\_rtol} & 1.0e-10 & Stop iterating when the preconditioned KSP residual norm has decreased by this factor relative to its starting value.\\
\property{ksp\_atol} & 1.0e-12 & Stop iterating when the preconditioned KSP residual norm is smaller than this value.\\
% SNES
\property{snes\_rtol} & 1.0e-10 & Stop iterating when the SNES residual norm has decreased by this factor relative to its starting value.\\
\property{snes\_atol} & 1.0e-10 & Stop iterating when the SNES residual norm is smaller than this value.\\
\bottomrule
\end{tabular}
\end{table}
\paragraph{Settings for small problems}
When running small test problems (about 1,000 unknowns or fewer), it is very
handy to use a robust preconditioner so that issues related to the boundary
conditions, material parameters, etc.\ are more obvious. We recommend
using incomplete LU (ILU) factorization.
\begin{cfg}[Recommended PETSc solver settings for small problems]
<h>[pylithapp.petsc]</h>
<p>pc_type</p> = ilu
<p>ksp_type</p> = gmres
\end{cfg}
\paragraph{Settings for medium problems}
When running slightly larger problems (about 10,000 unknowns or fewer),
the Additive Schwarz Method (ASM) with an incomplete LU (ILU)
factorization preconditioner is usually more efficient.
\begin{cfg}[Recommended PETSc solver settings for medium problems]
<h>[pylithapp.petsc]</h>
<p>pc_type</p> = asm
<p>ksp_type</p> = gmres
\end{cfg}
\paragraph{Efficient settings for elasticity without a fault}
An algebraic multigrid preconditioner usually works very well for
elasticity problems.
\begin{cfg}[Recommended PETSc solver settings for solving elasticity problems without a fault]
<h>[pylithapp.petsc]</h>
<p>pc_type</p> = ml
<p>ksp_type</p> = gmres
\end{cfg}
\important{The ML algebraic multigrid preconditioner is only available
if you build PETSc with the ML package. These features are included
in the PyLith binary packages.}
\paragraph{Efficient settings for elasticity with a fault}
The Lagrange multiplier solution subfield introduces a saddle point in
the system of equations, so we use a Schur complement approach. These
settings are available in
\filename{\$PYLITH\_DIR/share/settings/solver\_fault\_exact.cfg}.
\begin{cfg}[Recommended PETSc solver settings for solving elasticity problems with a fault]
<h>[pylithapp.petsc]</h>
<p>pc_type</p> = fieldsplit
<p>pc_use_amat</p> = true
<p>pc_fieldsplit_type</p> = schur
<p>pc_fieldsplit_schur_factorization_type</p> = full
<p>pc_fieldsplit_dm_splits</p> = true
<p>fieldsplit_displacement_ksp_type</p> = preonly
<p>fieldsplit_displacement_pc_type</p> = lu
<p>fieldsplit_lagrange_multiplier_fault_pc_type</p> = jacobi
<p>fieldsplit_lagrange_multiplier_fault_ksp_type</p> = gmres
<p>fieldsplit_lagrange_multiplier_fault_ksp_rtol</p> = 1.0e-11
<p>fieldsplit_lagrange_multiplier_fault_ksp_converged_reason</p> = true
\end{cfg}
\userwarning{The split fields and algebraic multigrid preconditioning
  currently fail in problems with a nonzero null space. This most
often occurs when a problem contains multiple faults that extend
through the entire domain and create subdomains without any
Dirichlet boundary conditions. The current workaround is to use the
Additive Schwarz preconditioner without split fields. See Section
\vref{sec:Troubleshooting} for the error message encountered in this
situation.}
\paragraph{Efficient settings for incompressible elasticity}
The pressure solution subfield introduces a saddle point in the system
of equations, so we again use a Schur complement approach. This time
we can use algebraic multigrid preconditioning on each block. These
settings are available in
\filename{\$PYLITH\_DIR/share/settings/solver\_incompressible\_elasticity.cfg}.
\begin{cfg}[Recommended PETSc solver settings for solving incompressible elasticity problems without a fault]
<h>[pylithapp.petsc]</h>
<p>pc_type</p> = fieldsplit
<p>pc_fieldsplit_type</p> = schur
<p>pc_fieldsplit_schur_fact_type</p> = full
<p>pc_fieldsplit_schur_precondition</p> = full
<p>fieldsplit_displacement_pc_type</p> = lu
<p>fieldsplit_pressure_pc_type</p> = lu
\end{cfg}
% End of file
| {
"alphanum_fraction": 0.7800227765,
"avg_line_length": 43.9757673667,
"ext": "tex",
"hexsha": "99fef87158f2c6b866c6c3c1043423303dd46d28",
"lang": "TeX",
"max_forks_count": null,
"max_forks_repo_forks_event_max_datetime": null,
"max_forks_repo_forks_event_min_datetime": null,
"max_forks_repo_head_hexsha": "f6338261b17551eba879da998a5aaf2d91f5f658",
"max_forks_repo_licenses": [
"MIT"
],
"max_forks_repo_name": "Grant-Block/pylith",
"max_forks_repo_path": "doc/userguide/runpylith/pylithapp.tex",
"max_issues_count": null,
"max_issues_repo_head_hexsha": "f6338261b17551eba879da998a5aaf2d91f5f658",
"max_issues_repo_issues_event_max_datetime": null,
"max_issues_repo_issues_event_min_datetime": null,
"max_issues_repo_licenses": [
"MIT"
],
"max_issues_repo_name": "Grant-Block/pylith",
"max_issues_repo_path": "doc/userguide/runpylith/pylithapp.tex",
"max_line_length": 143,
"max_stars_count": null,
"max_stars_repo_head_hexsha": "f6338261b17551eba879da998a5aaf2d91f5f658",
"max_stars_repo_licenses": [
"MIT"
],
"max_stars_repo_name": "Grant-Block/pylith",
"max_stars_repo_path": "doc/userguide/runpylith/pylithapp.tex",
"max_stars_repo_stars_event_max_datetime": null,
"max_stars_repo_stars_event_min_datetime": null,
"num_tokens": 7088,
"size": 27221
} |
% User Defined Settings
\def\iapyear{2009} % Contest year
\def\iapstart{4} % Date of first Sunday
% Macros
\def \ohm{$\Omega$ }
\def \u{$\mu$ }
\def \uF{$\mu F$ }
\def \pF{$pF$ }
% Margins
%\usepackage{fullpage}
\setlength{\topmargin}{0in}
\setlength{\textheight}{8in}
\setlength{\oddsidemargin}{.25in}
\setlength{\evensidemargin}{0in}
\setlength{\textwidth}{6.25in}
% Preface macros
\newcommand{\titlep}{
\thispagestyle{empty}
\vspace*{\stretch{1}}
\rule{\textwidth}{2pt}
{\Huge\bf \begin{center}
2009 Autonomous Robot Design Competition \\
One Small Step \\
\end{center}
}
\rule{\textwidth}{2pt}
\vspace*{\stretch{1}}
{\sf
\begin{flushright}
{\Large \sf Course Notes}\\[1mm]
Last Updated: \today
\end{flushright}
}
\vspace*{\stretch{2}}
\begin{center}
{\footnotesize Massachusetts Institute of Technology\\
{\tt http://web.mit.edu/6.270/www/}}
\end{center}
\cleardoublepage
}
\newcommand{\beforepreface}{
\pagenumbering{roman}
\pagestyle{plain}
\titlep
}
\newcommand{\prefacesection}[1]{
\chapter*{#1}
\addcontentsline{toc}{chapter}{#1}
}
\newcommand{\afterpreface}{
\newpage
\pagenumbering{arabic}
\pagestyle{headings}}
\newcommand{\contents}{
\newpage
\tableofcontents
\newpage
{\addvspace{10pt}
\let\saveaddvspace=\addvspace
\def\addvspace##1{}
\listoffigures
\let\addvspace=\saveaddvspace}
\newpage
}
% Settings
%\pssilent ##outdated latex input
\newcounter{boxcnt}
\newenvironment{boxlist}{\begin{list}{{\bf \arabic{boxcnt}--}{\Large $\Box$}}{\usecounter{boxcnt}}}{\end{list}}
\newenvironment{circlist}{\begin{list}{$\bigcirc$}{}}{\end{list}}
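% \iapdate{n} typesets the weekday and calendar date (with ordinal suffix)
% for day n of IAP, counting day 1 as the first Sunday (\iapstart);
% e.g., with \iapstart=4, \iapdate{1} gives "Sunday, January 4th".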
\def \iapdate#1{
\newcount\x\newcount\y
\x=#1\advance\x by -1
\y=\x\divide\y by 7\multiply\y by 7\advance\x by -\y
\ifcase\x Sunday\or Monday\or Tuesday\or Wednesday\or
Thursday\or Friday\or Saturday\fi,
\x=#1 \advance\x by \iapstart \advance\x by -1
\ifnum\x<32 January \else February \advance\x by -31 \fi
\number\x
\y=\x \divide\y by 10 \multiply\y by 10 \advance\x by -\y
\ifnum \x>3 \x=0 \fi
\ifnum \y=10 \x=0 \fi
\ifcase\x th\or st\or nd\or rd\fi
}
| {
"alphanum_fraction": 0.7001962709,
"avg_line_length": 21.0103092784,
"ext": "tex",
"hexsha": "b64b8ea001422a7f95a85f07e1b774161c578c18",
"lang": "TeX",
"max_forks_count": 2,
"max_forks_repo_forks_event_max_datetime": "2020-10-12T23:20:02.000Z",
"max_forks_repo_forks_event_min_datetime": "2016-01-20T04:29:35.000Z",
"max_forks_repo_head_hexsha": "94593ddabf7e6b53b568b3b4a04ca9ed6df123f0",
"max_forks_repo_licenses": [
"MIT"
],
"max_forks_repo_name": "sixtwoseventy/joyos",
"max_forks_repo_path": "src/coursenotes/settings.tex",
"max_issues_count": null,
"max_issues_repo_head_hexsha": "94593ddabf7e6b53b568b3b4a04ca9ed6df123f0",
"max_issues_repo_issues_event_max_datetime": null,
"max_issues_repo_issues_event_min_datetime": null,
"max_issues_repo_licenses": [
"MIT"
],
"max_issues_repo_name": "sixtwoseventy/joyos",
"max_issues_repo_path": "src/coursenotes/settings.tex",
"max_line_length": 111,
"max_stars_count": 5,
"max_stars_repo_head_hexsha": "f19624be074d2ab10c89786e75daef0ef4e48eb9",
"max_stars_repo_licenses": [
"MIT"
],
"max_stars_repo_name": "viccro/andthebitches",
"max_stars_repo_path": "src/coursenotes/settings.tex",
"max_stars_repo_stars_event_max_datetime": "2020-03-04T02:16:16.000Z",
"max_stars_repo_stars_event_min_datetime": "2016-02-01T06:43:17.000Z",
"num_tokens": 750,
"size": 2038
} |
\documentclass[color=usenames,dvipsnames]{beamer}\usepackage[]{graphicx}\usepackage[]{color}
% maxwidth is the original width if it is less than linewidth
% otherwise use linewidth (to make sure the graphics do not exceed the margin)
\makeatletter
\def\maxwidth{ %
\ifdim\Gin@nat@width>\linewidth
\linewidth
\else
\Gin@nat@width
\fi
}
\makeatother
\definecolor{fgcolor}{rgb}{0, 0, 0}
\newcommand{\hlnum}[1]{\textcolor[rgb]{0.69,0.494,0}{#1}}%
\newcommand{\hlstr}[1]{\textcolor[rgb]{0.749,0.012,0.012}{#1}}%
\newcommand{\hlcom}[1]{\textcolor[rgb]{0.514,0.506,0.514}{\textit{#1}}}%
\newcommand{\hlopt}[1]{\textcolor[rgb]{0,0,0}{#1}}%
\newcommand{\hlstd}[1]{\textcolor[rgb]{0,0,0}{#1}}%
\newcommand{\hlkwa}[1]{\textcolor[rgb]{0,0,0}{\textbf{#1}}}%
\newcommand{\hlkwb}[1]{\textcolor[rgb]{0,0.341,0.682}{#1}}%
\newcommand{\hlkwc}[1]{\textcolor[rgb]{0,0,0}{\textbf{#1}}}%
\newcommand{\hlkwd}[1]{\textcolor[rgb]{0.004,0.004,0.506}{#1}}%
\let\hlipl\hlkwb
\usepackage{framed}
\makeatletter
\newenvironment{kframe}{%
\def\at@end@of@kframe{}%
\ifinner\ifhmode%
\def\at@end@of@kframe{\end{minipage}}%
\begin{minipage}{\columnwidth}%
\fi\fi%
\def\FrameCommand##1{\hskip\@totalleftmargin \hskip-\fboxsep
\colorbox{shadecolor}{##1}\hskip-\fboxsep
% There is no \\@totalrightmargin, so:
\hskip-\linewidth \hskip-\@totalleftmargin \hskip\columnwidth}%
\MakeFramed {\advance\hsize-\width
\@totalleftmargin\z@ \linewidth\hsize
\@setminipage}}%
{\par\unskip\endMakeFramed%
\at@end@of@kframe}
\makeatother
\definecolor{shadecolor}{rgb}{.97, .97, .97}
\definecolor{messagecolor}{rgb}{0, 0, 0}
\definecolor{warningcolor}{rgb}{1, 0, 1}
\definecolor{errorcolor}{rgb}{1, 0, 0}
\newenvironment{knitrout}{}{} % an empty environment to be redefined in TeX
\usepackage{alltt}
%\documentclass[color=usenames,dvipsnames,handout]{beamer}
%\usepackage[roman]{../../lab1}
\usepackage[sans]{../../lab1}
\hypersetup{pdftex,pdfstartview=FitV}
%% New command for inline code that isn't to be evaluated
\definecolor{inlinecolor}{rgb}{0.878, 0.918, 0.933}
\newcommand{\inr}[1]{\colorbox{inlinecolor}{\texttt{#1}}}
\IfFileExists{upquote.sty}{\usepackage{upquote}}{}
\begin{document}
%\section{Intro}
\begin{frame}[plain]
% \maketitle
\LARGE
\centering \par
{\bf \color{RoyalBlue}{Lab 5 -- Assumptions of ANOVA}} \par
\vspace{1cm}
\Large
% September 17 \& 18, 2018 \\
FANR 6750 \\
\vfill
\large
Richard Chandler and Bob Cooper
\end{frame}
\begin{frame}[plain]
\frametitle{Today's Topics}
\Large
\only<1>{\tableofcontents}%[hideallsubsections]}
% \only<2 | handout:0>{\tableofcontents[currentsection]}%,hideallsubsections]}
\end{frame}
\section{Assumptions of ANOVA}
%% \begin{frame}[fragile]
%% \frametitle{Is it normally distributed?}
%% \small
%% <<boxfor0,fig=true,include=false>>=
%% boxplot(forestCover$pcForest, col="lightgreen",
%% ylab="Percent forest cover", main="", cex.lab=1.5)
%% @
%% \vspace{-0.5cm}
%% \centering
%% \includegraphics[width=0.7\textwidth]{lab05-assump-nonpar-boxfor0} \\
%% \end{frame}
%% \begin{frame}[fragile]
%% \frametitle{Is it normally distributed?}
%% \small
%% <<histfor0,fig=true,include=false>>=
%% hist(forestCover$pcForest, col="lightgreen", cex.lab=1.5,
%% xlab="Percent forest cover", main="")
%% @
%% \vspace{-0.5cm}
%% \centering
%% \includegraphics[width=0.7\textwidth]{lab05-assump-nonpar-histfor0} \\
%% \end{frame}
%% \begin{frame}[fragile]
%% \frametitle{Is it normally distributed?}
%% <<>>=
%% shapiro.test(forestCover$pcForest)
%% @
%% \end{frame}
\begin{frame}
\frametitle{Assumptions of ANOVA}
{%\bf
A common misconception is that the response variable must be
normally distributed when conducting an ANOVA.}
\pause
\vfill
{%\bf
This is incorrect because the normality assumptions pertain to
the {\it residuals}, \alert{not} the response variable.
\pause
\vfill
The key assumption of
ANOVA is that the residuals are independent and come from a normal
distribution with mean 0 and variance $\sigma^2$.}
\pause
\large
\[
y_{ij} = \mu + \alpha_i + \varepsilon_{ij}
\]
\[
\varepsilon_{ij} \sim \text{Normal}(0, \sigma^2)
\]
\pause
%\vfill
\normalsize
{%\bf
We can assess this assumption by looking at the residuals
themselves or by looking at the response variable for each treatment group. }
\end{frame}
\begin{frame}[fragile]
\frametitle{A fake dataset}
\small
% {%\bf %\large
Consider the data: %}
% \scriptsize %\small
\begin{knitrout}\tiny
\definecolor{shadecolor}{rgb}{0.878, 0.918, 0.933}\color{fgcolor}\begin{kframe}
\begin{alltt}
\hlstd{infectionRates} \hlkwb{<-} \hlkwd{read.csv}\hlstd{(}\hlstr{"infectionRates.csv"}\hlstd{)}
\hlkwd{str}\hlstd{(infectionRates)}
\end{alltt}
\begin{verbatim}
## 'data.frame': 90 obs. of 2 variables:
## $ percentInfected: num 0.21 0.25 0.17 0.26 0.21 0.21 0.22 0.27 0.23 0.14 ...
## $ landscape : Factor w/ 3 levels "Park","Suburban",..: 1 1 1 1 1 1 1 1 1 1 ...
\end{verbatim}
\begin{alltt}
\hlkwd{summary}\hlstd{(infectionRates)}
\end{alltt}
\begin{verbatim}
## percentInfected landscape
## Min. :0.010 Park :30
## 1st Qu.:0.040 Suburban:30
## Median :0.090 Urban :30
## Mean :0.121
## 3rd Qu.:0.210
## Max. :0.330
\end{verbatim}
\end{kframe}
\end{knitrout}
% \normalsize %\bf
\vfill
These data are made-up, but imagine they come from a study in which
100 crows are placed in $n=30$ enclosures in each of 3 landscapes. The
response variable is the proportion of crows infected with West Nile
virus at the end of the study. \\
\end{frame}
\begin{frame}[fragile]
\frametitle{One-way ANOVA}
\begin{knitrout}\footnotesize
\definecolor{shadecolor}{rgb}{0.878, 0.918, 0.933}\color{fgcolor}\begin{kframe}
\begin{alltt}
\hlstd{anova1} \hlkwb{<-} \hlkwd{aov}\hlstd{(percentInfected} \hlopt{~} \hlstd{landscape,}
\hlkwc{data}\hlstd{=infectionRates)}
\hlkwd{summary}\hlstd{(anova1)}
\end{alltt}
\begin{verbatim}
## Df Sum Sq Mean Sq F value Pr(>F)
## landscape 2 0.6384 0.3192 306 <2e-16 ***
## Residuals 87 0.0908 0.0010
## ---
## Signif. codes: 0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
\end{verbatim}
\end{kframe}
\end{knitrout}
\pause
\vfill
%\bf
Significant, but did we meet the assumptions?
\end{frame}
\begin{frame}[fragile]
\frametitle{Boxplots for the three landscape types}
\begin{knitrout}\footnotesize
\definecolor{shadecolor}{rgb}{0.878, 0.918, 0.933}\color{fgcolor}\begin{kframe}
\begin{alltt}
\hlkwd{boxplot}\hlstd{(percentInfected}\hlopt{~}\hlstd{landscape, infectionRates,}
\hlkwc{col}\hlstd{=}\hlstr{"lightgreen"}\hlstd{,} \hlkwc{cex.lab}\hlstd{=}\hlnum{1.5}\hlstd{,} \hlkwc{cex.axis}\hlstd{=}\hlnum{1.3}\hlstd{,}
\hlkwc{ylab}\hlstd{=}\hlstr{"Percent forest cover"}\hlstd{)}
\end{alltt}
\end{kframe}
\end{knitrout}
\vspace{-0.5cm}
\centering
\includegraphics[width=0.6\textwidth]{figure/boxfor1-1} \\
\vfill
Notice that the variances don't look equal among groups.
\end{frame}
% \begin{frame}[fragile]
% \frametitle{Are group variances equal?}
% \small
% <<bartlett,size='footnotesize'>>=
% bartlett.test(percentInfected~landscape, data=infectionRates)
% @
% \vfill
% {We reject the null hypothesis that the group variances are equal,
% suggesting that we failed to meet the assumption of constant variance.}
% \end{frame}
\begin{frame}[fragile]
\frametitle{Histogram of residuals}
\scriptsize
%\begin{center}
\begin{knitrout}
\definecolor{shadecolor}{rgb}{0.878, 0.918, 0.933}\color{fgcolor}\begin{kframe}
\begin{alltt}
\hlstd{resids} \hlkwb{<-} \hlkwd{resid}\hlstd{(anova1)}
\hlkwd{hist}\hlstd{(resids,} \hlkwc{col}\hlstd{=}\hlstr{"turquoise"}\hlstd{,} \hlkwc{breaks}\hlstd{=}\hlnum{10}\hlstd{,} \hlkwc{xlab}\hlstd{=}\hlstr{"Residuals"}\hlstd{)}
\end{alltt}
\end{kframe}
\end{knitrout}
%\end{center}
\centering
\includegraphics[width=0.6\textwidth]{figure/histresid0-1} \\
\vfill
The histogram doesn't look bad, but we need to test the normality
assumption.
\end{frame}
\begin{frame}[fragile]
\frametitle{Normality test on residuals}
\begin{knitrout}
\definecolor{shadecolor}{rgb}{0.878, 0.918, 0.933}\color{fgcolor}\begin{kframe}
\begin{alltt}
\hlkwd{shapiro.test}\hlstd{(resids)}
\end{alltt}
\begin{verbatim}
##
## Shapiro-Wilk normality test
##
## data: resids
## W = 0.95528, p-value = 0.003596
\end{verbatim}
\end{kframe}
\end{knitrout}
%\pause
\vspace{0.6cm}
%% {\bf Remember, this is not the correct test:}
%% <<>>=
%% shapiro.test(infectionRates$pcForest)
%% @
{We reject the null hypothesis that the residuals come from a normal
distribution.
\pause
\vfill
Since we failed to meet the key assumption of ANOVA,
we should consider transformations and/or nonparametric tests.
}
\end{frame}
%\begin{comment}
\section{Transformations}
\begin{frame}[plain]
\frametitle{Outline}
\Large
\tableofcontents[currentsection]
\end{frame}
\begin{frame}[fragile]
\frametitle{Logarithmic Transformation}
% \LARGE
\small
$y$ is the transformed response variable; $u$ is the original response variable.
\[
y = \log(u + C)
\]
% \large
\vspace{-5mm}
\begin{itemize}%[<+->]
\small %\normalsize
  \item The constant $C$ is often 1, but can be 0 if there are no zeros in the data ($u$)
  \item Useful when group standard deviations are proportional to the means
\end{itemize}
\pause
% <<log,fig.width=6,fig.height=6,fig.align='center',out.width="35%",size='scriptsize'>>=
\begin{knitrout}\scriptsize
\definecolor{shadecolor}{rgb}{0.878, 0.918, 0.933}\color{fgcolor}\begin{kframe}
\begin{alltt}
\hlkwd{boxplot}\hlstd{(}\hlkwd{log}\hlstd{(percentInfected)}\hlopt{~}\hlstd{landscape, infectionRates,} \hlkwc{col}\hlstd{=}\hlstr{"green"}\hlstd{,}
\hlkwc{cex.lab}\hlstd{=}\hlnum{1.5}\hlstd{,} \hlkwc{cex.axis}\hlstd{=}\hlnum{1.3}\hlstd{,} \hlkwc{ylab}\hlstd{=}\hlstr{"log(percent forest cover)"}\hlstd{)}
\end{alltt}
\end{kframe}
\end{knitrout}
\includegraphics[width=0.45\textwidth]{figure/boxfor1-1} \hfill
\includegraphics[width=0.45\textwidth]{figure/log-1} \\
\end{frame}
%% \begin{frame}[fragile]
%% \frametitle{Logarithmic Transformation}
%% <<loguy,fig=true,include=FALSE, width=12, height=6>>=
%% par(mfrow=c(1,2), mai=c(0.9, 0.9, 0.1, 0.1))
%% hist(u2, main="")
%% hist(log(u2), main="")
%% @
%% \includegraphics[width=\textwidth]{lab05-assump-nonpar-loguy}
%% \end{frame}
\begin{frame}[fragile]
\frametitle{Square Root Transformation}
% \LARGE
\small
\[
y = \sqrt{u + C}
\]
% \large
\vspace{-5mm}
\begin{itemize}%[<+->]
\small
\item $C$ is often 0.5 or some other small number
\item Useful when group variances are proportional to the means
% (count data)
\end{itemize}
\pause
\begin{knitrout}\scriptsize
\definecolor{shadecolor}{rgb}{0.878, 0.918, 0.933}\color{fgcolor}\begin{kframe}
\begin{alltt}
\hlkwd{boxplot}\hlstd{(}\hlkwd{sqrt}\hlstd{(percentInfected)}\hlopt{~}\hlstd{landscape, infectionRates,} \hlkwc{col}\hlstd{=}\hlstr{"yellow"}\hlstd{,}
\hlkwc{cex.lab}\hlstd{=}\hlnum{1.5}\hlstd{,} \hlkwc{cex.axis}\hlstd{=}\hlnum{1.3}\hlstd{,} \hlkwc{ylab}\hlstd{=}\hlstr{"sqrt(percent forest cover)"}\hlstd{)}
\end{alltt}
\end{kframe}
\end{knitrout}
\includegraphics[width=0.45\textwidth]{figure/boxfor1-1} \hfill
\includegraphics[width=0.45\textwidth]{figure/sqrt-1} \\
\end{frame}
%% \begin{frame}[fragile]
%% \frametitle{Square Root Transformation}
%% <<sqrtuy,fig=true,include=FALSE, width=12, height=6>>=
%% par(mfrow=c(1,2), mai=c(0.9, 0.9, 0.1, 0.1))
%% hist(u2, main="")
%% hist(sqrt(u2), main="")
%% @
%% \includegraphics[width=\textwidth]{lab05-assump-nonpar-sqrtuy}
%% \end{frame}
\begin{frame}[fragile]
\frametitle{Arcsine-square root Transformation}
% \LARGE
\small
\[
y = \mathrm{arcsin}(\sqrt{u})
\]
% \large
\vspace{-5mm}
\begin{itemize}%[<+->]
\small
\item Used on proportions.
\item logit transformation is an alternative: $y = \log(\frac{u}{1-u})$
% \item Binomial (logistic) regression is an alternative for proportions.
\end{itemize}
\pause
\begin{knitrout}\scriptsize
\definecolor{shadecolor}{rgb}{0.878, 0.918, 0.933}\color{fgcolor}\begin{kframe}
\begin{alltt}
\hlkwd{boxplot}\hlstd{(}\hlkwd{asin}\hlstd{(}\hlkwd{sqrt}\hlstd{(percentInfected))}\hlopt{~}\hlstd{landscape, infectionRates,} \hlkwc{col}\hlstd{=}\hlstr{"orange"}\hlstd{,}
\hlkwc{cex.lab}\hlstd{=}\hlnum{1.5}\hlstd{,} \hlkwc{cex.axis}\hlstd{=}\hlnum{1.3}\hlstd{,} \hlkwc{ylab}\hlstd{=}\hlstr{"asin(sqrt(percent forest cover))"}\hlstd{)}
\end{alltt}
\end{kframe}
\end{knitrout}
\includegraphics[width=0.45\textwidth]{figure/boxfor1-1} \hfill
\includegraphics[width=0.45\textwidth]{figure/asin-1} \\
\end{frame}
\begin{frame}[fragile]
\frametitle{Reciprocal Transformation}
% \LARGE
\small
\[
y = \frac{1}{u + C}
\]
% \large
\vspace{-5mm}
\begin{itemize}%[<+->]
\small
\item $C$ is often 1 but could be 0 if there are no zeros in $u$
\item Useful when group SDs are proportional to the squared group means
\end{itemize}
\pause
\begin{knitrout}\scriptsize
\definecolor{shadecolor}{rgb}{0.878, 0.918, 0.933}\color{fgcolor}\begin{kframe}
\begin{alltt}
\hlkwd{boxplot}\hlstd{(}\hlnum{1}\hlopt{/}\hlstd{percentInfected}\hlopt{~}\hlstd{landscape, infectionRates,} \hlkwc{col}\hlstd{=}\hlstr{"pink"}\hlstd{,}
\hlkwc{cex.lab}\hlstd{=}\hlnum{1.5}\hlstd{,} \hlkwc{cex.axis}\hlstd{=}\hlnum{1.3}\hlstd{,} \hlkwc{ylab}\hlstd{=}\hlstr{"1/percent forest cover"}\hlstd{)}
\end{alltt}
\end{kframe}
\end{knitrout}
\includegraphics[width=0.45\textwidth]{figure/boxfor1-1} \hfill
\includegraphics[width=0.45\textwidth]{figure/recip-1} \\
\end{frame}
\begin{frame}[fragile]
\frametitle{ANOVA on transformed data}
\small
{Transformation can be done in the \inr{aov} formula}
\begin{knitrout}\footnotesize
\definecolor{shadecolor}{rgb}{0.878, 0.918, 0.933}\color{fgcolor}\begin{kframe}
\begin{alltt}
\hlstd{anova2} \hlkwb{<-} \hlkwd{aov}\hlstd{(}\hlkwd{log}\hlstd{(percentInfected)}\hlopt{~}\hlstd{landscape,}
\hlkwc{data}\hlstd{=infectionRates)}
\hlkwd{summary}\hlstd{(anova2)}
\end{alltt}
\begin{verbatim}
## Df Sum Sq Mean Sq F value Pr(>F)
## landscape 2 60.93 30.46 303.5 <2e-16 ***
## Residuals 87 8.73 0.10
## ---
## Signif. codes: 0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
\end{verbatim}
\end{kframe}
\end{knitrout}
\pause
\vfill
{The log transformation didn't help much: We still reject the
normality assumption.}
\begin{knitrout}\footnotesize
\definecolor{shadecolor}{rgb}{0.878, 0.918, 0.933}\color{fgcolor}\begin{kframe}
\begin{alltt}
\hlkwd{shapiro.test}\hlstd{(}\hlkwd{resid}\hlstd{(anova2))}
\end{alltt}
\begin{verbatim}
##
## Shapiro-Wilk normality test
##
## data: resid(anova2)
## W = 0.97092, p-value = 0.04106
\end{verbatim}
\end{kframe}
\end{knitrout}
\end{frame}
\section{Non-parametrics}
\begin{frame}[plain]
\frametitle{Outline}
\Large
\tableofcontents[currentsection]
\end{frame}
\begin{frame}[fragile]
\frametitle{Non-parametric Tests}
% {\bf Mann-Whitney $U$-test}
\large
{Wilcoxon rank sum test}
\begin{itemize}
\item For 2 group comparisons
\item a.k.a. the Mann-Whitney $U$ test
\item \inr{wilcox.test}
\end{itemize}
\pause
\vspace{0.5cm}
{Kruskal-Wallis One-Way ANOVA}
\begin{itemize}
\item For testing differences in $>2$ groups
\item \inr{kruskal.test}
\end{itemize}
\pause
\vfill
\centering
These two functions can be used in almost the exact same way as
\inr{t.test} and \inr{aov}, respectively. \\
\end{frame}
%% \begin{frame}
%% \frametitle{In-class assignment}
%% \begin{itemize}
%% \item[\bf (1)] Import the \inr{forestCover} data
%% \item[\bf (2)] Decide which transformation do you think would be best?
%% \item[\bf (3)] Conduct an ANOVA on the untransformed and transformed
%% data. Use at least two of the following tranformations:
%% \begin{itemize}
%% \item log
%% \item square-root
%% \item acrsine square-root
%% \item reciprocal
%% \end{itemize}
%% \item[\bf (4)] Determine which (if any) transformation is best for \inr{pcForest}
%% \item[\bf (5)] Does transformation alter the main conclusion?
%% \end{itemize}
%% \end{frame}
\begin{frame}
\frametitle{Assignment}
\small
% \footnotesize
\begin{enumerate}
\item[\bf (1)] Decide which transformation is best for the
\inr{infectionRates} data by conducting ANOVAs using the log,
square-root, arcsine square-root, and reciprocal
    transformations. Use boxplots, histograms, and Shapiro-Wilk tests
to determine the best transformation.
% following tranformations:
% \begin{itemize}
% \item log
% \item square-root
% \item arcsine square-root
% \item reciprocal
% \end{itemize}
\item[\bf (2)] Does transformation alter the conclusion about the
null hypothesis of no difference in means? If not, were the
transformations necessary?
\item[\bf (3)] Test the hypothesis that infection rates are equal
  between suburban and urban landscapes using a Wilcoxon rank sum
test. What is the conclusion?
\item[\bf (4)] Conduct a Kruskal-Wallis test on the data. What
is the conclusion? % (in terms of the null hypothesis)?
\end{enumerate}
% \centering
\vfill
Use comments in your \R~script (or .Rmd file) to explain your
answers. Upload your results to ELC at least one day before your
next lab.\\
\end{frame}
\end{document}
| {
"alphanum_fraction": 0.6721441075,
"avg_line_length": 26.2744807122,
"ext": "tex",
"hexsha": "540da4656fa16138e08160e2ad2fec2b16c22ff8",
"lang": "TeX",
"max_forks_count": null,
"max_forks_repo_forks_event_max_datetime": null,
"max_forks_repo_forks_event_min_datetime": null,
"max_forks_repo_head_hexsha": "89bce9e600c6d4601b70eb5cd02531fc1dfa950e",
"max_forks_repo_licenses": [
"MIT"
],
"max_forks_repo_name": "rbchan/exp-design",
"max_forks_repo_path": "labs/assump-nonpar/lab-assump-nonpar.tex",
"max_issues_count": null,
"max_issues_repo_head_hexsha": "89bce9e600c6d4601b70eb5cd02531fc1dfa950e",
"max_issues_repo_issues_event_max_datetime": null,
"max_issues_repo_issues_event_min_datetime": null,
"max_issues_repo_licenses": [
"MIT"
],
"max_issues_repo_name": "rbchan/exp-design",
"max_issues_repo_path": "labs/assump-nonpar/lab-assump-nonpar.tex",
"max_line_length": 172,
"max_stars_count": 2,
"max_stars_repo_head_hexsha": "89bce9e600c6d4601b70eb5cd02531fc1dfa950e",
"max_stars_repo_licenses": [
"MIT"
],
"max_stars_repo_name": "rbchan/exp-design",
"max_stars_repo_path": "labs/assump-nonpar/lab-assump-nonpar.tex",
"max_stars_repo_stars_event_max_datetime": "2019-02-27T17:03:57.000Z",
"max_stars_repo_stars_event_min_datetime": "2018-08-01T17:16:11.000Z",
"num_tokens": 6436,
"size": 17709
} |
\subsection{Randomness Analysis}
\label{sec:evaluation-randomness-analysis}
The degree of randomness of the adopted pseudo-random number generator has been evaluated with the following standard tests:
\begin{itemize}
\item \textbf{Spectral Test:} this test is considered one of the most intuitive and widely adopted tests to assess the quality of linear congruential generators \cite{knuth1981art}.
It relies on the fact that the outputs of such generators form lines or hyperplanes when plotted in 2 or more dimensions. The smaller the distance between these lines or planes, the better the generator, because a smaller distance indicates a more uniform distribution.
In Figures~\ref{fig:evaluation-randomness-spectral-16807}, \ref{fig:evaluation-randomness-spectral-48271} and \ref{fig:evaluation-randomness-spectral-50812} we show the test results for the generators $(16807,2^{31}-1)$, $(48271,2^{31}-1)$ and $(50812,2^{31}-1)$, respectively.
The results show that the adopted generator $(50812,2^{31}-1)$ is much better than $(16807, 2^{31}-1)$, which has been widely adopted in the past, and it is really similar to $(48271,2^{31}-1)$, which is the current 32-bit de-facto standard, according to \cite{leemis2006discrete}.
\item \textbf{Test of Extremes:} this test relies on the fact that if $U=U_{0},...,U_{d-1}$ is an independent identically distributed sequence of $Uniform(0,1)$ random variables, then $\max(U)^{d}$ is also $Uniform(0,1)$. The test leverages this property to measure, for every stream, how much the generated random values differ from the theoretical uniform distribution.
Given a number of streams $s$ and a level of confidence $c=1-\alpha$, the closer the total number of failures is to the expected value $s \cdot \alpha$, the better the generator.
In Figure~\ref{fig:evaluation-randomness-extremes-50812} we show the results for the adopted generator $(50812,2^{31}-1, 256)$ with sample size $n=10000$, $k=1000$ bins, sequence size $d=5$ and a $95\%$ level of confidence.
The adopted generator shows critical values $v_{min}=913$ and $v_{max}=1088$ and $14$ total failures ($7$ lower and $7$ upper), which is not far from the theoretically accepted number of failures, i.e. $256 \cdot 0.05 \approx 13$.
The proposed generator successfully passed the test with a $94.531\%$ level of confidence.
\item \textbf{Kolmogorov-Smirnov Test:} the test measures, at a given level of confidence, the largest vertical distance between the theoretical cumulative distribution function and the empirical cumulative distribution function.
The generator passes if the recorded distance $d$ is smaller than the critical value $d^*$ for the considered level of confidence; the smaller $d$ is relative to $d^*$, the better the generator.
As the Kolmogorov-Smirnov analysis relies on pre-calculated randomness statistics, we have chosen to use the statistics obtained by the previous Test of Extremes.
In Figure~\ref{fig:evaluation-randomness-kolmogorov-smirnov-50812} we show the test results for the proposed generator $(50812,2^{31}-1, 256)$ with a $95\%$ level of confidence.
%
The proposed generator successfully passed the test, as $d=0.041<0.081=d^*$.
\end{itemize}
We can conclude that the adopted generator $(50812,2^{31}-1, 256)$ provides a satisfactory degree of randomness.
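For reference, the following is a minimal Python sketch of a multiplicative linear congruential (Lehmer) generator using the adopted constants, together with the statistic used by the Test of Extremes; the function and variable names are illustrative and do not correspond to the actual implementation.
\begin{verbatim}
# Minimal sketch of a Lehmer multiplicative linear congruential generator:
# x_{i+1} = (a * x_i) mod m, with outputs u_i = x_i / m in (0, 1).
# Constants match the adopted generator (a = 50812, m = 2^31 - 1).
MULTIPLIER = 50812
MODULUS = 2**31 - 1


def lehmer_stream(seed, n):
    """Yield n pseudo-random numbers in (0, 1) starting from seed."""
    x = seed
    for _ in range(n):
        x = (MULTIPLIER * x) % MODULUS
        yield x / MODULUS


def extremes_sample(seed, n, d):
    """Return n observations of max(U_0, ..., U_{d-1})^d, which should be
    Uniform(0,1) when the d consecutive draws are i.i.d. Uniform(0,1)."""
    stream = lehmer_stream(seed, n * d)
    samples = []
    for _ in range(n):
        block = [next(stream) for _ in range(d)]
        samples.append(max(block) ** d)
    return samples


if __name__ == "__main__":
    xs = extremes_sample(seed=123456789, n=10000, d=5)
    print(min(xs), max(xs))  # all values should lie in (0, 1)
\end{verbatim}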
\begin{figure}
\includegraphics[width=\columnwidth]{fig/evaluation-randomness-spectral-16807}
\caption{The Spectral Test for the generator $(16807,2^{31}-1, 1)$ in the interval $(0, 10^{-3})$.}
\label{fig:evaluation-randomness-spectral-16807}
\end{figure}
\begin{figure}
\includegraphics[width=\columnwidth]{fig/evaluation-randomness-spectral-48271}
\caption{The Spectral Test for the generator $(48271,2^{31}-1, 1)$ in the interval $(0, 10^{-3})$.}
\label{fig:evaluation-randomness-spectral-48271}
\end{figure}
\begin{figure}
\includegraphics[width=\columnwidth]{fig/evaluation-randomness-spectral-50812}
\caption{The Spectral Test for the generator $(50812,2^{31}-1, 1)$ in the interval $(0, 10^{-3})$.}
\label{fig:evaluation-randomness-spectral-50812}
\end{figure}
\begin{figure}
\includegraphics[width=\columnwidth]{fig/evaluation-randomness-extremes-50812}
\caption{The Test of Extremes with $d=5$ to evaluate the randomness of the random number generator $(50812,2^{31}-1, 256)$.}
\label{fig:evaluation-randomness-extremes-50812}
\end{figure}
\begin{figure}
\includegraphics[width=\columnwidth]{fig/evaluation-randomness-kolmogorov-smirnov-50812}
\caption{The Kolmogorov-Smirnov Analysis (leveraging the Test of Extremes with $d=5$) to evaluate the randomness of the random number generator $(50812,2^{31}-1, 256)$ with $95\%$ confidence.}
\label{fig:evaluation-randomness-kolmogorov-smirnov-50812}
\end{figure} | {
"alphanum_fraction": 0.7619852165,
"avg_line_length": 75.1587301587,
"ext": "tex",
"hexsha": "efec34442db470379b75b04c27b45d5ae9d6a3d2",
"lang": "TeX",
"max_forks_count": 1,
"max_forks_repo_forks_event_max_datetime": "2018-02-17T13:30:49.000Z",
"max_forks_repo_forks_event_min_datetime": "2018-02-17T13:30:49.000Z",
"max_forks_repo_head_hexsha": "7cc526fe7cd9916ceaf8285c4e4bc4dce4028537",
"max_forks_repo_licenses": [
"MIT"
],
"max_forks_repo_name": "gmarciani/research",
"max_forks_repo_path": "pydes/sec/evaluation-randomness-analysis.tex",
"max_issues_count": null,
"max_issues_repo_head_hexsha": "7cc526fe7cd9916ceaf8285c4e4bc4dce4028537",
"max_issues_repo_issues_event_max_datetime": null,
"max_issues_repo_issues_event_min_datetime": null,
"max_issues_repo_licenses": [
"MIT"
],
"max_issues_repo_name": "gmarciani/research",
"max_issues_repo_path": "pydes/sec/evaluation-randomness-analysis.tex",
"max_line_length": 375,
"max_stars_count": 3,
"max_stars_repo_head_hexsha": "7cc526fe7cd9916ceaf8285c4e4bc4dce4028537",
"max_stars_repo_licenses": [
"MIT"
],
"max_stars_repo_name": "gmarciani/research",
"max_stars_repo_path": "pydes/sec/evaluation-randomness-analysis.tex",
"max_stars_repo_stars_event_max_datetime": "2018-07-20T12:54:12.000Z",
"max_stars_repo_stars_event_min_datetime": "2017-07-27T13:31:43.000Z",
"num_tokens": 1318,
"size": 4735
} |
% $Id$
\pagebreak
\subsection{Object Model}
The following is a simplified UML diagram showing the structure of the
Time Manager utility. See Appendix A, {\it A Brief Introduction to UML},
for a translation table that lists the symbols in the diagram and their
meaning.
\begin{center}
\includegraphics{TimeMgr_obj}
\end{center}
| {
"alphanum_fraction": 0.7701492537,
"avg_line_length": 23.9285714286,
"ext": "tex",
"hexsha": "c29c94a30051695a1819f95783601f4e50640950",
"lang": "TeX",
"max_forks_count": null,
"max_forks_repo_forks_event_max_datetime": null,
"max_forks_repo_forks_event_min_datetime": null,
"max_forks_repo_head_hexsha": "0e1676300fc91000ecb43539cabf1f342d718fb3",
"max_forks_repo_licenses": [
"NCSA",
"Apache-2.0",
"MIT"
],
"max_forks_repo_name": "joeylamcy/gchp",
"max_forks_repo_path": "ESMF/src/Infrastructure/TimeMgr/doc/TimeMgr_obj.tex",
"max_issues_count": 1,
"max_issues_repo_head_hexsha": "0e1676300fc91000ecb43539cabf1f342d718fb3",
"max_issues_repo_issues_event_max_datetime": "2022-03-04T16:12:02.000Z",
"max_issues_repo_issues_event_min_datetime": "2022-03-04T16:12:02.000Z",
"max_issues_repo_licenses": [
"NCSA",
"Apache-2.0",
"MIT"
],
"max_issues_repo_name": "joeylamcy/gchp",
"max_issues_repo_path": "ESMF/src/Infrastructure/TimeMgr/doc/TimeMgr_obj.tex",
"max_line_length": 73,
"max_stars_count": 1,
"max_stars_repo_head_hexsha": "0e1676300fc91000ecb43539cabf1f342d718fb3",
"max_stars_repo_licenses": [
"NCSA",
"Apache-2.0",
"MIT"
],
"max_stars_repo_name": "joeylamcy/gchp",
"max_stars_repo_path": "ESMF/src/Infrastructure/TimeMgr/doc/TimeMgr_obj.tex",
"max_stars_repo_stars_event_max_datetime": "2018-07-05T16:48:58.000Z",
"max_stars_repo_stars_event_min_datetime": "2018-07-05T16:48:58.000Z",
"num_tokens": 81,
"size": 335
} |
\section{Methods overview}
There are many interpretability methods. Some of them superseded previous methods or are very similar to each other.
The following table lists the most important methods, judged by paper citations and available implementations, that are currently in use.
\begin{tabular}{| p{6.5cm} | p{1.5cm} | p{6cm} | }
\hline
\textbf{Method} & \textbf{Black box method} & \textbf{PyTorch implementation available} \\ \hline
Occlusions (Zeiler et al. 2013) \cite{zeiler2014visualizing} & Yes & No, but simple algorithm \\ \hline
RISE \cite{Petsiuk2018rise} & Yes & Official implementation is in PyTorch \\ \hline
LIME \cite{ribeiro2016should} & Yes & Yes \\ \hline
Meaningful Perturbation \cite{fong2017interpretable} & Yes & Yes \cite{fong2017implementation} \\ \hline
Prediction Difference Analysis \cite{zintgraf2017visualizing} & Yes & No, but simple algorithm \\ \hline
Layer-wise Relevance Propagation (LRP) \cite{bach2015pixel} & No & Yes, but missing batch normalization \cite{lrppytorch} \\ \hline
DeepLIFT \cite{shrikumar2017learning} & No & Initial implementation in SHAP \cite{NIPS2017_7062} \\ \hline
Grad-CAM \cite{selvaraju2017grad} & No & Many implementations, e.g. \cite{visualattribution} \\ \hline
PatternNet \cite{kindermans2017learning} & No & Yes \cite{visualattribution} \\ \hline
Guided Backpropagation \cite{springenberg2014striving} & No & Yes \cite{visualattribution} \\ \hline
Excitation Backprop \cite{zhang2016EB} & No & Yes \cite{visualattribution} \\ \hline
Integrated Gradients \cite{sundararajan2017axiomatic} & No & Yes \cite{integratedgradientpytorch} \\ \hline
\end{tabular}
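Occlusion-based attribution is listed above as lacking an official PyTorch implementation but having a simple algorithm. As a rough, non-authoritative sketch (assuming \texttt{model} is a PyTorch image classifier in evaluation mode that maps a $(1, C, H, W)$ tensor to a $(1, \textit{num\_classes})$ score tensor; the patch size, stride, and gray baseline value are arbitrary illustrative choices), the idea can be implemented as follows:
\begin{verbatim}
import torch

def occlusion_map(model, image, target_class, patch=16, stride=8, baseline=0.5):
    """Slide a gray patch over the image and record the drop in the target
    class score; large drops mark regions the model relies on."""
    _, _, h, w = image.shape          # expects a (1, C, H, W) tensor
    rows = (h - patch) // stride + 1
    cols = (w - patch) // stride + 1
    heatmap = torch.zeros(rows, cols)
    with torch.no_grad():
        base_score = model(image)[0, target_class].item()
        for i, y in enumerate(range(0, h - patch + 1, stride)):
            for j, x in enumerate(range(0, w - patch + 1, stride)):
                occluded = image.clone()
                occluded[:, :, y:y + patch, x:x + patch] = baseline
                score = model(occluded)[0, target_class].item()
                heatmap[i, j] = base_score - score
    return heatmap
\end{verbatim}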
| {
"alphanum_fraction": 0.7688004972,
"avg_line_length": 76.619047619,
"ext": "tex",
"hexsha": "4b11de60184bf1aaba2ec68a77895fb5735f32c1",
"lang": "TeX",
"max_forks_count": null,
"max_forks_repo_forks_event_max_datetime": null,
"max_forks_repo_forks_event_min_datetime": null,
"max_forks_repo_head_hexsha": "a94ecd7cff9f00ecd23ecee319076b78bef79a8e",
"max_forks_repo_licenses": [
"MIT"
],
"max_forks_repo_name": "andef4/thesis-doc",
"max_forks_repo_path": "chapters/02_methods/02_overview.tex",
"max_issues_count": null,
"max_issues_repo_head_hexsha": "a94ecd7cff9f00ecd23ecee319076b78bef79a8e",
"max_issues_repo_issues_event_max_datetime": null,
"max_issues_repo_issues_event_min_datetime": null,
"max_issues_repo_licenses": [
"MIT"
],
"max_issues_repo_name": "andef4/thesis-doc",
"max_issues_repo_path": "chapters/02_methods/02_overview.tex",
"max_line_length": 131,
"max_stars_count": null,
"max_stars_repo_head_hexsha": "a94ecd7cff9f00ecd23ecee319076b78bef79a8e",
"max_stars_repo_licenses": [
"MIT"
],
"max_stars_repo_name": "andef4/thesis-doc",
"max_stars_repo_path": "chapters/02_methods/02_overview.tex",
"max_stars_repo_stars_event_max_datetime": null,
"max_stars_repo_stars_event_min_datetime": null,
"num_tokens": 497,
"size": 1609
} |
% declare document class and geometry
\documentclass[12pt]{article} % use larger type; default would be 10pt
\usepackage[english]{babel} % for hyphenation dictionary
%\setdefaultlanguage{english} % polyglossia command for use with XeTeX / LuaTeX
\usepackage[margin=1in]{geometry} % handle page geometry
% import packages and commands
\input{../header2.tex}
% title information
\title{Phys 221A -- Quantum Mechanics -- Lec09}
\author{UCLA, Fall 2014}
\date{\formatdate{3}{11}{2014}} % Activate to display a given date or no date (if empty),
% otherwise the current date is printed
\begin{document}
\maketitle
\section{Propagator}
The propagator is defined as the time evolution operator evaluated in position space
\begin{eqn}
K(\v r, t; \v r', t') = \matrixel{\v r}{U(t,t')}{\v r'}.
\end{eqn}
When $t=t'$ we have $U(t,t'=t) = 1$, so that the propagator in this case is just
\begin{eqn}
K(\v r, t; \v r', t'=t) = \delta(\v r - \v r').
\label{eq:prop0}
\end{eqn}
Now, the Schroedinger equation can be rewritten
\begin{eqn}
(i \hbar \partial_t - H) \psi(\v r, t) = 0,
\end{eqn}
where $H$ is some differential operator, e.g. $H = -\frac{\hbar^2}{2m} \nabla_r^2$. Let's call the new operator on the left the ``Schroedinger operator''. Furthermore, we can write
\begin{eqn}
\int \dif^3{r} \, \psi(\v r, t) \ket{\v r} = U(t,t') \int \dif^3{r} \, \psi(\v r, t') \ket{\v r}.
\end{eqn}
Applying a position bra on the left, we have
\begin{align}
	\psi(\v r, t) &= \bra{\v r} U(t,t') \int \dif^3{r'} \, \psi(\v r', t') \ket{\v r'} \\
	&= \int \dif^3{r'} \, K(\v r, t; \v r', t') \psi(\v r', t').
\end{align}
Next, applying the Schroedinger operator, we have
\begin{align}
0 &= (i \hbar \partial_t - H) \psi(\v r, t) \\
&= \int \dif^3{r'} \left[ (i \hbar \partial_t - H) K(\v r, t; \v r', t') \right] \psi(\v r', t').
\end{align}
Since $\psi(\v r', t')$ is a completely arbitrary wavefunction, we find that in general the propagator itself obeys the Schroedinger equation,
\begin{eqn}
(i \hbar \partial_t - H) K(\v r, t; \v r', t') = 0.
\end{eqn}
This combined with the initial condition \eqref{eq:prop0} uniquely characterizes the propagator $K$.
\subsection{Causality, Retarded Propagator}
We can impose causality by defining the retarded propagator
\begin{eqn}
K^R (\v r, t; \v r', t') \equiv \theta(t-t') K(\v r, t; \v r', t'),
\end{eqn}
where $\theta(x)$ is the Heaviside step function. This just cuts off information about the past, which is really just redundant because we should always have symmetry under time reversal. What happens when we applying the Schroedinger operator to the retarded propagator? Everything goes away except for the time derivative acting on the step function,
\begin{align}
(i \hbar \partial_t - H) K^R(\v r, t; \v r', t') &= \left[ i \hbar \partial_t \theta(t-t') \right] K(\v r, t; \v r', t') \\
&= i \hbar \delta (t-t') \delta^3(\v r - \v r').
\end{align}
This uniquely defines the retarded propagator---note also that, due to the step function, the retarded propagator is zero for $t < t'$.
\subsection{Green's function}
Given a differential equation of the form
\begin{eqn}
L(\v x) f(\v x) = \delta(\v x - \v y)
\end{eqn}
with some boundary and/or initial conditions, where $L(\v x)$ is some operator and $\v y$ is an arbitrary point, we call $f(\v x)$ a Green's function of $L$. There is a lot of mathematical technology developed around Green's functions, so it is useful to consider the propagator as a Green's function of the Schroedinger operator. This formulation is very useful for more advanced applications like quantum field theory.
\subsection{Time-independent Hamiltonian}
Let's specialize to the case of time-independent $H$, i.e. we will have energy eigenkets $\ket{\alpha_i}$ with eigenvalues $E_i$,
\begin{eqn}
H \ket{\alpha_i} = E_i \ket{\alpha_i}.
\end{eqn}
Recall that in this eigenbasis we can write
\begin{eqn}
	U(t-t') = \sum_i \ket{\alpha_i} \bra{\alpha_i} e^{-i E_i (t-t') / \hbar},
\end{eqn}
thus we find that
\begin{align}
K(\v r, t; \v r', t') &= \sum_i \underbrace{\braket{\v r}{\alpha_i}}_{\psi_i(\v r)} \braket{\alpha_i}{\v r'} e^{-i E_i (t-t') / \hbar} \\
&= \sum_i \psi_i (\v r) \psi_i^* (\v r') e^{-i E_i (t-t') / \hbar},
\end{align}
where we have
\begin{eqn}
	H(\v r) \psi_i(\v r) = E_i \psi_i(\v r).
\end{eqn}
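As a consistency check, setting $t = t'$ and using the completeness relation of the energy eigenfunctions recovers the initial condition \eqref{eq:prop0},
\begin{eqn}
	K(\v r, t; \v r', t'=t) = \sum_i \psi_i (\v r) \psi_i^* (\v r') = \delta(\v r - \v r').
\end{eqn}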
\begin{remark}
At the end of the day, we can think of the propagator as a wave function for a particle localized at $\v r'$ at time $t'$.
\end{remark}
Now, we can take the trace of the propagator over position,
\begin{align}
G(t-t') &= \tr U(t,t') \\
	&= \int \dif^3 r \, K(\v r, t; \v r, t') \\
&= \sum_i e^{-i E_i (t-t') / \hbar},
\end{align}
since the eigenfunctions are all normalized. Note that here the trace is defined by
\begin{align}
\tr U(t,t') &= \sum_i \matrixel{\beta_i}{U(t,t')}{\beta_i} \\
&= \sum_i \int \dif^3{r} \braket{\beta_i}{\v r} \matrixel{\v r}{U}{\beta_i} \\
&= \int \dif^3{r} \matrixel{\v r}{U(t,t')}{\v r}.
\end{align}
Notice that our Green's function
\begin{eqn}
G(t) = \sum_i e^{-i E_i t / \hbar}
\end{eqn}
now is almost exactly the same as the partition function from statistical mechanics
\begin{eqn}
Z = \sum_i e^{-\beta E_i}.
\end{eqn}
This is no trivial remark---it turns out that much of what we do in statistical mechanics is directly applicable to quantum field theory, and vice versa. If we define an ``imaginary time'' $\tau = \hbar \beta$, the connection becomes clearer. We can analytically continue $G(t)$ onto the complex plane defined by $z = t - i \tau$. Then, we find that under the so-called ``Wick rotation'' $t \rightarrow -i \tau$ we have
\begin{eqn}
G(-i \tau) = Z.
\end{eqn}
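Explicitly, substituting $t = -i \tau$ with $\tau = \hbar \beta$ into the sum $G(t) = \sum_i e^{-i E_i t / \hbar}$ makes the correspondence transparent:
\begin{eqn}
	G(-i \tau) = \sum_i e^{-i E_i (-i \tau) / \hbar} = \sum_i e^{-E_i \tau / \hbar} = \sum_i e^{-\beta E_i} = Z.
\end{eqn}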
Of course we can also define a retarded Green's function
\begin{eqn}
G^R(t) = \theta(t) G(t),
\end{eqn}
which we can Fourier transform and write
\begin{eqn}
G^R(\omega) = -i \int_{-\infty}^\infty \dif{t} \, e^{i\omega t} G^R(t) = -i \int_0^\infty \dif{t} \, e^{i \omega t} G^R(t).
\end{eqn}
This generally ends up making further calculations intractable, but we can ``regularize'' by taking $\omega \rightarrow \omega + i \eta$,
\begin{eqn}
	G^R(\omega) \rightarrow -i \int_0^\infty \dif{t} \, e^{i \omega t - \eta t} G^R(t),
\end{eqn}
or in other words
\begin{eqn}
G^R(t) \rightarrow G^R(t) e^{-\eta t}.
\end{eqn}
Then we find that
\begin{eqn}
G^R(\omega) = -i \sum_i \int_0^\infty \dif{t} e^{i \omega t - \eta t - i E_i t / \hbar},
\end{eqn}
which we can evaluate using
\begin{eqn}
\int_0^\infty \dif{t} \, e^{-\alpha t} = 1 / \alpha, \qquad \text{where $\Re \alpha > 0$},
\end{eqn}
so that
\begin{eqn}
G^R(\omega) = \sum_i \frac{1}{\omega - E_i / \hbar + i \eta}.
\end{eqn}
So we have a function with poles in the complex plane at $E_i / \hbar - i \eta$, which we have pushed just below the real line, into the negative imaginary half-plane.
\end{document}
| {
"alphanum_fraction": 0.6650442478,
"avg_line_length": 40.843373494,
"ext": "tex",
"hexsha": "4b9e641bbed598dd9cb35307fa4cb5a38fb58501",
"lang": "TeX",
"max_forks_count": null,
"max_forks_repo_forks_event_max_datetime": null,
"max_forks_repo_forks_event_min_datetime": null,
"max_forks_repo_head_hexsha": "48084dbbac2f8a4748c1fdaaf63a4cebaae16809",
"max_forks_repo_licenses": [
"MIT"
],
"max_forks_repo_name": "paulinearriaga/phys-ucla",
"max_forks_repo_path": "quantum/lec09.tex",
"max_issues_count": null,
"max_issues_repo_head_hexsha": "48084dbbac2f8a4748c1fdaaf63a4cebaae16809",
"max_issues_repo_issues_event_max_datetime": null,
"max_issues_repo_issues_event_min_datetime": null,
"max_issues_repo_licenses": [
"MIT"
],
"max_issues_repo_name": "paulinearriaga/phys-ucla",
"max_issues_repo_path": "quantum/lec09.tex",
"max_line_length": 421,
"max_stars_count": null,
"max_stars_repo_head_hexsha": "48084dbbac2f8a4748c1fdaaf63a4cebaae16809",
"max_stars_repo_licenses": [
"MIT"
],
"max_stars_repo_name": "paulinearriaga/phys-ucla",
"max_stars_repo_path": "quantum/lec09.tex",
"max_stars_repo_stars_event_max_datetime": null,
"max_stars_repo_stars_event_min_datetime": null,
"num_tokens": 2357,
"size": 6780
} |
\def\figtop{\rule{\textwidth}{0.5mm}}
\def\figbot{\rule{\textwidth}{0.5mm}}
\documentclass[11pt, oneside]{article} % use "amsart" instead of "article" for AMSLaTeX format
\usepackage{geometry} % See geometry.pdf to learn the layout options. There are lots.
\geometry{letterpaper} % ... or a4paper or a5paper or ...
%\geometry{landscape} % Activate for rotated page geometry
%\usepackage[parfill]{parskip} % Activate to begin paragraphs with an empty line rather than an indent
\usepackage{graphicx} % Use pdf, png, jpg, or eps§ with pdflatex; use eps in DVI mode
% TeX will automatically convert eps --> pdf in pdflatex
\usepackage{amssymb}
%SetFonts
%SetFonts
\title{Extending the Analysis: Supplement to ECF Chapter}
\author{Steven O. Kimbrough}
%\date{} % Activate to display a given date or no date
\begin{document}
\newcount\draft
\draft=1
\maketitle
%\section{}
%\subsection{}
%\section{Extending the Analysis\label{ecfsec:extending_the_analysis}}
%The basic analysis of the previous section examined the comparative quality of the strategies employed by role 1 players in a random realization of the game. It yielded an aliquot of insight, demonstrating that one player was optimal and one fared poorly in this single example. Comparison of the strategies employed by the several type 1 players suggests (in this one case of a random game) that indeed projects with high scores and high medians should be invested in, but that some spreading of resources among them is merited. But this is just one random game and one type or role of player.
We now investigate the generalization to many random games and all five types or roles of players. For each of the five types of players, we randomly generate 1,000 game setups, instead of just one as in the previous section. We then examine how the individual strategies for each type of player would fare in the 1,000 setups and we report summary statistics. We proceed by type of player in the subsections that follow.
To solve the best response problem, we formulate it as a Knapsack problem, as in the previous section, and use the Bang-for-Buck\index{Bang-for-Buck} heuristic, implemented in Python; a brief sketch of the heuristic is given below. %See Appendix \ref{ch:bang-for-buck} for illustration of the heuristic in Excel.
%\newpage
\begin{table}[h]
\figtop
\centering
\begin{tabular}{rrrrrrrrrrrrr}
Player & Type & P1 & P2 & P3 & P4 & P5 & P6 & P7 & P8 & P9 & P10 & Sum \\ \hline
0 & 1 & 0 & 0 & 0 & 0 & 0 & 0 & 30 & 20 & 20 & 10 & 80 \\
1 & 1 & 0 & 0 & 0 & 0 & 0 & 0 & 0 & 30 & 25 & 25 & 80 \\
2 & 1 & 0 & 0 & 0 & 0 & 0 & 25 & 20 & 20 & 10 & 5 & 80 \\
3 & 1 & 0 & 0 & 0 & 0 & 0 & 0 & 0 & 0 & 30 & 50 & 80 \\
\end{tabular}
\caption{Strategies of all players type 1 for ECF Game 1.}
\label{table:random_panel_game_1_type_1_players}
\figbot
\end{table}
\begin{table}[h]
\figtop
\vskip 3 pt
\centering
\begin{tabular}{rrrrrrrrrrr}
& P1 & P2 & P3 & P4 & P5 & P6 & P7 & P8 & P9 & P10 \\
\cline{2-11}
Player 1 & 5 & 15 & 25 & 35 & 45 & 55 & 65 & 75 & 85 & 95 \\
Player 2 & 95 & 5 & 15 & 25 & 35 & 45 & 55 & 65 & 75 & 85 \\
Player 3 & 85 & 95 & 5 & 15 & 25 & 35 & 45 & 55 & 65 & 75 \\
Player 4 & 75 & 85 & 95 & 5 & 15 & 25 & 35 & 45 & 55 & 65 \\
Player 5 & 65 & 75 & 85 & 95 & 5 & 15 & 25 & 35 & 45 & 55 \\
\cline{2-11}
Sums & 325 & 275 & 225 & 175 & 125 & 175 & 225 & 275 & 325 & 375
\end{tabular}
\caption{Data set 1 for ECF Game 1.}
\label{table:ecf_game_1_data}
\figbot
\end{table}
\subsection{Focal Player: Role 1}
Table \ref{table:random_panel_game_1_type_1_players_2} reproduces Table \ref{table:random_panel_game_1_type_1_players} with the addition of the project values for players in role 1 (presented in Table \ref{table:ecf_game_1_data}, page \pageref{table:ecf_game_1_data}). We see a general pattern of allocating thalers across the higher value projects, from player 1's perspective. Notice that player 3 is the most concentrated, dividing its thalers between projects P9 and P10. Player 2 is the most dispersed in its allocations.
\begin{table}[h]
\figtop
\centering
\begin{tabular}{rrrrrrrrrrrrr}
& Prefs: & 5 & 15 & 25 & 35 & 45 & 55 & 65 & 75 & 85 & 95 & \\ \hline
Player & Type & P1 & P2 & P3 & P4 & P5 & P6 & P7 & P8 & P9 & P10 & Sum \\ \hline
0 & 1 & 0 & 0 & 0 & 0 & 0 & 0 & 30 & 20 & 20 & 10 & 80 \\
1 & 1 & 0 & 0 & 0 & 0 & 0 & 0 & 0 & 30 & 25 & 25 & 80 \\
2 & 1 & 0 & 0 & 0 & 0 & 0 & 25 & 20 & 20 & 10 & 5 & 80 \\
3 & 1 & 0 & 0 & 0 & 0 & 0 & 0 & 0 & 0 & 30 & 50 & 80 \\
\end{tabular}
\caption{Strategies of all players type 1 for ECF Game 1 (after Table \ref{table:random_panel_game_1_type_1_players}).}
\label{table:random_panel_game_1_type_1_players_2}
\figbot
\end{table}
Looking now at Table \ref{table:summary_game1_role1}, we can see how the strategies of the four role 1 players fared during 1,000 randomly generated ECF games. As in our simple example from the previous section, Bang-for-Buck\index{Bang-for-Buck}, the optimal strategy (although estimated by a heuristic), does by far the best overall. Also as in the previous case, the strategy of player 3 does far better than any of the other role 1 strategies. In fact, the four strategies can be ranked (intuitively) by the degree to which they focus their thalers on projects P9 and P10. The ranking is $3 > 1 > 0 > 2$, and this is exactly the order of their mean scores.
\begin{table}[h]
\figtop
\centering
\begin{tabular}{ccccccccc}
player & count & mean & std & min & 25\%\ & 50\%\ & 75\%\ & max \\
0 & 1000.0 & 22.015 & 35.1 & 0.0 & 0.0 & 0.0 & 15.0 & 100.0 \\
1 & 1000.0 & 49.355 & 51.5 & 0.0 & 0.0 & 15.0 & 95.0 & 185.0 \\
2 & 1000.0 & 10.85 & 24.6 & 0.0 & 0.0 & 0.0 & 15.0 & 100.0 \\
3 & 1000.0 & 111.69 & 53.0 & 0.0 & 95.0 & 100.0 & 180.0 & 195.0 \\
BfB & 1000.0 & 163.825 & 36.0 & 90.0 & 115.0 & 185.0 & 185.0 & 255.0 \\ \end{tabular}
\caption{Summary statistics for player role 1 strategies in ECF game 1.}
\label{table:summary_game1_role1}
\figbot
\end{table}
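The summary rows in Table \ref{table:summary_game1_role1}, and in the corresponding tables for the other roles, have the shape of a pandas \texttt{describe()} output. The sketch below shows one way such a table might be produced; it is illustrative only, and \texttt{evaluate\_strategy()} is a hypothetical stand-in for the actual game evaluation code, which is not reproduced here.
\begin{verbatim}
# Sketch: summarizing per-strategy payoffs over many random game setups.
# evaluate_strategy() is a hypothetical stand-in for the real evaluation.
import random
import pandas as pd

def evaluate_strategy(strategy, setup):
    # Placeholder payoff; the real value depends on the ECF game rules.
    return random.choice([0, 5, 15, 85, 95, 185])

def summarize(strategies, setups):
    payoffs = {name: [evaluate_strategy(s, g) for g in setups]
               for name, s in strategies.items()}
    # describe() yields the count/mean/std/min/25%/50%/75%/max columns.
    return pd.DataFrame(payoffs).describe().T

# Usage (names hypothetical): summarize({"0": s0, "3": s3, "BfB": bfb}, setups)
\end{verbatim}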
\newpage\clearpage
\subsection{Focal Player: Role 2}
Table \ref{table:random_panel_game_1_type_2_players} presents the strategies chosen by the
15 type 2 (role 2) players in the game, along with the preference scores (project values) for each of the projects (from Table \ref{table:ecf_game_1_data}). The pattern in evidence is broadly that of the role 1 players, in Table \ref{table:random_panel_game_1_type_1_players_2}. Most of the thalers are allocated to high-value projects, but here there is considerably more spread in the disbursements.
\begin{table}[h]
\figtop
\centering
\begin{tabular}{rrrrrrrrrrrrr}
& Prefs: & 95 & 5 & 15 & 25 & 35 & 45 & 55 & 65 & 75 & 85 & \\\hline
Player & Type & P1 & P2 & P3 & P4 & P5 & P6 & P7 & P8 & P9 & P10 & Sum \\ \hline
4 & 2 & 20 & 0 & 0 & 0 & 0 & 0 & 0 & 20 & 20 & 20 & 80 \\
5 & 2 & 20 & 20 & 0 & 0 & 0 & 0 & 0 & 0 & 20 & 20 & 80 \\
6 & 2 & 15 & 20 & 0 & 0 & 0 & 0 & 0 & 0 & 30 & 15 & 80 \\
7 & 2 & 19 & 10 & 5 & 0 & 0 & 0 & 5 & 8 & 15 & 18 & 80 \\
8 & 2 & 34 & 0 & 0 & 0 & 0 & 0 & 0 & 0 & 12 & 34 & 80 \\
9 & 2 & 30 & 0 & 0 & 0 & 0 & 0 & 0 & 0 & 20 & 30 & 80 \\
10 & 2 & 30 & 0 & 0 & 0 & 0 & 0 & 0 & 0 & 25 & 25 & 80 \\
11 & 2 & 10 & 0 & 0 & 0 & 0 & 0 & 20 & 20 & 20 & 10 & 80 \\
12 & 2 & 30 & 1 & 0 & 0 & 0 & 0 & 0 & 4 & 20 & 25 & 80 \\
13 & 2 & 35 & 0 & 0 & 0 & 0 & 0 & 0 & 0 & 25 & 20 & 80 \\
14 & 2 & 30 & 0 & 0 & 0 & 0 & 0 & 0 & 10 & 20 & 20 & 80 \\
15 & 2 & 0 & 0 & 50 & 0 & 0 & 0 & 0 & 20 & 10 & 0 & 80 \\
16 & 2 & 30 & 0 & 0 & 0 & 0 & 0 & 0 & 0 & 20 & 30 & 80 \\
17 & 2 & 30 & 0 & 0 & 0 & 0 & 0 & 0 & 0 & 25 & 25 & 80 \\
18 & 2 & 16 & 0 & 0 & 0 & 8 & 8 & 8 & 8 & 16 & 16 & 80 \\
\end{tabular}
\caption{Strategies of all players type 2 for ECF Game 1.}
\label{table:random_panel_game_1_type_2_players}
\figbot
\end{table}
\clearpage\newpage
Table \ref{table:summary_game1_role2} tells the story of how the various strategies fared. Bang-for-Buck\index{Bang-for-Buck} of course does comparatively well. As expected, it does about as well as it did in the role of player 1. None of the role 2 strategies, however, come close to the player 3 strategy in role 1, even though the payoffs for role 2 are a one-step rotation of the payoffs for role 1. Notice that player 4 in Table \ref{table:summary_game1_role2} has the highest of the mean scores (71.45) among the players. This player corresponds to the 5th strategy in Table \ref{table:random_panel_game_1_type_2_players}, there labeled player 8. This strategy is arguably the most concentrated on the higher-value projects and may be compared to
strategy 1 in Table \ref{table:random_panel_game_1_type_1_players_2}, whose mean is 49.36 in Table \ref{table:summary_game1_role1}. This suggests a structural difference in the game between the two roles.
\begin{table}[h]
\figtop
\centering
\begin{tabular}{ccccccccc}
player & count & mean & std & min & 25\%\ & 50\%\ & 75\%\ & max \\
0 & 1000.0 & 40.395 & 54.5 & 0.0 & 0.0 & 5.0 & 85.0 & 255.0 \\
1 & 1000.0 & 41.465 & 54.2 & 0.0 & 0.0 & 5.0 & 85.0 & 255.0 \\
2 & 1000.0 & 47.985 & 56.0 & 0.0 & 0.0 & 5.0 & 80.0 & 255.0 \\
3 & 1000.0 & 36.99 & 52.4 & 0.0 & 0.0 & 5.0 & 85.0 & 255.0 \\
4 & 1000.0 & 71.45 & 67.3 & 0.0 & 0.0 & 85.0 & 95.0 & 255.0 \\
5 & 1000.0 & 62.04 & 65.8 & 0.0 & 0.0 & 75.0 & 95.0 & 255.0 \\
6 & 1000.0 & 63.64 & 65.9 & 0.0 & 0.0 & 75.0 & 95.0 & 255.0 \\
7 & 1000.0 & 28.26 & 45.2 & 0.0 & 0.0 & 0.0 & 75.0 & 180.0 \\
8 & 1000.0 & 56.915 & 63.4 & 0.0 & 0.0 & 5.0 & 90.0 & 255.0 \\
9 & 1000.0 & 65.285 & 64.5 & 0.0 & 0.0 & 75.0 & 95.0 & 255.0 \\
10 & 1000.0 & 51.415 & 60.7 & 0.0 & 0.0 & 5.0 & 90.0 & 255.0 \\
11 & 1000.0 & 10.735 & 27.1 & 0.0 & 0.0 & 0.0 & 5.0 & 160.0 \\
12 & 1000.0 & 62.04 & 65.8 & 0.0 & 0.0 & 75.0 & 95.0 & 255.0 \\
13 & 1000.0 & 63.64 & 65.9 & 0.0 & 0.0 & 75.0 & 95.0 & 255.0 \\
14 & 1000.0 & 38.405 & 53.2 & 0.0 & 0.0 & 5.0 & 85.0 & 255.0 \\
BfB & 1000.0 & 173.765 & 52.4 & 80.0 & 160.0 & 170.0 & 185.0 & 260.0 \\
\end{tabular}
\caption{Summary statistics for player role 2 strategies in ECF game 1.}
\label{table:summary_game1_role2}
\figbot
\end{table}
\newpage\clearpage
\subsection{Focal Player: Role 3}
Table \ref{table:random_panel_game_1_type_3_players} presents the strategies chosen by the
12 type 3 (role 3) players in the game, along with the preference scores (project values) for each of the projects (from Table \ref{table:ecf_game_1_data}). The pattern in evidence is broadly that of the role 1 players, in Table \ref{table:random_panel_game_1_type_1_players_2}. Most of the thalers are allocated to high-value projects, but here there is considerably more spread in the disbursements.
\begin{table}[h]
\figtop
\centering
\begin{tabular}{rrrrrrrrrrrrr}
& Prefs: & 85 & 95 & 5 & 15 & 25 & 35 & 45 & 55 & 65 & 75 & \\ \hline
Player & Type & P1 & P2 & P3 & P4 & P5 & P6 & P7 & P8 & P9 & P10 & Sum \\ \hline
19 & 3 & 20 & 20 & 0 & 0 & 0 & 0 & 0 & 0 & 20 & 20 & 80 \\
20 & 3 & 32 & 0 & 0 & 0 & 0 & 0 & 0 & 0 & 24 & 24 & 80 \\
21 & 3 & 30 & 30 & 0 & 0 & 0 & 0 & 0 & 0 & 10 & 10 & 80 \\
22 & 3 & 25 & 40 & 0 & 0 & 0 & 0 & 0 & 0 & 0 & 15 & 80 \\
23 & 3 & 20 & 0 & 0 & 0 & 0 & 0 & 0 & 20 & 20 & 20 & 80 \\
24 & 3 & 30 & 0 & 0 & 0 & 0 & 0 & 0 & 0 & 20 & 30 & 80 \\
25 & 3 & 20 & 25 & 0 & 0 & 0 & 0 & 5 & 5 & 10 & 15 & 80 \\
26 & 3 & 30 & 50 & 0 & 0 & 0 & 0 & 0 & 0 & 0 & 0 & 80 \\
27 & 3 & 24 & 25 & 1 & 1 & 1 & 1 & 1 & 1 & 1 & 24 & 80 \\
28 & 3 & 20 & 20 & 0 & 0 & 0 & 0 & 0 & 0 & 20 & 20 & 80 \\
29 & 3 & 35 & 0 & 0 & 0 & 0 & 0 & 0 & 0 & 25 & 20 & 80 \\
30 & 3 & 0 & 0 & 0 & 0 & 0 & 0 & 0 & 20 & 30 & 30 & 80 \\ \end{tabular}
\caption{Strategies of all players type 3 for ECF Game 1.}
\label{table:random_panel_game_1_type_3_players}
\figbot
\end{table}
\clearpage\newpage
Table \ref{table:summary_game1_role3} tells the story of how the various strategies fared. Bang-for-Buck,\index{Bang-for-Buck} of course, does comparatively well. As expected, it does about as well as it did in the roles of players 1 and 2. As in the case of role 2, none of the role 3 strategies come close to the player 3 strategy in role 1, even though the payoffs for role 3 are a two-step rotation of the payoffs for role 1. Notice that player 7 in Table \ref{table:summary_game1_role3} has the highest of the mean scores (74.45) among the players. This player corresponds to the 8th strategy in Table \ref{table:random_panel_game_1_type_3_players}, there labeled player ID 26. This strategy is arguably the most concentrated on the higher-value projects and may be compared to
strategy 1 in Table \ref{table:random_panel_game_1_type_1_players_2}, whose mean is 49.36 in Table \ref{table:summary_game1_role1}. This again suggests a structural difference in the game between the roles.
\begin{table}[h]
\figtop
\centering
\begin{tabular}{ccccccccc}
player & count & mean & std & min & 25\%\ & 50\%\ & 75\%\ & max \\
0 & 1000.00 & 59.40 & 58.84 & 0.00 & 0.00 & 65.00 & 95.00 & 235.00 \\
1 & 1000.00 & 72.33 & 64.43 & 0.00 & 0.00 & 65.00 & 140.00 & 235.00 \\
2 & 1000.00 & 59.90 & 56.63 & 0.00 & 0.00 & 75.00 & 95.00 & 255.00 \\
3 & 1000.00 & 68.04 & 57.44 & 0.00 & 0.00 & 85.00 & 95.00 & 255.00 \\
4 & 1000.00 & 45.81 & 53.72 & 0.00 & 0.00 & 0.00 & 75.00 & 235.00 \\
5 & 1000.00 & 64.50 & 62.40 & 0.00 & 0.00 & 65.00 & 95.00 & 235.00 \\
6 & 1000.00 & 52.86 & 55.12 & 0.00 & 0.00 & 65.00 & 95.00 & 235.00 \\
7 & 1000.00 & 74.45 & 54.12 & 0.00 & 0.00 & 95.00 & 95.00 & 255.00 \\
8 & 1000.00 & 62.50 & 59.45 & 0.00 & 0.00 & 75.00 & 95.00 & 235.00 \\
9 & 1000.00 & 59.40 & 58.84 & 0.00 & 0.00 & 65.00 & 95.00 & 235.00 \\
10 & 1000.00 & 68.47 & 62.66 & 0.00 & 0.00 & 65.00 & 95.00 & 235.00 \\
11 & 1000.00 & 61.83 & 52.77 & 0.00 & 0.00 & 65.00 & 75.00 & 235.00 \\
BfB & 1000.00 & 188.93 & 47.78 & 65.00 & 160.00 & 180.00 & 235.00 & 320.00 \\
\end{tabular}
\caption{Summary statistics: player role 3 strategies in ECF game 1.}
\label{table:summary_game1_role3}
\figbot
\end{table}
\newpage\clearpage
\subsection{Focal Player: Role 4}
Table \ref{table:random_panel_game_1_type_4_players} presents the strategies chosen by the
16 type 4 (role 4) players in the game, along with the preference scores (project values) for each of the projects (from Table \ref{table:ecf_game_1_data}). The pattern in evidence is broadly that of the role 1 players, in Table \ref{table:random_panel_game_1_type_1_players_2}. Most of the thalers are allocated to high-value projects, but here there is considerably more spread in the disbursements.
\begin{table}[h]
\figtop
\centering
\begin{tabular}{rrrrrrrrrrrrr}
& Prefs: & 75 & 85 & 95 & 5 & 15 & 25 & 35 & 45 & 55 & 65 & \\ \hline
Player & Type & P1 & P2 & P3 & P4 & P5 & P6 & P7 & P8 & P9 & P10 & Sum \\ \hline
31 & 4 & 10 & 50 & 0 & 0 & 0 & 0 & 0 & 0 & 10 & 10 & 80 \\
32 & 4 & 15 & 50 & 0 & 0 & 0 & 0 & 0 & 0 & 0 & 15 & 80 \\
33 & 4 & 45 & 0 & 0 & 0 & 0 & 0 & 0 & 0 & 0 & 35 & 80 \\
34 & 4 & 35 & 1 & 0 & 0 & 1 & 1 & 1 & 1 & 10 & 30 & 80 \\
35 & 4 & 20 & 8 & 5 & 0 & 0 & 0 & 5 & 7 & 15 & 20 & 80 \\
36 & 4 & 25 & 0 & 0 & 0 & 0 & 0 & 0 & 15 & 20 & 20 & 80 \\
37 & 4 & 20 & 0 & 0 & 0 & 0 & 0 & 0 & 20 & 20 & 20 & 80 \\
38 & 4 & 20 & 20 & 20 & 0 & 0 & 0 & 0 & 0 & 0 & 20 & 80 \\
39 & 4 & 30 & 0 & 0 & 0 & 0 & 0 & 0 & 20 & 10 & 20 & 80 \\
40 & 4 & 15 & 40 & 0 & 0 & 0 & 0 & 0 & 0 & 10 & 15 & 80 \\
41 & 4 & 35 & 0 & 0 & 0 & 0 & 0 & 0 & 0 & 35 & 10 & 80 \\
42 & 4 & 0 & 0 & 40 & 0 & 0 & 0 & 0 & 0 & 20 & 20 & 80 \\
43 & 4 & 20 & 30 & 0 & 0 & 0 & 0 & 0 & 10 & 5 & 15 & 80 \\
44 & 4 & 40 & 0 & 0 & 0 & 0 & 0 & 0 & 0 & 0 & 40 & 80 \\
45 & 4 & 20 & 20 & 20 & 0 & 0 & 0 & 0 & 0 & 0 & 20 & 80 \\
46 & 4 & 15 & 15 & 30 & 0 & 0 & 0 & 0 & 0 & 10 & 10 & 80 \\
\end{tabular}
\caption{Strategies of all players type 4 for ECF Game 1.}
\label{table:random_panel_game_1_type_4_players}
\figbot
\end{table}
\newpage\clearpage
Table \ref{table:summary_game1_role4} tells the story of how the various strategies fared. Bang-for-Buck,\index{Bang-for-Buck} of course, does comparatively well. As expected, it does about as well as it did in the roles of players 1, 2, and 3. As in the previous cases, none of the role 4 strategies come close to the player 3 strategy in role 1, even though the payoffs for role 4 are a three-step rotation of the payoffs for role 1. Notice that player 2 in Table \ref{table:summary_game1_role4} has the highest of the mean scores (75.08) among the players. This player corresponds to the 3rd strategy in Table \ref{table:random_panel_game_1_type_4_players}, there labeled player ID 33. It may be compared to
strategy 1 in Table \ref{table:random_panel_game_1_type_1_players_2}, whose mean is 49.36 in Table \ref{table:summary_game1_role1}, again suggesting a structural difference in the game between the roles.
\begin{table}[h]
\figtop
\centering
\begin{tabular}{ccccccccc}
player & count & mean & std & min & 25\%\ & 50\%\ & 75\%\ & max \\
0 & 1000.00 & 72.05 & 48.20 & 0.00 & 55.00 & 85.00 & 85.00 & 205.00 \\
1 & 1000.00 & 68.47 & 48.00 & 0.00 & 0.00 & 85.00 & 85.00 & 205.00 \\
2 & 1000.00 & 75.08 & 54.88 & 0.00 & 0.00 & 75.00 & 140.00 & 225.00 \\
3 & 1000.00 & 60.91 & 55.48 & 0.00 & 0.00 & 65.00 & 85.00 & 225.00 \\
4 & 1000.00 & 40.06 & 46.67 & 0.00 & 0.00 & 0.00 & 65.00 & 205.00 \\
5 & 1000.00 & 42.53 & 47.43 & 0.00 & 0.00 & 55.00 & 65.00 & 225.00 \\
6 & 1000.00 & 38.78 & 44.55 & 0.00 & 0.00 & 0.00 & 65.00 & 150.00 \\
7 & 1000.00 & 36.91 & 45.87 & 0.00 & 0.00 & 0.00 & 65.00 & 205.00 \\
8 & 1000.00 & 38.91 & 48.77 & 0.00 & 0.00 & 0.00 & 65.00 & 225.00 \\
9 & 1000.00 & 65.16 & 51.38 & 0.00 & 0.00 & 85.00 & 85.00 & 205.00 \\
10 & 1000.00 & 68.16 & 47.55 & 0.00 & 55.00 & 55.00 & 75.00 & 195.00 \\
11 & 1000.00 & 35.71 & 42.52 & 0.00 & 0.00 & 0.00 & 65.00 & 150.00 \\
12 & 1000.00 & 52.85 & 49.88 & 0.00 & 0.00 & 65.00 & 85.00 & 225.00 \\
13 & 1000.00 & 69.36 & 54.64 & 0.00 & 0.00 & 65.00 & 140.00 & 225.00 \\
14 & 1000.00 & 36.91 & 45.87 & 0.00 & 0.00 & 0.00 & 65.00 & 205.00 \\
15 & 1000.00 & 33.59 & 43.58 & 0.00 & 0.00 & 0.00 & 65.00 & 150.00 \\
BfB & 1000.00 & 173.85 & 38.40 & 75.00 & 140.00 & 160.00 & 205.00 & 280.00 \\
\end{tabular}
\caption{Summary statistics: player role 4 strategies in ECF game 1.}
\label{table:summary_game1_role4}
\figbot
\end{table}
\newpage\clearpage
\subsection{Focal Player: Role 5}
Table \ref{table:random_panel_game_1_type_5_players} presents the strategies chosen by the
3 type 5 (role 5) players in the game, along with the preference scores (project values) for each of the projects (from Table \ref{table:ecf_game_1_data}). The pattern in evidence is only very broadly that of the role 1 players, in Table \ref{table:random_panel_game_1_type_1_players_2}.
The highest-value projects for role 5 are P2, P3, and P4, but there is no investment at all in the two highest-value projects, P3 and P4. Instead, the players focus on the two ends of the project list, P1--P2 and P9--P10.
\begin{table}[h]
\figtop
\centering
\begin{tabular}{rrrrrrrrrrrrr}
& Prefs: & 65 & 75 & 85 & 95 & 5 & 15 & 25 & 35 & 45 & 55 & \\ \hline
Player & Type & P1 & P2 & P3 & P4 & P5 & P6 & P7 & P8 & P9 & P10 & Sum \\ \hline
47 & 5 & 10 & 30 & 0 & 0 & 0 & 0 & 0 & 10 & 20 & 10 & 80 \\
48 & 5 & 20 & 50 & 0 & 0 & 0 & 0 & 0 & 0 & 0 & 10 & 80 \\
49 & 5 & 0 & 50 & 0 & 0 & 0 & 0 & 0 & 0 & 30 & 0 & 80 \\
\end{tabular}
\caption{Strategies of all players type 5 for ECF Game 1.}
\label{table:random_panel_game_1_type_5_players}
\figbot
\end{table}
Table \ref{table:summary_game1_role5} tells the story of how the various strategies fared. Bang-for-Buck,\index{Bang-for-Buck} of course, does well. Because no player really focused on its high-value projects, all players do comparatively poorly.
% As expected, it does about as well as it did in the roles of players 1 and 2. As in the case of player 2, none of the player 3 strategies come close to the player 3 strategy in role 1, even though the payoffs for role 3 are a two-step rotation of the payoffs for role 1. Notice that player 7 in Table \ref{table:summary_game1_role3} has the highest of the mean scores (74.45) among the players. This player corresponds to the 8th strategy in Table \ref{table:random_panel_game_1_type_3_players}, there labeled player ID 26. This strategy is arguably the most concentrated on the higher-value strategies and may be compared to
%strategy 1 in Table \ref{table:random_panel_game_1_type_1_players_2}, whose mean is 49.36 in Table \ref{table:summary_game1_role1}. This suggests a structural difference in the game between the two roles.
\begin{table}[h]
\figtop
\centering
\begin{tabular}{ccccccccc}
player & count & mean & std & min & 25\%\ & 50\%\ & 75\%\ & max \\
0 & 1000.00 & 38.06 & 43.31 & 0.00 & 0.00 & 22.50 & 65.00 & 165.00 \\
1 & 1000.00 & 49.88 & 45.81 & 0.00 & 0.00 & 55.00 & 75.00 & 195.00 \\
2 & 1000.00 & 45.59 & 41.53 & 0.00 & 0.00 & 45.00 & 75.00 & 195.00 \\
BfB & 1000.00 & 148.32 & 30.54 & 55.00 & 120.00 & 165.00 & 165.00 & 240.00 \\
\end{tabular}
\caption{Summary statistics: player role 5 strategies in ECF game 1.}
\label{table:summary_game1_role5}
\figbot
\end{table}
\newpage\clearpage
\ifnum\draft=1
\vfill
\noindent File: Extending\_the\_analysis.tex
\fi
\end{document}
"alphanum_fraction": 0.6283007364,
"avg_line_length": 57.0080213904,
"ext": "tex",
"hexsha": "48bd9cd577b0809a45bf1559f2ebca36bf2911e3",
"lang": "TeX",
"max_forks_count": null,
"max_forks_repo_forks_event_max_datetime": null,
"max_forks_repo_forks_event_min_datetime": null,
"max_forks_repo_head_hexsha": "78363a0548c3c86a3d424f8b13f16b045261c934",
"max_forks_repo_licenses": [
"MIT"
],
"max_forks_repo_name": "stevenokimbrough/DecisionGames",
"max_forks_repo_path": "ECF Chapter/Exending_the_Analysis.tex",
"max_issues_count": null,
"max_issues_repo_head_hexsha": "78363a0548c3c86a3d424f8b13f16b045261c934",
"max_issues_repo_issues_event_max_datetime": null,
"max_issues_repo_issues_event_min_datetime": null,
"max_issues_repo_licenses": [
"MIT"
],
"max_issues_repo_name": "stevenokimbrough/DecisionGames",
"max_issues_repo_path": "ECF Chapter/Exending_the_Analysis.tex",
"max_line_length": 791,
"max_stars_count": null,
"max_stars_repo_head_hexsha": "78363a0548c3c86a3d424f8b13f16b045261c934",
"max_stars_repo_licenses": [
"MIT"
],
"max_stars_repo_name": "stevenokimbrough/DecisionGames",
"max_stars_repo_path": "ECF Chapter/Exending_the_Analysis.tex",
"max_stars_repo_stars_event_max_datetime": null,
"max_stars_repo_stars_event_min_datetime": null,
"num_tokens": 8882,
"size": 21321
} |
\section{Trust Transitivity}
In this section we define some strategies and show the corresponding algorithms. Then we define the Transitive Game, the
worst-case scenario for an honest player when another player plays maliciously.
\subimport{common/definitions/}{idlestrategy.tex}
\subimport{common/algorithms/}{idlestrategycode.tex}
The inputs and outputs of the remaining strategies are identical to those of \texttt{idleStrategy()}, so we do not repeat them.
\subimport{common/definitions/}{evilstrategy.tex}
\subimport{common/algorithms/}{evilstrategycode.tex}
\subimport{common/definitions/}{conservativestrategy.tex}
\subimport{fc17/algorithms/}{conservativestrategycode.tex}
\texttt{SelectSteal()} returns $y_v$ with $v \in N^{-}\left(A\right)_{j-1}$ such that
\begin{equation}
\label{stealrestriction}
\sum\limits_{v \in N^{-}\left(A\right)_{j-1}}y_v = Dmg_{A, j} \enspace \wedge \enspace \forall v \in N^{-}\left(A\right)_{j-1},
y_v \leq DTr_{v \rightarrow A, j-1} \enspace.
\end{equation}
Player $A$ can arbitrarily define how \texttt{SelectSteal()} distributes the $Steal\left(\right)$ actions
each time she calls the function, as long as (\ref{stealrestriction}) is respected.
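For concreteness, one admissible choice of \texttt{SelectSteal()} is a greedy fill of the incoming direct trusts, sketched below in Python with invented names; the paper itself deliberately leaves the distribution rule to $A$, subject only to (\ref{stealrestriction}).
\begin{verbatim}
# Sketch of one admissible SelectSteal() rule: cover the damage by filling
# the incoming direct trusts greedily. Names are illustrative only.
def select_steal(incoming_trust, damage):
    """incoming_trust: dict v -> DTr_{v -> A, j-1};  damage: Dmg_{A, j}.
    Returns y: dict v -> stolen amount, with sum(y.values()) == damage."""
    assert damage <= sum(incoming_trust.values()), "damage must be coverable"
    y, remaining = {}, damage
    for v, cap in incoming_trust.items():
        y[v] = min(cap, remaining)
        remaining -= y[v]
    return y

# Example: incoming trusts of 6 and 10, damage 9 -> {'v1': 6, 'v2': 3}
print(select_steal({"v1": 6, "v2": 10}, 9))
\end{verbatim}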
The rationale behind this strategy arises from a common real-world situation. Suppose there are a client, an
intermediary and a producer. The client entrusts some value to the intermediary so that the latter can buy the desired
product from the producer and deliver it to the client. The intermediary in turn entrusts an equal value to the
producer, who needs the value upfront to be able to complete the production process. However, the producer eventually
neither delivers the product nor reimburses the value, due to bankruptcy or a decision to exit the market with an unfair
benefit. The intermediary can choose either to reimburse the client and suffer the loss, or to refuse to return the money
and lose the client's trust. The latter choice for the intermediary is exactly the conservative strategy. It is used
throughout this work as the strategy for all intermediary players because it effectively models the worst-case
scenario that a client can face after an evil player decides to steal everything she can and the rest of the players do
not engage in evil activity.
We continue with a possible evolution of the game, the Transitive Game.
\subimport{fc17/algorithms/}{transitivegame.tex}
\subimport{common/figures/}{transitivegameexample.tikz}
\noindent In turn 0, there is already a network in place. All players apart from $A$ and $B$ follow the conservative
strategy. The set of players is not modified throughout the Transitive Game, thus we can refer to $\mathcal{V}_j$ as
$\mathcal{V}$. Each conservative player can be in one of three states: Happy, Angry or Sad. Happy players have 0 loss;
Angry players have positive loss and positive incoming direct trust (line~\ref{trstealaddwangry}), and are thus able to
replenish their loss at least in part; Sad players have positive loss but 0 incoming direct trust (line~\ref{trstealaddwsad}),
and thus cannot replenish the loss. An example execution can be seen in Fig.~\ref{fig:transitivegame}.
Let $j_0$ be the first turn on which $B$ is chosen to play. Until then, all players will pass their turn since nothing has
been stolen yet (see \ifdefined\proceedings the Conservative World theorem in Appendix A of the full version
\cite{trustisrisk}\else Appendix A (Theorem~\ref{conservativeworld})\fi). Moreover, let $v = Player(j)$. The Transitive Game
generates turns:
\begin{align}
Turn_j = \bigcup\limits_{w \in N^{-}\left(v\right)_{j-1}}\{Steal\left(y_w,w\right)\} \enspace, & \mbox{ where} \\
\sum\limits_{w \in N^{-}\left(v\right)_{j-1}}y_w = \min\left(in_{v, j-1}, Dmg_{v, j}\right) \enspace. &
\end{align}
\noindent We see that if $Dmg_{v, j} = 0$, then $Turn_j = \emptyset$. From the definition of $Dmg_{v,j}$, and knowing that no
strategy in this case can increase any direct trust, we see that $Dmg_{v,j} \geq 0$. Also, $Loss_{v,j} \geq 0$, because
if $Loss_{v,j} < 0$, then $v$ would have stolen more value than has been stolen from her, and thus would not be following the
conservative strategy.
\chapter{Results}
\label{Results}
\section{Experiment Description}
To demonstrate our results, we plot data from one standardized run of multiple division configurations, trained over 2'000 rounds of 10'000 hands each, for a total of 20'000'000 hands.
All divisions included one random and one call agent as baselines, and all divisions cloned a new generation of teachers every 200 rounds, for a total of 90 agents over the whole run. Table \ref{RunDivisions} shows the different training division configurations.
\begin{table}[h!]
\centering
\begin{tabular}{|| c | c | c ||}
\hline
Name & Matchup Method & Agents \\ [0.5ex]
\hline\hline
Qln8-Cl & Climbing & Qlearn-8 \\
QlnA-Cl & Climbing & Qlearn-All \\
SacL-Cl & Climbing & Sac-Low \\
SacH-Cl & Climbing & Sac-High \\
AllAg-Cl & Climbing & Qlearn-8, Qlearn-All, Sac-Low, Sac-High \\
QlnA-Rn & Random & Qlearn-All \\ [1ex]
\hline
\end{tabular}
\caption{Training division configurations used in the standardized run}
\label{RunDivisions}
\end{table}
We trained on a computer with an Intel 6770k CPU, 16GB Ram, and an Nvidia 1070GTX GPU with 8GB VRAM. The training process took about 44 hours in real-time.
After training, we ran three PermaEval divisions to evaluate all agents according to the various metrics: one for all the winnings-based metrics, one for across-division TrueSkill, and a second TrueSkill to evaluate its consistency.
We computed winnings with 10'000 hands per agent pairing of all 102 agents, for a total of 52'020'000 hands. For fairness of comparison, TrueSkill was evaluated for 5'300 rounds, which is approximately the same number of hands.
\section{Performance Metrics}
First, we compare different metrics in consistency and strategy clustering, and take a closer look at TrueSkill.
\subsection{Leaderboards}
\begin{table}[H]
\centering
\subcaptionbox{Sorted by TrueSkill (TS)}{
\begin{tabular}{|| c | c | c | c | c ||}
\hline
Name & TS & Mean & Med & 20-Pctl \\ [0.5ex]
\hline\hline
exam & 151.04 & 3.26 & 2.35 & 0.21 \\
radar & 148.97 & 2.79 & 2.02 & 0.14 \\
loss & 146.49 & 3.95 & 2.76 & 0.33 \\
tan & 143.41 & 4.47 & 2.55 & 0.19 \\
brick & 142.55 & 5.13 & 2.65 & 0.35 \\
clam & 142.43 & 4.67 & 2.81 & 0.15 \\
seeker & 141.14 & 6.16 & 2.98 & 0.33 \\
wisdom & 140.39 & 1.47 & 0.94 & 0.06 \\
suitcase & 138.85 & 4.81 & 2.45 & 0.33 \\
flight & 138.73 & 4.58 & 2.53 & 0.23 \\ [1ex]
\hline
\end{tabular}
}
\subcaptionbox{Sorted by Mean
}{
\begin{tabular}{|| c | c | c | c | c ||}
\hline
Name & TS & Mean & Med & 20-Pctl \\ [0.5ex]
\hline\hline
orange & 129.16 & 6.79 & 1.32 & -0.07 \\
watt & 133.09 & 6.59 & 2.24 & 0.23 \\
seeker & 141.14 & 6.16 & 2.98 & 0.33 \\
ecclesia & 124.79 & 6.15 & 1.42 & 0.00 \\
config & 122.43 & 6.06 & 0.89 & -1.20 \\
power & 132.34 & 6.04 & 1.58 & 0.00 \\
creator & 134.82 & 5.91 & 2.59 & 0.03 \\
union & 129.96 & 5.65 & 1.50 & 0.01 \\
cowbell & 118.01 & 5.50 & 0.88 & -2.32 \\
yarn & 119.79 & 5.50 & 1.14 & -1.60 \\ [1ex]
\hline
\end{tabular}
}
\subcaptionbox{Sorted by Median (Med)
}{
\begin{tabular}{|| c | c | c | c | c ||}
\hline
Name & TS & Mean & Med & 20-Pctl \\ [0.5ex]
\hline\hline
seeker & 141.14 & 6.16 & 2.98 & 0.33 \\
clam & 142.43 & 4.67 & 2.81 & 0.15 \\
penguin & 135.08 & 5.22 & 2.76 & 0.00 \\
loss & 146.49 & 3.95 & 2.76 & 0.33 \\
brick & 142.55 & 5.13 & 2.65 & 0.35 \\
creator & 134.82 & 5.91 & 2.59 & 0.03 \\
tan & 143.41 & 4.47 & 2.55 & 0.19 \\
flight & 138.73 & 4.58 & 2.53 & 0.23 \\
suitcase & 138.85 & 4.81 & 2.45 & 0.33 \\
exam & 151.04 & 3.26 & 2.35 & 0.21 \\[1ex]
\hline
\end{tabular}
}
\subcaptionbox{Sorted by 20-Percentile (20-Pctl)
}{
\begin{tabular}{|| c | c | c | c | c ||}
\hline
Name & TS & Mean & Med & 20-Pctl \\ [0.5ex]
\hline\hline
brick & 142.55 & 5.13 & 2.65 & 0.35 \\
suitcase & 138.85 & 4.81 & 2.45 & 0.33 \\
loss & 146.49 & 3.95 & 2.76 & 0.33 \\
seeker & 141.14 & 6.16 & 2.98 & 0.33 \\
watt & 133.09 & 6.59 & 2.24 & 0.23 \\
flight & 138.73 & 4.58 & 2.53 & 0.23 \\
exam & 151.04 & 3.26 & 2.35 & 0.21 \\
tan & 143.41 & 4.47 & 2.55 & 0.19 \\
rain & 135.02 & 4.20 & 1.01 & 0.17 \\
clam & 142.43 & 4.67 & 2.81 & 0.15 \\[1ex]
\hline
\end{tabular}
}
\caption{Top 10 agents on leaderboard sorted by each metric}
\label{TableTopAgentsMetrics}
\end{table}
We can see in Table \ref{TableTopAgentsMetrics} that the four metrics we used disagree on which agents were best. There are a handful of agents (e.g. \textit{seeker}, \textit{exam}, \textit{brick}, or \textit{suitcase}) that show up in most leaderboards near the top, and it is from these agents that we selected candidates for the AIcrowd competition (specifically, \textit{seeker} was our final agent), but overall there is substantial disagreement.
As one might expect, mean diverges from the other metrics most. We believe this is because mean does not reward consistency, e.g. losing a bit often and winning big sometimes is a good strategy for mean optimization. All three other metrics discourage it, and reward worst-case consistency.
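A small numerical illustration of this point is sketched below; the winnings distributions are invented for illustration and are not taken from our experiments.
\begin{verbatim}
# Toy illustration (invented numbers): an agent that loses a little often
# and wins big rarely scores well on the mean but poorly on median/20-pctl.
import numpy as np

steady = np.array([2.0] * 95 + [3.0] * 5)      # small, consistent wins
swingy = np.array([-1.0] * 90 + [60.0] * 10)   # frequent small losses, rare big wins

for name, w in [("steady", steady), ("swingy", swingy)]:
    print(name, "mean=%.2f" % w.mean(),
          "median=%.2f" % np.median(w),
          "20-pctl=%.2f" % np.percentile(w, 20))
# steady: mean=2.05  median=2.00   20-pctl=2.00
# swingy: mean=5.10  median=-1.00  20-pctl=-1.00
\end{verbatim}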
\begin{figure}[H]
\centering
\subcaptionbox{
Colored by TrueSkill
}{
\includegraphics[width=0.45\linewidth]{Results/figures/aggtightrankTrueSkill.png}
}
\subcaptionbox{
Colored by Mean
}{
\includegraphics[width=0.45\linewidth]{Results/figures/aggtightrankMean.png}
}
\subcaptionbox{
Colored by Median
}{
\includegraphics[width=0.45\linewidth]{Results/figures/aggtightrankMedian.png}
}
\subcaptionbox{
Colored by 20-Percentile
}{
\includegraphics[width=0.45\linewidth]{Results/figures/aggtightrank20-Percentile.png}
}
\caption{Aggression and Tightness colored by rank for each metric, blue is better (normalized)}
\label{AggTightRank}
\end{figure}
However, this difference is not visible in the strategy plots seen in Figure \ref{AggTightRank}. Instead here it is TrueSkill that is an outlier, as it ranks tight and passive agents as worse overall than all others, though mean also considers them worse than the other two.
This would lead to the hypothesis that tight and passive players, which we will see later (section \ref{ResultsAgents}) are mostly SAC agents, play consistently ok in their worst games (decent 20pctl and median), but rarely take advantage of good games (low mean), and also rarely win matches (low TrueSkill). This is consistent with the definition of passivity and tightness, though one would expect the aggressive and loose players to have lower 20pctl scores, which we do not observe.
\begin{figure}[H]
\centering
\includegraphics[width=0.8\linewidth]{Results/figures/upsets_per_metric.png}
\caption{Percent 1v1 evaluations whose outcome matches what the metric difference predicts}
\label{UpsetsPlot}
\end{figure}
Figure \ref{UpsetsPlot} demonstrates how good these metrics are at predicting the winner in a 3v3 evaluation. We can see that TrueSkill as the only metric specifically optimizing for win probability performs best, closely followed by median.
\subsection{TrueSkill Coherence}
\begin{figure}[H]
\centering
\subcaptionbox{
First PermaEval division TrueSkill
}{
\includegraphics[width=0.85\linewidth]{Results/figures/trueskill1.png}
}
\subcaptionbox{
Second PermaEval division TrueSkill
}{
\includegraphics[width=0.85\linewidth]{Results/figures/trueskill2.png}
}
\caption{TrueSkill over time in both PermaEval divisions}
\label{TrueSkillCompare}
\end{figure}
TrueSkill did not converge quickly or well in preliminary experiments, so to establish that it is at least consistent we computed the full ranking twice. Both runs with the changing TrueSkill estimates can be seen in Figure \ref{TrueSkillCompare}.
While the broad tendencies stabilize quickly, we still observe agents swapping or even completely changing their skill rating by as much as 40 points after the first 500 rounds, despite all agents being static and always playing at the same skill level. This does not bode well for convergence.
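For reference, the sketch below reproduces this kind of repeated 3v3 rating update using the open-source \texttt{trueskill} Python package with default parameters; this is an assumption made for illustration and does not reproduce the exact configuration or rating scale used in our league code.
\begin{verbatim}
# Sketch: repeated 3v3 team updates with the open-source `trueskill` package.
# Default parameters are assumed; our league's exact configuration and rating
# scale are not reproduced here. Illustrates how static agents keep drifting.
import random
import trueskill

env = trueskill.TrueSkill(draw_probability=0.0)
ratings = {name: env.create_rating() for name in "abcdef"}

for _ in range(1000):
    players = random.sample(list(ratings), 6)
    team1, team2 = players[:3], players[3:]
    ranks = [0, 1] if random.random() < 0.55 else [1, 0]  # pretend outcome
    new1, new2 = env.rate([[ratings[p] for p in team1],
                           [ratings[p] for p in team2]], ranks=ranks)
    for p, r in zip(team1, new1):
        ratings[p] = r
    for p, r in zip(team2, new2):
        ratings[p] = r

print({p: round(r.mu, 1) for p, r in ratings.items()})
\end{verbatim}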
\begin{figure}[H]
\centering
\includegraphics[width=0.8\linewidth]{Results/figures/trueskill_comparison.png}
\caption{Agent rank in both TrueSkill PermaEval divisions, colored by type}
\label{TrueSkillCompare2}
\end{figure}
Figure \ref{TrueSkillCompare2} shows the rank of all agents as estimated by the two runs of TrueSkill. Were TrueSkill perfectly consistent, this would be a straight line. Instead, we see the mystifying result that consistency depends on agent type; SAC agents appear to play inconsistently when judged by TrueSkill (i.e. by ranking after matches), but other agents are consistent, if not perfectly so. This seems less an artifact of TrueSkill and more an issue with the SAC agents themselves.
\section{Agents}
\label{ResultsAgents}
How do the different agent architectures compare?
\begin{figure}[H]
\centering
\includegraphics[width=0.8\linewidth]{Results/figures/traditional_scatterplot_Type.png}
\caption{Aggression and Tightness, colored by agent type}
\label{AggTightAgentType}
\end{figure}
Agent architectures appear paramount for their strategy. Figure \ref{AggTightAgentType} shows results from multiple divisions (i.e. different seeds and different opponents), which does not prevent a remarkable convergence by agent type.
We see that Qlearn-All almost never calls, and either folds or raises, leaning towards more raises. Qlearn-8 shares a similar tendency but not nearly as strong (the scale of \textit{Distorted Aggression} is hyperbolic towards the top). We believe this is because of implementation details of exploration; more on this in section \ref{ConclusionAgents}.
In contrast, SAC agents are aggressive only when tight, or loose when passive. This is more ``sensible'' by common sense (if you call less, you fold more), but appears to perform worse in our leagues.
\begin{table}[H]
\centering
\begin{tabular}{|| c | c | c | c | c ||}
\hline
Rank & TrueSkill & Mean & Median & 20-Percentile \\ [0.5ex]
\hline\hline
1 & Qlearn-8 & Qlearn-All & Qlearn-All & Qlearn-All \\
2 & Qlearn-All & Qlearn-All & Qlearn-8 & Qlearn-All \\
3 & Qlearn-8 & Qlearn-All & Qlearn-8 & Qlearn-8 \\
4 & Qlearn-8 & Qlearn-All & Qlearn-8 & Qlearn-8 \\
5 & Qlearn-All & Qlearn-All & Qlearn-All & Qlearn-All \\
6 & Qlearn-8 & Qlearn-All & Qlearn-All & Qlearn-8 \\
7 & Qlearn-All & Qlearn-All & Qlearn-8 & Qlearn-8 \\
8 & Qlearn-All & Qlearn-All & Qlearn-8 & Qlearn-8 \\
9 & Qlearn-8 & Qlearn-All & Qlearn-8 & Qlearn-All \\
10 & Qlearn-8 & Qlearn-All & Qlearn-8 & Qlearn-8 \\
11 & Qlearn-8 & Qlearn-All & Qlearn-All & Qlearn-All \\
12 & Qlearn-All & Qlearn-All & Qlearn-All & Qlearn-All \\
13 & Qlearn-All & Qlearn-8 & Qlearn-All & Qlearn-8 \\
14 & Qlearn-All & Qlearn-All & Qlearn-8 & Qlearn-All \\
15 & Qlearn-8 & Qlearn-8 & Qlearn-All & Qlearn-All \\
16 & Qlearn-All & Qlearn-8 & Qlearn-8 & Qlearn-8 \\
17 & Qlearn-All & Qlearn-8 & Qlearn-All & Qlearn-All \\
18 & Qlearn-8 & Qlearn-8 & Qlearn-All & Qlearn-All \\
19 & Qlearn-All & Qlearn-8 & Qlearn-All & Qlearn-All \\
20 & Qlearn-All & Qlearn-8 & Qlearn-All & Qlearn-All \\[1ex]
\hline
\end{tabular}
\caption{Agent type of the top 20 agents according to each metric}
\label{TypeRankings}
\end{table}
Looking at Table \ref{TypeRankings}, the first immediate observation is that no architecture other than the two Qlearns was able to compete.
The second is that while Qlearn-8 and Qlearn-All seem approximately evenly matched in most metrics, in mean Qlearn-All is dominant, earning more money on average despite not winning more often. This is consistent with Qlearn-All's higher aggression.
\begin{figure}[H]
\centering
\subcaptionbox{
Ranked by TrueSkill
}{
\includegraphics[width=0.45\linewidth]{Results/figures/agentdistTrueSkill.png}
}
\subcaptionbox{
Ranked by Mean
}{
\includegraphics[width=0.45\linewidth]{Results/figures/agentdistMean.png}
}
\subcaptionbox{
Ranked by Median
}{
\includegraphics[width=0.45\linewidth]{Results/figures/agentdistMedian.png}
}
\subcaptionbox{
Ranked by 20-Percentile
}{
\includegraphics[width=0.45\linewidth]{Results/figures/agentdist20-Percentile.png}
}
\caption{Agent distribution by type and rank}
\label{AgentTypeDistribution}
\end{figure}
Figure \ref{AgentTypeDistribution} shows how the relative performance of SAC agents differs depending on metrics; ranked by mean or median they outperform call agents, but ranked by TrueSkill only the best of them do.
We can also see that 20pctl is left-skewed, with the best agents being closer to the norm than the worst agents. This makes it difficult to compare the best half, but it allows some insight into the worst; we see that Qlearn-All is surprisingly good, even with its worst untrained agents, in its worst 20\% of matches, despite extreme aggression.
\begin{figure}[H]
\centering
\subcaptionbox{
Ranked by TrueSkill
}{
\includegraphics[width=0.45\linewidth]{Results/figures/generationsTrueSkill.png}
}
\subcaptionbox{
Ranked by Mean
}{
\includegraphics[width=0.45\linewidth]{Results/figures/generationsMean.png}
}
\subcaptionbox{
Ranked by Median
}{
\includegraphics[width=0.45\linewidth]{Results/figures/generationsMedian.png}
}
\subcaptionbox{
Ranked by 20-Percentile
}{
\includegraphics[width=0.45\linewidth]{Results/figures/generations20-Percentile.png}
}
\caption{Agent evolution over time by type and rank}
\label{AgentGenerations}
\end{figure}
Each agent left clones of itself every 200 iterations. Comparing these clones by generation allows us to see how agents improved over time. Figure \ref{AgentGenerations} shows this progress using the four metrics.
At first sight, it looks like they have not. Improvement is visible in TrueSkill for the Qlearn agents, as well as small improvements in the other metrics, but overall this is less improvement than expected.
\section{Populations}
How important are division populations?
\begin{table}[H]
\centering
\begin{tabular}{|| c | c | c | c | c ||}
\hline
Rank & TrueSkill & Mean & Median & 20-Percentile \\ [0.5ex]
\hline\hline
1 & Qln8-Cl & QlnA-Cl & QlnA-Cl & QlnA-Rn \\
2 & QlnA-Rn & QlnA-Cl & Qln8-Cl & QlnA-Cl \\
3 & Qln8-Cl & QlnA-Cl & Qln8-Cl & Qln8-Cl \\
4 & Qln8-Cl & QlnA-Cl & Qln8-Cl & Qln8-Cl \\
5 & QlnA-Rn & AllAg-Cl & QlnA-Rn & QlnA-Cl \\
6 & Qln8-Cl & QlnA-Rn & QlnA-Rn & Qln8-Cl \\
7 & QlnA-Cl & QlnA-Rn & Qln8-Cl & Qln8-Cl \\
8 & QlnA-Rn & QlnA-Rn & Qln8-Cl & Qln8-Cl \\
9 & Qln8-Cl & QlnA-Rn & Qln8-Cl & AllAg-Cl \\
10 & Qln8-Cl & QlnA-Rn & Qln8-Cl & Qln8-Cl \\
11 & Qln8-Cl & QlnA-Cl & QlnA-Cl & QlnA-Rn \\
12 & QlnA-Cl & QlnA-Cl & QlnA-Rn & QlnA-Rn \\
13 & AllAg-Cl & Qln8-Cl & QlnA-Rn & AllAg-Cl \\
14 & QlnA-Rn & QlnA-Rn & AllAg-Cl & QlnA-Rn \\
15 & AllAg-Cl & AllAg-Cl & QlnA-Rn & QlnA-Cl \\
16 & QlnA-Cl & Qln8-Cl & Qln8-Cl & AllAg-Cl \\
17 & QlnA-Rn & Qln8-Cl & QlnA-Cl & QlnA-Rn \\
18 & AllAg-Cl & Qln8-Cl & QlnA-Cl & AllAg-Cl \\
19 & QlnA-Rn & Qln8-Cl & QlnA-Cl & QlnA-Cl \\
20 & QlnA-Cl & Qln8-Cl & QlnA-Rn & QlnA-Rn \\ [1ex]
\hline
\end{tabular}
\caption{Agent division of the top 20 agents according to each metric}
\label{DivisionRankings}
\end{table}
According to Table \ref{DivisionRankings}, Climbing appears only marginally better than Random matching, though enough to be relevant. This accords with smaller experiments we did before this run. It also seems that mixing all agents decreases performance overall, indicating that overfitting is not (yet) an issue. It might be that competition against better agents is important, but its relative under-performance may also be a symptom of its lower training time; because the mixed division has more agents, the training rounds are split between them, leaving each individual agent with fewer rounds per cloning generation.
\begin{figure}[H]
\centering
\includegraphics[width=0.8\linewidth]{Results/figures/traditional_scatterplot_Division.png}
\caption{Aggression and Tightness, colored by division}
\label{AggTightDivision}
\end{figure}
Compared with Figure \ref{AggTightAgentType}, Figure \ref{AggTightDivision} is more mixed. In particular, the mixed division (AllAg-Cl) is spread across the entire plot, and each division specialized to an architecture covers the area of that architecture. This reinforces the concept that strategy space location depends mostly on architecture choice and not on population.
\begin{figure}[H]
\centering
\subcaptionbox{
Ranked by TrueSkill
}{
\includegraphics[width=0.45\linewidth]{Results/figures/matchupdistTrueSkill.png}
}
\subcaptionbox{
Ranked by Mean
}{
\includegraphics[width=0.45\linewidth]{Results/figures/matchupdistMean.png}
}
\subcaptionbox{
Ranked by Median
}{
\includegraphics[width=0.45\linewidth]{Results/figures/matchupdistMedian.png}
}
\subcaptionbox{
Ranked by 20-Percentile
}{
\includegraphics[width=0.45\linewidth]{Results/figures/matchupdist20-Percentile.png}
}
\caption{Agent distribution by matchup type}
\label{MatchupDistribution}
\end{figure}
Figure \ref{MatchupDistribution} takes a closer look at Climbing vs Random for the same agent architecture. Random is more robust in the final evaluation (against all agents), whereas the best Climbing agents earn more money.
\begin{figure}[H]
\centering
\subcaptionbox{
Qln8-Cl
}{
\includegraphics[width=0.45\linewidth]{Results/figures/internal_vs_external_Qln8-Cl.png}
}
\subcaptionbox{
QlnA-Cl
}{
\includegraphics[width=0.45\linewidth]{Results/figures/internal_vs_external_QlnA-Cl.png}
}
\subcaptionbox{
SacL-Cl
}{
\includegraphics[width=0.45\linewidth]{Results/figures/internal_vs_external_SacL-Cl.png}
}
\subcaptionbox{
SacH-Cl
}{
\includegraphics[width=0.45\linewidth]{Results/figures/internal_vs_external_SacH-Cl.png}
}
\subcaptionbox{
AllAg-Cl
}{
\includegraphics[width=0.45\linewidth]{Results/figures/internal_vs_external_AllAg-Cl.png}
}
\subcaptionbox{
QlnA-Rn
}{
\includegraphics[width=0.45\linewidth]{Results/figures/internal_vs_external_QlnA-Rn.png}
}
\caption{Internal ranking vs PermaEval global ranking by division, colored by agent type}
\label{DivisionConsistency}
\end{figure}
To estimate how much agents in each division overfit, we compared the internal ranking of agents in each division with the global ranking of those agents. Figure \ref{DivisionConsistency} shows this relation. Ideally, the rankings would form a straight line from bottom-left to top-right.
It seems that very little overfitting happens, as these results echo those of Figure \ref{TrueSkillCompare2}, meaning that most of the variation here is expected simply due to the TrueSkill seed.
One must recall Figure \ref{TrueSkillCompare}, and consider that the internal rankings were computed in parallel with training. This means the latest agents had only 200 rounds to establish their ranking, and even the oldest had less than two thirds of the rounds used for the global ranking.
"alphanum_fraction": 0.681171673,
"avg_line_length": 47.3129411765,
"ext": "tex",
"hexsha": "7f5f74f63da405ffc9410fd2a600e44c309d9492",
"lang": "TeX",
"max_forks_count": null,
"max_forks_repo_forks_event_max_datetime": null,
"max_forks_repo_forks_event_min_datetime": null,
"max_forks_repo_head_hexsha": "e8d6666f41cb02041758a5b35df804fd0d2027bd",
"max_forks_repo_licenses": [
"MIT"
],
"max_forks_repo_name": "Orpheon/Pokerbot-Thesis",
"max_forks_repo_path": "Results/results.tex",
"max_issues_count": null,
"max_issues_repo_head_hexsha": "e8d6666f41cb02041758a5b35df804fd0d2027bd",
"max_issues_repo_issues_event_max_datetime": null,
"max_issues_repo_issues_event_min_datetime": null,
"max_issues_repo_licenses": [
"MIT"
],
"max_issues_repo_name": "Orpheon/Pokerbot-Thesis",
"max_issues_repo_path": "Results/results.tex",
"max_line_length": 623,
"max_stars_count": null,
"max_stars_repo_head_hexsha": "e8d6666f41cb02041758a5b35df804fd0d2027bd",
"max_stars_repo_licenses": [
"MIT"
],
"max_stars_repo_name": "Orpheon/Pokerbot-Thesis",
"max_stars_repo_path": "Results/results.tex",
"max_stars_repo_stars_event_max_datetime": null,
"max_stars_repo_stars_event_min_datetime": null,
"num_tokens": 6181,
"size": 20108
} |
%------------------------
% Resume in Latex
% Author : Jian Liew
% License : MIT
%------------------------
\documentclass[letterpaper,11pt]{article}
\usepackage{latexsym}
\usepackage[empty]{fullpage}
\usepackage{titlesec}
\usepackage{marvosym}
\usepackage[usenames,dvipsnames]{color}
\usepackage{verbatim}
\usepackage{enumitem}
\usepackage[hidelinks]{hyperref}
\usepackage{fancyhdr}
\usepackage[english]{babel}
\pagestyle{fancy}
\fancyhf{} % clear all header and footer fields
\fancyfoot{}
\renewcommand{\headrulewidth}{0pt}
\renewcommand{\footrulewidth}{0pt}
% Adjust margins
\addtolength{\oddsidemargin}{-0.5in}
\addtolength{\evensidemargin}{-0.5in}
\addtolength{\textwidth}{1in}
\addtolength{\topmargin}{-.5in}
\addtolength{\textheight}{1.0in}
\urlstyle{same}
\raggedbottom
\raggedright
\setlength{\tabcolsep}{0in}
% Sections formatting
\titleformat{\section}{
\vspace{-4pt}\scshape\raggedright\large
}{}{0em}{}[\color{black}\titlerule \vspace{-5pt}]
%-------------------------
% Custom commands
\newcommand{\resumeItem}[2]{
\item\small{
% \textbf{#1}{: #2 \vspace{-2pt}}
{#1}{ #2 \vspace{-2pt}}
}
}
\newcommand{\resumeSubheading}[4]{
\vspace{-1pt}\item
\begin{tabular*}{0.97\textwidth}[t]{l@{\extracolsep{\fill}}r}
\textbf{#1} & #2 \\
\textit{\small#3} & \textit{\small #4} \\
\end{tabular*}\vspace{-5pt}
}
\newcommand{\resumeSubItem}[2]{\resumeItem{#1}{#2}\vspace{-4pt}}
\renewcommand{\labelitemii}{$\circ$}
\newcommand{\resumeSubHeadingListStart}{\begin{itemize}[leftmargin=*]}
\newcommand{\resumeSubHeadingListEnd}{\end{itemize}}
\newcommand{\resumeItemListStart}{\begin{itemize}}
\newcommand{\resumeItemListEnd}{\end{itemize}\vspace{-5pt}}
%-------------------------------------------
%%%%%% CV STARTS HERE %%%%%%%%%%%%%%%%%%%%%%%%%%%%
\begin{document}
%----------HEADING-----------------
\begin{tabular*}{\textwidth}{l@{\extracolsep{\fill}}r}
\textbf{\href{http://jianliew.me/}{\Large Jian Loong Liew}} & Email : \href{mailto:[email protected]}{[email protected]}\\
\href{https://www.github.com/JianLoong}{https://www.github.com/JianLoong} & Mobile : +61-4-3117-0997 \\
\end{tabular*}
\vspace{+4pt}
I have 3.5 years of working experience in the education sector. I am currently looking
for a career change into industry as a junior software developer/engineer. Passionate and driven to succeed.
%-----------EDUCATION-----------------
\section{Education}
\resumeSubHeadingListStart
\resumeSubheading
{Monash University}{Melbourne, Australia}
{Master of Information Technology (Professional) - Thesis}{Aug. 2013 -- Aug. 2015}
\resumeItemListStart
\resumeItem{}{Faculty of Information Technology Merit Scholarship}
\resumeItem{}{Completed thesis titled "Category R-tree", a data structure for optimised spatial queries.}
\resumeItem{\textbf{Relevant Units -}}{
Mobile and Distributed Systems,
Data Analysis \& Processing,
Internet Applications Development,
and
Research Methods
}
\resumeItemListEnd
\resumeSubheading
{Swinburne University of Technology}{Melbourne, Australia}
{Bachelor of Science (Computer Science \& Software Engineering); GPA: 3.91}{Jul. 2010 -- Dec. 2012}
\resumeItemListStart
\resumeItem{}{Swinburne University of Technology Merit Scholarship}
\resumeItem{}{Won Corporate and Industry Award by obtaining the highest cGPA in the course.}
\resumeItem{\textbf{Relevant Units -}}{
System Architecture \& Design,
Software Deployment \& Evolution,
Software Engineering Project
}
\resumeItemListEnd
\resumeSubHeadingListEnd
%-----------EXPERIENCE-----------------
\section{Experience}
\resumeSubHeadingListStart
\resumeSubheading
{Monash University}{Melbourne, Australia}
{Sessional Lecturer \& Teaching Associate}{Oct. 2015 - Present}
\newline
\resumeItemListStart
\resumeItem{\textbf{FIT9131 - Programming Foundations in Java.}}
{Lectured for Semester 2, 2018}
\resumeItem{\textbf{Restructured the subject FIT5032 - Internet Application Development}}
{}
\resumeItem{\textbf{Subjects taught include - }}{FIT9134 - Computer Architecture \& Operating Systems,
FIT9135 - Data Communications,
FIT5032 - Internet Applications Development,
FIT5042 - Enterprise Application Development,
FIT5120 - Industry Experience Project,
and
FIT5136 - Software Engineering}
\resumeItemListEnd
\resumeSubHeadingListEnd
%-----------PROJECTS-----------------
% \section{Projects}
% \resumeSubHeadingListStart
% \resumeSubItem{QuantSoftware Toolkit}
% {Open source python library for financial data analysis and machine learning for finance.}
% \resumeSubItem{Github Visualization}
% {Data Visualization of Git Log data using D3 to analyze project trends over time.}
% \resumeSubItem{Recommendation System}
% {Music and Movie recommender systems using collaborative filtering on public datasets.}
% \resumeSubItem{Mac Setup}
% {Book that gives step by step instructions on setting up developer environment on Mac OS.}
% \resumeSubHeadingListEnd
%
% --------PROGRAMMING SKILLS------------
\section{Programming Skills}
\resumeSubHeadingListStart
\item{
\textbf{Languages}{: Java, Python, JavaScript, C, C++, C\#, SQL, PHP, Shell Scripting}
}
\item{
\textbf{Technologies}{: Data Warehousing, Git, RESTful Web Service, Google Firebase}
}
\resumeSubHeadingListEnd
\section{Visa Status}
\textbf{Permanent Resident} - Skilled - Independent (subclass 189)
%-------------------------------------------
\end{document}
% !TEX root = ../main.tex
\chapter{Motivation}
\label{ch:motivation}
\section{Definition of Social Media}
\label{sec:definitionOfSocialMedia}
% A definition of social media in general
Since this thesis will be focusing on data from social media, a definition needs to be established first.
Social media is a class of internet-enabled software applications that is characterized by a set of features and functionalities.
These characteristics vary depending on whose definition is used.
There is, however, a general consensus that the first instances of social media applications were developed in the late 1990s.
\\
In this thesis, the set of characterizing features defined by Ellison \etAl \cite{Ellison2008} will be used, which can be seen below.
\begin{enumerate}
\item
Creating a profile containing personal information such as location or age, as well as pictures and optionally other kinds of multimedia content
\item
Building a list of connected profiles facilitating relationships such as friendship
\item
Seeing and traversing lists of connections from your own and other profiles
\end{enumerate}
% Some examples of social media, including twitter, which I focus on in this thesis
The first application that can be classified as social media according to these characteristics
was the web-based service \texttt{SixDegrees.com}, launched in 1997~\cite{Ellison2008}.
A timeline of this and other selected social media application launches can be seen in~\cref{fig:timeline}.
\begin{figure}
\caption{A timeline of selected social media launches}
\label{fig:timeline}
\begin{chronology}[5]{1997}{2017}{\linewidth}
\event{1997}{\small{SixDegrees.com}}
%\event{1999}{LiveJournal}
%\event{1999}{AsianAvenue}
%\event{1999}{BlackPlanet}
%\event{2000}{MiGente}
%\event{2001}{CyWorld}
%\event{2001}{Ryze}
%\event{2001}{Fotolog}
\event{\decimaldate{22}{3}{2002}}{\small{Friendster}}
%\event{\decimaldate{5}{5}{2003}}{\small{LinkedIn}}
\event{\decimaldate{1}{8}{2003}}{\small{MySpace}}
%\event{2003}{\small{Xing}}
\event{\decimaldate{4}{2}{2004}}{\small{Facebook}}
%\event{\decimaldate{10}{2}{2004}}{\small{Flickr}}
\event{\decimaldate{15}{7}{2006}}{\small{Twitter}}
\event{\decimaldate{6}{10}{2010}}{\small{Instagram}}
\event{\decimaldate{28}{6}{2011}}{\small{Google Plus}}
\event{\decimaldate{24}{1}{2013}}{\small{Vine}}
\end{chronology}
\end{figure}
Apart from the main characterizing features, social media applications vary greatly in functionality.
Most offer messaging and blogging capabilities, while some even allow for more specific use cases like live video-streaming \cite{Ellison2008}.
\begin{table}
\caption{A comparison of social media applications}
\label{tab:comparison}
\resizebox{\textwidth}{!}{%
\begin{tabular}{lllll} %
\toprule
& & \multicolumn{2}{c}{Main Relationship Type} & \\
\cmidrule{3-4}
Name
& Main Content Types
& Name
& Characteristics
& Platforms
\\
\midrule
Facebook
& Text, images, videos, live-stream
& Friendship
& Two-sided, balanced
& Web and App
\\
\midrule
Twitter
& Text (limited to 140 characters)
& Following
& One-sided, unbalanced$^{\mathrm{a}}$
& Web and App
\\
\midrule
Instagram
& Images
& Following
& One-sided, unbalanced$^{\mathrm{a}}$
& Web and App
\\
\midrule
Google Plus
& Text, images, videos
& Friendship$^{\mathrm{b}}$
& Two-sided, balanced
& Web and App
\\
\midrule
Vine (discontinued)
& Videos
& Following
& One-sided, unbalanced $^{\mathrm{a}}$
& App
\\\bottomrule
\multicolumn{5}{l}{$^{\mathrm{a}}$Usually unreciprocated relationship with few accounts having the majority of followers}
\\
\multicolumn{5}{l}{$^{\mathrm{b}}$Being in the same social circle}
\\
\end{tabular}}
\end{table}
As seen in~\cref{tab:comparison}, not all social media applications are solely web-based services.
Some, such as Vine, did not even offer a web-based interface anymore.
Also, only a few of the social media applications analyzed offer networking capabilities in the sense of two-sided relationship initiation,
meaning there is no focus on creating relationships online that are then transferred into the real world.
For these reasons, this thesis refrains from using the term social networking site (SNS), as commonly used in previous works~\cite{Ellison2008},
and instead uses the term social media application.
\section{Impact of Social Media}
\label{sec:developmentOfSocialMedia}
% Social media is big and growing
The first indication of the growing importance and influence of social media can be found by looking at the growth numbers:
Facebook, for example, has more than 1 billion users to date,
and Twitter's userbase grew by 1\,382\% between February 2008 and February 2009 alone~\cite{mcgiboney2009twitter}.\\
It is estimated that during the 2016 U.S. election, American adults read and remembered on the order of one to several so-called fake news stories;
untruths portrayed as news favoring a specific candidate.
Although no assessment can be made as to whether the influence of fake news was pivotal to the election,
evidence can be found supporting the claim that exposure to fake news influences a voter's sentiment~\cite{Allcott2017}.\\
Furthermore, social media applications have changed the way organisations, communities and individuals communicate.
Customers expect companies to listen to and engage with them on these channels, and past approaches to customer care are ill-suited for this new environment~\cite{Kietzmann2011}.
Most of the recent growth in social media usage happened in an important target demographic for companies:
65\% of American adults were using social media in 2015, almost 10 times more than in 2005, when only 7\% did~\cite{Perrin2015}.\\
Similar developments can be observed in the amount of goods and services sold via the internet:
The share of e-commerce in the total U.S. retail sales grew from 4.2\% in the first quarter of 2010 to more than double that, 8.5\%,
by the second quarter of 2017, a mere 7 years later~\cite{statistaECommerceGrowth}.
This growth will continue and the change from traditional commerce to e-commerce
will further influence the way customers interact with companies.
The growing importance and influence of social media for companies and individuals,
paired with the potential for misuse as demonstrated during the 2016 election,
necessitates improved real-time monitoring capabilities for the affected stakeholders.\\
From an organizational standpoint, complaints, comments, questions and suggestions by the customer are not only unstructured,
but also ``designed by the customer'', meaning that the organization has little influence on the way customers choose to communicate with it.
Since many customers are already familiar with social media, it often becomes the medium of choice for communicating with companies.
However, no standardized rules exist for these kinds of transactions, making reporting, analysis and knowledge management on the company's side difficult~\cite{Culnan2015}.\\
This thesis aims to explore and implement means of monitoring topics of interaction on social media in real time,
and to combine this analysis with the associated sentiment, to provide informative insights for these stakeholders.
\section{Previous Work}
\label{sec:previousWork}
% First justification why we are using twitter
Twitter's main content type, as shown in~\cref{tab:comparison}, consists of small, simple text entries called tweets, with a maximum length of 140 characters,
which makes it popular among researchers and a good fit for the proof of concept that this thesis aims to provide.
There are also technical benefits of using Twitter, which are explained further in~\cref{ch:twitter}.
%Social media is not used to its full potential by e.g. companies
Culnan \etAl argue that social media applications offer great opportunity for companies,
but while they are widely adopted and used,
companies struggle to achieve measurable business success with their social media work.
They found that identifying quantifiable metrics is an element of effective social media implementation for companies.
Furthermore, they argue that another main element is absorptive capacity, meaning how effectively messages and
interactions on social media are processed~\cite{Culnan2015}.
Salzborn \etAl found that people responsible for managing social-media accounts with big influence may be exposed to, and fear,
drastic mood shifts (``shitstorms'') and that being tasked with managing such an event poses great difficulty~\cite{Salzborn2015}.
\section{Background Theory}
\label{sec:backgroundTheory}
% There is reason to believe companies would benefit from being able to analyze what their customers talk about (topics) and how they feel about these topics (sentiment by topic)
This thesis theorizes that monitoring what users talk about,
and how they feel about it, will enable stakeholders to get a more quantifiable and structured understanding of their target group.\\
To achieve high absorptive capacity, this monitoring solution would have to be automated,
utilizing topic modeling, sentiment analysis and their combination as part of the processing.
These metrics could enable organizations to implement social media effectively~\cite{Culnan2015}.\\
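To make this processing step more concrete, the following sketch combines a simple topic model with a lexicon-based sentiment score and aggregates the sentiment per detected topic.
It is only a minimal illustration in Python using scikit-learn, not the system developed in this thesis;
the example tweets, the two-topic setting and the tiny sentiment lexicon are assumptions made purely for this sketch.
\begin{verbatim}
# Minimal sketch: topic modeling via LDA plus a toy lexicon-based
# sentiment score, aggregated per topic. All inputs are illustrative.
from sklearn.feature_extraction.text import CountVectorizer
from sklearn.decomposition import LatentDirichletAllocation
import numpy as np

tweets = [
    "love the new update it works great",
    "the app keeps crashing terrible support",
    "support answered quickly very happy",
    "the update broke everything awful experience",
]

# Topic modeling: bag-of-words counts + LDA, then the dominant topic per tweet.
vectorizer = CountVectorizer(stop_words="english")
doc_term = vectorizer.fit_transform(tweets)
lda = LatentDirichletAllocation(n_components=2, random_state=0)
doc_topic = lda.fit_transform(doc_term)
dominant_topic = doc_topic.argmax(axis=1)

# Toy lexicon sentiment: +1 per positive word, -1 per negative word.
positive = {"love", "great", "happy", "quickly"}
negative = {"crashing", "terrible", "broke", "awful"}

def sentiment(text):
    words = text.lower().split()
    return sum(w in positive for w in words) - sum(w in negative for w in words)

scores = np.array([sentiment(t) for t in tweets])

# Combination: average sentiment of the tweets assigned to each topic.
for topic in range(2):
    mask = dominant_topic == topic
    if mask.any():
        print("topic", topic, "mean sentiment", scores[mask].mean())
\end{verbatim}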
% What it could be used for, example use case
Not only could these real-time analyses be used to reliably monitor consumer sentiment,
they could also be used to react to changes in sentiment among users quickly enough to counteract
drastic mood shifts and prevent further damage.\\
% Using it as a feedback cycle for A/B testing.
Another benefit of monitoring the sentiment regarding a topic in real time is that it could be used for product development,
to compare different iterations of a product.
It could also be useful for rapidly iterating social media marketing strategies.
"alphanum_fraction": 0.7472474406,
"avg_line_length": 54.2094240838,
"ext": "tex",
"hexsha": "6aaf9dce047382ccbead4296cffdebc88dee8f93",
"lang": "TeX",
"max_forks_count": null,
"max_forks_repo_forks_event_max_datetime": null,
"max_forks_repo_forks_event_min_datetime": null,
"max_forks_repo_head_hexsha": "76f6e8686ab629391fd714228547ed1de097466c",
"max_forks_repo_licenses": [
"MIT"
],
"max_forks_repo_name": "ClaasM/streamed-sentiment-topic-intent",
"max_forks_repo_path": "reports/thesis/chapters/01_Motivation.tex",
"max_issues_count": 8,
"max_issues_repo_head_hexsha": "76f6e8686ab629391fd714228547ed1de097466c",
"max_issues_repo_issues_event_max_datetime": "2022-03-11T23:16:16.000Z",
"max_issues_repo_issues_event_min_datetime": "2020-03-24T15:33:52.000Z",
"max_issues_repo_licenses": [
"MIT"
],
"max_issues_repo_name": "ClaasM/streamed-sentiment-topic-intent",
"max_issues_repo_path": "reports/thesis/chapters/01_Motivation.tex",
"max_line_length": 178,
"max_stars_count": null,
"max_stars_repo_head_hexsha": "76f6e8686ab629391fd714228547ed1de097466c",
"max_stars_repo_licenses": [
"MIT"
],
"max_stars_repo_name": "ClaasM/streamed-sentiment-topic-intent",
"max_stars_repo_path": "reports/thesis/chapters/01_Motivation.tex",
"max_stars_repo_stars_event_max_datetime": null,
"max_stars_repo_stars_event_min_datetime": null,
"num_tokens": 2354,
"size": 10354
} |
\documentclass[../../main.tex]{subfiles}
\begin{document}
\subsection{Motivation}
There are several well-known tensor decomposition methods, but there is no clear understanding of how they compare in terms of quality.
In this work we aim to compare different high-order tensor decomposition techniques provided in the HottBox\footnote{\url{https://hottbox.github.io/stable/index.html}} toolbox. More specifically, we benchmark CPD, Tucker decomposition and Tensor-Train against three quality criteria: precision, stability and computational complexity.
As input data we use the TensorFlow logo (a three-dimensional tensor). For this tensor we find low-rank approximations and then calculate the quality criteria listed above.
\subsection{Problem statement}
Formally, let $\underline{\mathbf{X}}$ be a 3rd-order tensor (the input image). We define its approximations in the following way:
\begin{equation} \label{eq:kotua_1}
\underline{\mathbf{X}}=\sum_{r=1}^{R} \underline{\mathbf{X}}_{r}=\sum_{r=1}^{R} \lambda_{r} \cdot \mathbf{a}_{r} \circ \mathbf{b}_{r} \circ \mathbf{c}_{r} \end{equation}
\begin{equation} \label{eq:kotua_2}
\underline{\mathbf{X}}=\underline{\mathbf{G}} \times_{1} \mathbf{A} \times_{2} \mathbf{B} \times_{3} \mathbf{C}\end{equation}
\begin{equation} \label{eq:kotua_3}
\underline{\mathbf{X}}=\mathbf{A} \times{ }_{2}^{1} \underline{\mathbf{G}}^{(1)} \times_{3}^{1} \underline{\mathbf{G}}^{(2)} \times_{3}^{1} \cdots \times_{3}^{1} \underline{\mathbf{G}}^{(N-1)} \times_{3}^{1} \mathbf{B}\end{equation}
where \ref{eq:kotua_1}--\ref{eq:kotua_3} correspond to the CPD, HOSVD (Tucker) and Tensor-Train decompositions, respectively. We denote the corresponding reconstructed tensors by $X_{cpd}$, $X_{ho}$ and $X_{tt}$.
Precision is defined in the following way:
\begin{equation}
\mathrm{Precision} = \frac{\|X - X_i\|}{\|X\|}, \qquad \forall i \in \{\mathrm{cpd}, \mathrm{ho}, \mathrm{tt}\}
\end{equation}
Complexity is calculated as the time taken to complete the decomposition. Stability is estimated as the increase in error when Gaussian noise is added to the input tensor. In this work the Gaussian noise is a tensor of the same shape as $X$, where each element is sampled from a normal distribution with parameters $\mu$ and $\sigma$.
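These three criteria translate directly into a few lines of NumPy.
The sketch below only illustrates the definitions above: \texttt{decompose\_and\_reconstruct} is a hypothetical placeholder standing in for any of the three decompositions (it is not a HottBox function), and the helper names are chosen for this sketch only.
\begin{verbatim}
# Sketch of the three quality criteria (precision, complexity, stability).
# decompose_and_reconstruct is a hypothetical placeholder for CPD/HOSVD/TT;
# it must return a reconstruction of X with the same shape.
import time
import numpy as np

def relative_error(X, X_hat):
    # "Precision" as defined above: relative reconstruction error
    # (Frobenius norm of the difference over the norm of the input).
    return np.linalg.norm(X - X_hat) / np.linalg.norm(X)

def timed_error(decompose_and_reconstruct, X, rank):
    # "Complexity": wall-clock time of the decomposition itself.
    start = time.perf_counter()
    X_hat = decompose_and_reconstruct(X, rank)
    return relative_error(X, X_hat), time.perf_counter() - start

def stability(decompose_and_reconstruct, X, rank, sigma, mu=0.0, seed=0):
    # "Stability": increase in error once Gaussian noise is added to the input.
    rng = np.random.default_rng(seed)
    X_noisy = X + rng.normal(mu, sigma, size=X.shape)
    err_clean, _ = timed_error(decompose_and_reconstruct, X, rank)
    err_noisy, _ = timed_error(decompose_and_reconstruct, X_noisy, rank)
    return err_noisy - err_clean
\end{verbatim}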
\subsection{Experiment description}
The TensorFlow logo is taken as the input image. The input is converted to a 3rd-order tensor of shape (232, 217, 4).
Then, three experiments are conducted for each type of tensor decomposition (CPD, HOSVD and Tensor-Train), as sketched in the code after the following list:
\begin{enumerate}
\item Calculate the relative error of the decomposition for different ranks of the core tensor.
\item Measure the time taken to compute the decomposition for each rank.
\item Calculate the relative error of the decomposition when Gaussian noise with different $\sigma$ is added to the input image. Repeat this for each rank and then plot a 3D graph illustrating the stability.
\end{enumerate}
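A possible shape of this benchmark loop is sketched below.
The rank and noise grids, the random seed and the placeholder entries of \texttt{decompositions} are illustrative assumptions and do not reproduce the exact setup behind the plots; in the actual experiments each entry would call the corresponding HottBox routine and reconstruct the result.
\begin{verbatim}
# Sketch of the benchmark loop over ranks and noise levels.
# X stands in for the (232, 217, 4) logo tensor, and each entry of
# `decompositions` is a placeholder callable that would normally run the
# corresponding decomposition and return a reconstruction of its input.
import numpy as np

rng = np.random.default_rng(0)
X = rng.random((232, 217, 4))
decompositions = {
    "cpd":   lambda T, rank: T,   # placeholder only
    "hosvd": lambda T, rank: T,   # placeholder only
    "tt":    lambda T, rank: T,   # placeholder only
}

ranks = [5, 10, 20, 50]          # illustrative grid
sigmas = [0.0, 0.05, 0.1, 0.5]   # illustrative noise levels

errors = {}
for name, decompose in decompositions.items():
    for rank in ranks:
        for sigma in sigmas:
            noisy = X + rng.normal(0.0, sigma, X.shape)
            X_hat = decompose(noisy, rank)
            # One possible convention: error measured against the clean input.
            errors[(name, rank, sigma)] = (np.linalg.norm(X - X_hat)
                                           / np.linalg.norm(X))
\end{verbatim}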
\subsection{Plots and analysis}
First of all, we compare how the relative error decreases with rank for the different tensor decompositions. As can be seen from Figure~\ref{fig:kotua:2}, TT and HOSVD show almost identical results, while CPD is quite noisy and its error decreases more slowly.
\begin{figure}[h!]
\centering
\includegraphics[width=0.6\textwidth]{figures/errors}
\caption{Comparison of relative error of decomposition w.r.t.\ rank for HOSVD, CPD and TT.}
\label{fig:kotua:2}
\end{figure}
In terms of computational complexity, HOSVD is the clear leader, while CPD is, once again, the outsider. In Figure~\ref{fig:kotua:3} the time axis is plotted on a logarithmic scale, which shows that CPD is slower than HOSVD by a factor of 100.
\begin{figure}[h!]
\centering
\includegraphics[width=0.6\textwidth]{figures/times}
\caption{Comparison of complexity of decomposition w.r.t.\ rank for HOSVD, CPD and TT.}
\label{fig:kotua:3}
\end{figure}
\subsubsection{Noisy Data}
Figures \ref{fig:kotua:4}--\ref{fig:kotua:6} show how noise influences the stability of the algorithms. In general, the algorithms behave identically: the error decreases with rank when the noise is insignificant, but when the noise becomes large, the error starts to increase with rank.
\begin{figure}[h!]
\centering
\includegraphics[width=0.6\textwidth]{figures/cpd}
\caption{Relative error of CPD decomposition w.r.t.\ rank and standard deviation of Gaussian noise.}
\label{fig:kotua:4}
\end{figure}
Interestingly, regardless of the noise amplitude, the error at low ranks decreases until it reaches a certain threshold. After that point the growth trend takes over.
\begin{figure}[h!]
\centering
\includegraphics[width=0.6\textwidth]{figures/hosvd}
\caption{Relative error of HOSVD decomposition w.r.t.\ rank and standard deviation of Gaussian noise.}
\label{fig:kotua:5}
\end{figure}
\begin{figure}[h!]
\centering
\includegraphics[width=0.6\textwidth]{figures/tt}
\caption{Relative error of TT decomposition w.r.t.\ rank and standard deviation of Gaussian noise.}
\label{fig:kotua:6}
\end{figure}
To summarize the stability criterion, consider Figure~\ref{fig:kotua:7}, which compares the algorithms when small noise (std = 0.1) is added to the picture. As can be seen from this chart, HOSVD is the most stable algorithm. CPD is also quite good at high ranks, but not so good at low ranks. Tensor-Train is on par with HOSVD at low ranks, but becomes less stable at high ranks.
\begin{figure}[h!]
\centering
\includegraphics[width=0.6\textwidth]{figures/stability}
\caption{Comparison of the stability of HOSVD, CPD and TT: relative error w.r.t.\ rank when Gaussian noise with std = 0.1 is added to the input.}
\label{fig:kotua:7}
\end{figure}
\subsection{Conclusion}
Judging by all three parameters (precision, stability and complexity), HOSVD is the best algorithm. It is very fast, stable and on par with Tensor-Train in terms of precision. Tensor-Train gives slightly more accurate results, but it is definitely slower and not as stable. CPD is computationally expensive and its error decreases very slowly with rank, though it is quite stable at high ranks.
\end{document}
\documentclass[]{report}
\usepackage{lmodern}
\usepackage{amssymb,amsmath}
\usepackage{ifxetex,ifluatex}
\usepackage{fixltx2e} % provides \textsubscript
\ifnum 0\ifxetex 1\fi\ifluatex 1\fi=0 % if pdftex
\usepackage[T1]{fontenc}
\usepackage[utf8]{inputenc}
\else % if luatex or xelatex
\ifxetex
\usepackage{mathspec}
\else
\usepackage{fontspec}
\fi
\defaultfontfeatures{Ligatures=TeX,Scale=MatchLowercase}
\fi
% use upquote if available, for straight quotes in verbatim environments
\IfFileExists{upquote.sty}{\usepackage{upquote}}{}
% use microtype if available
\IfFileExists{microtype.sty}{%
\usepackage{microtype}
\UseMicrotypeSet[protrusion]{basicmath} % disable protrusion for tt fonts
}{}
\usepackage[margin=2cm]{geometry}
\usepackage{hyperref}
\hypersetup{unicode=true,
pdftitle={mydata},
pdfborder={0 0 0},
breaklinks=true}
\urlstyle{same} % don't use monospace font for urls
\usepackage{longtable,booktabs}
\usepackage{graphicx,grffile}
\makeatletter
\def\maxwidth{\ifdim\Gin@nat@width>\linewidth\linewidth\else\Gin@nat@width\fi}
\def\maxheight{\ifdim\Gin@nat@height>\textheight\textheight\else\Gin@nat@height\fi}
\makeatother
% Scale images if necessary, so that they will not overflow the page
% margins by default, and it is still possible to overwrite the defaults
% using explicit options in \includegraphics[width, height, ...]{}
\setkeys{Gin}{width=\maxwidth,height=\maxheight,keepaspectratio}
\IfFileExists{parskip.sty}{%
\usepackage{parskip}
}{% else
\setlength{\parindent}{0pt}
\setlength{\parskip}{6pt plus 2pt minus 1pt}
}
\setlength{\emergencystretch}{3em} % prevent overfull lines
\providecommand{\tightlist}{%
\setlength{\itemsep}{0pt}\setlength{\parskip}{0pt}}
\setcounter{secnumdepth}{0}
% Redefines (sub)paragraphs to behave more like sections
\ifx\paragraph\undefined\else
\let\oldparagraph\paragraph
\renewcommand{\paragraph}[1]{\oldparagraph{#1}\mbox{}}
\fi
\ifx\subparagraph\undefined\else
\let\oldsubparagraph\subparagraph
\renewcommand{\subparagraph}[1]{\oldsubparagraph{#1}\mbox{}}
\fi
%%% Use protect on footnotes to avoid problems with footnotes in titles
\let\rmarkdownfootnote\footnote%
\def\footnote{\protect\rmarkdownfootnote}
%%% Change title format to be more compact
\usepackage{titling}
% Create subtitle command for use in maketitle
\providecommand{\subtitle}[1]{
\posttitle{
\begin{center}\large#1\end{center}
}
}
\setlength{\droptitle}{-2em}
\title{mydata}
\pretitle{\vspace{\droptitle}\centering\huge}
\posttitle{\par}
\subtitle{Autogenerated data summary from dataMaid}
\author{}
\preauthor{}\postauthor{}
\predate{\centering\large\emph}
\postdate{\par}
\date{2019-11-15 19:04:16}
\renewcommand{\chaptername}{Part}
\newcommand{\fullline}{\noindent\makebox[\linewidth]{\rule{\textwidth}{0.4pt}}}
\newcommand{\bminione}{\begin{minipage}{0.75 \textwidth}}
\newcommand{\bminitwo}{\begin{minipage}{0.25 \textwidth}}
\newcommand{\emini}{\end{minipage}}
\begin{document}
\maketitle
\hypertarget{data-report-overview}{%
\chapter{Data report overview}\label{data-report-overview}}
The dataset examined has the following dimensions:
\begin{longtable}[]{@{}lr@{}}
\toprule
\begin{minipage}[b]{0.33\columnwidth}\raggedright
Feature\strut
\end{minipage} & \begin{minipage}[b]{0.12\columnwidth}\raggedleft
Result\strut
\end{minipage}\tabularnewline
\midrule
\endhead
\begin{minipage}[t]{0.33\columnwidth}\raggedright
Number of observations\strut
\end{minipage} & \begin{minipage}[t]{0.12\columnwidth}\raggedleft
250\strut
\end{minipage}\tabularnewline
\begin{minipage}[t]{0.33\columnwidth}\raggedright
Number of variables\strut
\end{minipage} & \begin{minipage}[t]{0.12\columnwidth}\raggedleft
21\strut
\end{minipage}\tabularnewline
\bottomrule
\end{longtable}
\hypertarget{checks-performed}{%
\subsection{Checks performed}\label{checks-performed}}
The following variable checks were performed, depending on the data type
of each variable:
\begin{longtable}[]{@{}lcccccccc@{}}
\toprule
\begin{minipage}[b]{0.28\columnwidth}\raggedright
~\strut
\end{minipage} & \begin{minipage}[b]{0.06\columnwidth}\centering
character\strut
\end{minipage} & \begin{minipage}[b]{0.06\columnwidth}\centering
factor\strut
\end{minipage} & \begin{minipage}[b]{0.06\columnwidth}\centering
labelled\strut
\end{minipage} & \begin{minipage}[b]{0.09\columnwidth}\centering
haven labelled\strut
\end{minipage} & \begin{minipage}[b]{0.06\columnwidth}\centering
numeric\strut
\end{minipage} & \begin{minipage}[b]{0.06\columnwidth}\centering
integer\strut
\end{minipage} & \begin{minipage}[b]{0.05\columnwidth}\centering
logical\strut
\end{minipage} & \begin{minipage}[b]{0.06\columnwidth}\centering
Date\strut
\end{minipage}\tabularnewline
\midrule
\endhead
\begin{minipage}[t]{0.28\columnwidth}\raggedright
Identify miscoded missing values\strut
\end{minipage} & \begin{minipage}[t]{0.06\columnwidth}\centering
\(\times\)\strut
\end{minipage} & \begin{minipage}[t]{0.06\columnwidth}\centering
\(\times\)\strut
\end{minipage} & \begin{minipage}[t]{0.06\columnwidth}\centering
\(\times\)\strut
\end{minipage} & \begin{minipage}[t]{0.09\columnwidth}\centering
\(\times\)\strut
\end{minipage} & \begin{minipage}[t]{0.06\columnwidth}\centering
\(\times\)\strut
\end{minipage} & \begin{minipage}[t]{0.06\columnwidth}\centering
\(\times\)\strut
\end{minipage} & \begin{minipage}[t]{0.05\columnwidth}\centering
\strut
\end{minipage} & \begin{minipage}[t]{0.06\columnwidth}\centering
\(\times\)\strut
\end{minipage}\tabularnewline
\begin{minipage}[t]{0.28\columnwidth}\raggedright
Identify prefixed and suffixed whitespace\strut
\end{minipage} & \begin{minipage}[t]{0.06\columnwidth}\centering
\(\times\)\strut
\end{minipage} & \begin{minipage}[t]{0.06\columnwidth}\centering
\(\times\)\strut
\end{minipage} & \begin{minipage}[t]{0.06\columnwidth}\centering
\(\times\)\strut
\end{minipage} & \begin{minipage}[t]{0.09\columnwidth}\centering
\(\times\)\strut
\end{minipage} & \begin{minipage}[t]{0.06\columnwidth}\centering
\strut
\end{minipage} & \begin{minipage}[t]{0.06\columnwidth}\centering
\strut
\end{minipage} & \begin{minipage}[t]{0.05\columnwidth}\centering
\strut
\end{minipage} & \begin{minipage}[t]{0.06\columnwidth}\centering
\strut
\end{minipage}\tabularnewline
\begin{minipage}[t]{0.28\columnwidth}\raggedright
Identify levels with \textless{} 6 obs.\strut
\end{minipage} & \begin{minipage}[t]{0.06\columnwidth}\centering
\(\times\)\strut
\end{minipage} & \begin{minipage}[t]{0.06\columnwidth}\centering
\(\times\)\strut
\end{minipage} & \begin{minipage}[t]{0.06\columnwidth}\centering
\(\times\)\strut
\end{minipage} & \begin{minipage}[t]{0.09\columnwidth}\centering
\(\times\)\strut
\end{minipage} & \begin{minipage}[t]{0.06\columnwidth}\centering
\strut
\end{minipage} & \begin{minipage}[t]{0.06\columnwidth}\centering
\strut
\end{minipage} & \begin{minipage}[t]{0.05\columnwidth}\centering
\strut
\end{minipage} & \begin{minipage}[t]{0.06\columnwidth}\centering
\strut
\end{minipage}\tabularnewline
\begin{minipage}[t]{0.28\columnwidth}\raggedright
Identify case issues\strut
\end{minipage} & \begin{minipage}[t]{0.06\columnwidth}\centering
\(\times\)\strut
\end{minipage} & \begin{minipage}[t]{0.06\columnwidth}\centering
\(\times\)\strut
\end{minipage} & \begin{minipage}[t]{0.06\columnwidth}\centering
\(\times\)\strut
\end{minipage} & \begin{minipage}[t]{0.09\columnwidth}\centering
\(\times\)\strut
\end{minipage} & \begin{minipage}[t]{0.06\columnwidth}\centering
\strut
\end{minipage} & \begin{minipage}[t]{0.06\columnwidth}\centering
\strut
\end{minipage} & \begin{minipage}[t]{0.05\columnwidth}\centering
\strut
\end{minipage} & \begin{minipage}[t]{0.06\columnwidth}\centering
\strut
\end{minipage}\tabularnewline
\begin{minipage}[t]{0.28\columnwidth}\raggedright
Identify misclassified numeric or integer variables\strut
\end{minipage} & \begin{minipage}[t]{0.06\columnwidth}\centering
\(\times\)\strut
\end{minipage} & \begin{minipage}[t]{0.06\columnwidth}\centering
\(\times\)\strut
\end{minipage} & \begin{minipage}[t]{0.06\columnwidth}\centering
\(\times\)\strut
\end{minipage} & \begin{minipage}[t]{0.09\columnwidth}\centering
\(\times\)\strut
\end{minipage} & \begin{minipage}[t]{0.06\columnwidth}\centering
\strut
\end{minipage} & \begin{minipage}[t]{0.06\columnwidth}\centering
\strut
\end{minipage} & \begin{minipage}[t]{0.05\columnwidth}\centering
\strut
\end{minipage} & \begin{minipage}[t]{0.06\columnwidth}\centering
\strut
\end{minipage}\tabularnewline
\begin{minipage}[t]{0.28\columnwidth}\raggedright
Identify outliers\strut
\end{minipage} & \begin{minipage}[t]{0.06\columnwidth}\centering
\strut
\end{minipage} & \begin{minipage}[t]{0.06\columnwidth}\centering
\strut
\end{minipage} & \begin{minipage}[t]{0.06\columnwidth}\centering
\strut
\end{minipage} & \begin{minipage}[t]{0.09\columnwidth}\centering
\strut
\end{minipage} & \begin{minipage}[t]{0.06\columnwidth}\centering
\(\times\)\strut
\end{minipage} & \begin{minipage}[t]{0.06\columnwidth}\centering
\(\times\)\strut
\end{minipage} & \begin{minipage}[t]{0.05\columnwidth}\centering
\strut
\end{minipage} & \begin{minipage}[t]{0.06\columnwidth}\centering
\(\times\)\strut
\end{minipage}\tabularnewline
\bottomrule
\end{longtable}
Please note that all numerical values in the following have been rounded
to 2 decimals.
\hypertarget{summary-table}{%
\chapter{Summary table}\label{summary-table}}
\begin{longtable}[]{@{}llrrc@{}}
\toprule
\begin{minipage}[b]{0.22\columnwidth}\raggedright
~\strut
\end{minipage} & \begin{minipage}[b]{0.15\columnwidth}\raggedright
Variable class\strut
\end{minipage} & \begin{minipage}[b]{0.16\columnwidth}\raggedleft
\# unique values\strut
\end{minipage} & \begin{minipage}[b]{0.20\columnwidth}\raggedleft
Missing observations\strut
\end{minipage} & \begin{minipage}[b]{0.14\columnwidth}\centering
Any problems?\strut
\end{minipage}\tabularnewline
\midrule
\endhead
\begin{minipage}[t]{0.22\columnwidth}\raggedright
\protect\hyperlink{id}{ID}\strut
\end{minipage} & \begin{minipage}[t]{0.15\columnwidth}\raggedright
character\strut
\end{minipage} & \begin{minipage}[t]{0.16\columnwidth}\raggedleft
250\strut
\end{minipage} & \begin{minipage}[t]{0.20\columnwidth}\raggedleft
0.00 \%\strut
\end{minipage} & \begin{minipage}[t]{0.14\columnwidth}\centering
\(\times\)\strut
\end{minipage}\tabularnewline
\begin{minipage}[t]{0.22\columnwidth}\raggedright
\protect\hyperlink{name}{Name}\strut
\end{minipage} & \begin{minipage}[t]{0.15\columnwidth}\raggedright
character\strut
\end{minipage} & \begin{minipage}[t]{0.16\columnwidth}\raggedleft
250\strut
\end{minipage} & \begin{minipage}[t]{0.20\columnwidth}\raggedleft
0.40 \%\strut
\end{minipage} & \begin{minipage}[t]{0.14\columnwidth}\centering
\(\times\)\strut
\end{minipage}\tabularnewline
\begin{minipage}[t]{0.22\columnwidth}\raggedright
\protect\hyperlink{sex}{Sex}\strut
\end{minipage} & \begin{minipage}[t]{0.15\columnwidth}\raggedright
character\strut
\end{minipage} & \begin{minipage}[t]{0.16\columnwidth}\raggedleft
3\strut
\end{minipage} & \begin{minipage}[t]{0.20\columnwidth}\raggedleft
0.40 \%\strut
\end{minipage} & \begin{minipage}[t]{0.14\columnwidth}\centering
\strut
\end{minipage}\tabularnewline
\begin{minipage}[t]{0.22\columnwidth}\raggedright
\protect\hyperlink{age}{Age}\strut
\end{minipage} & \begin{minipage}[t]{0.15\columnwidth}\raggedright
numeric\strut
\end{minipage} & \begin{minipage}[t]{0.16\columnwidth}\raggedleft
50\strut
\end{minipage} & \begin{minipage}[t]{0.20\columnwidth}\raggedleft
0.40 \%\strut
\end{minipage} & \begin{minipage}[t]{0.14\columnwidth}\centering
\strut
\end{minipage}\tabularnewline
\begin{minipage}[t]{0.22\columnwidth}\raggedright
\protect\hyperlink{race}{Race}\strut
\end{minipage} & \begin{minipage}[t]{0.15\columnwidth}\raggedright
character\strut
\end{minipage} & \begin{minipage}[t]{0.16\columnwidth}\raggedleft
6\strut
\end{minipage} & \begin{minipage}[t]{0.20\columnwidth}\raggedleft
0.40 \%\strut
\end{minipage} & \begin{minipage}[t]{0.14\columnwidth}\centering
\strut
\end{minipage}\tabularnewline
\begin{minipage}[t]{0.22\columnwidth}\raggedright
\protect\hyperlink{preinvasivecomponent}{PreinvasiveComponent}\strut
\end{minipage} & \begin{minipage}[t]{0.15\columnwidth}\raggedright
character\strut
\end{minipage} & \begin{minipage}[t]{0.16\columnwidth}\raggedleft
3\strut
\end{minipage} & \begin{minipage}[t]{0.20\columnwidth}\raggedleft
0.40 \%\strut
\end{minipage} & \begin{minipage}[t]{0.14\columnwidth}\centering
\strut
\end{minipage}\tabularnewline
\begin{minipage}[t]{0.22\columnwidth}\raggedright
\protect\hyperlink{lvi}{LVI}\strut
\end{minipage} & \begin{minipage}[t]{0.15\columnwidth}\raggedright
character\strut
\end{minipage} & \begin{minipage}[t]{0.16\columnwidth}\raggedleft
3\strut
\end{minipage} & \begin{minipage}[t]{0.20\columnwidth}\raggedleft
0.40 \%\strut
\end{minipage} & \begin{minipage}[t]{0.14\columnwidth}\centering
\strut
\end{minipage}\tabularnewline
\begin{minipage}[t]{0.22\columnwidth}\raggedright
\protect\hyperlink{pni}{PNI}\strut
\end{minipage} & \begin{minipage}[t]{0.15\columnwidth}\raggedright
character\strut
\end{minipage} & \begin{minipage}[t]{0.16\columnwidth}\raggedleft
3\strut
\end{minipage} & \begin{minipage}[t]{0.20\columnwidth}\raggedleft
0.40 \%\strut
\end{minipage} & \begin{minipage}[t]{0.14\columnwidth}\centering
\strut
\end{minipage}\tabularnewline
\begin{minipage}[t]{0.22\columnwidth}\raggedright
\protect\hyperlink{lastfollowupdate}{LastFollowUpDate}\strut
\end{minipage} & \begin{minipage}[t]{0.15\columnwidth}\raggedright
POSIXct\strut
\end{minipage} & \begin{minipage}[t]{0.16\columnwidth}\raggedleft
13\strut
\end{minipage} & \begin{minipage}[t]{0.20\columnwidth}\raggedleft
0.40 \%\strut
\end{minipage} & \begin{minipage}[t]{0.14\columnwidth}\centering
\(\times\)\strut
\end{minipage}\tabularnewline
\begin{minipage}[t]{0.22\columnwidth}\raggedright
\protect\hyperlink{death}{Death}\strut
\end{minipage} & \begin{minipage}[t]{0.15\columnwidth}\raggedright
logical\strut
\end{minipage} & \begin{minipage}[t]{0.16\columnwidth}\raggedleft
3\strut
\end{minipage} & \begin{minipage}[t]{0.20\columnwidth}\raggedleft
0.40 \%\strut
\end{minipage} & \begin{minipage}[t]{0.14\columnwidth}\centering
\strut
\end{minipage}\tabularnewline
\begin{minipage}[t]{0.22\columnwidth}\raggedright
\protect\hyperlink{group}{Group}\strut
\end{minipage} & \begin{minipage}[t]{0.15\columnwidth}\raggedright
character\strut
\end{minipage} & \begin{minipage}[t]{0.16\columnwidth}\raggedleft
3\strut
\end{minipage} & \begin{minipage}[t]{0.20\columnwidth}\raggedleft
0.40 \%\strut
\end{minipage} & \begin{minipage}[t]{0.14\columnwidth}\centering
\strut
\end{minipage}\tabularnewline
\begin{minipage}[t]{0.22\columnwidth}\raggedright
\protect\hyperlink{grade}{Grade}\strut
\end{minipage} & \begin{minipage}[t]{0.15\columnwidth}\raggedright
character\strut
\end{minipage} & \begin{minipage}[t]{0.16\columnwidth}\raggedleft
4\strut
\end{minipage} & \begin{minipage}[t]{0.20\columnwidth}\raggedleft
0.40 \%\strut
\end{minipage} & \begin{minipage}[t]{0.14\columnwidth}\centering
\strut
\end{minipage}\tabularnewline
\begin{minipage}[t]{0.22\columnwidth}\raggedright
\protect\hyperlink{tstage}{TStage}\strut
\end{minipage} & \begin{minipage}[t]{0.15\columnwidth}\raggedright
character\strut
\end{minipage} & \begin{minipage}[t]{0.16\columnwidth}\raggedleft
5\strut
\end{minipage} & \begin{minipage}[t]{0.20\columnwidth}\raggedleft
0.40 \%\strut
\end{minipage} & \begin{minipage}[t]{0.14\columnwidth}\centering
\strut
\end{minipage}\tabularnewline
\begin{minipage}[t]{0.22\columnwidth}\raggedright
\protect\hyperlink{anti-x-intensity}{Anti-X-intensity}\strut
\end{minipage} & \begin{minipage}[t]{0.15\columnwidth}\raggedright
numeric\strut
\end{minipage} & \begin{minipage}[t]{0.16\columnwidth}\raggedleft
4\strut
\end{minipage} & \begin{minipage}[t]{0.20\columnwidth}\raggedleft
0.40 \%\strut
\end{minipage} & \begin{minipage}[t]{0.14\columnwidth}\centering
\strut
\end{minipage}\tabularnewline
\begin{minipage}[t]{0.22\columnwidth}\raggedright
\protect\hyperlink{anti-y-intensity}{Anti-Y-intensity}\strut
\end{minipage} & \begin{minipage}[t]{0.15\columnwidth}\raggedright
numeric\strut
\end{minipage} & \begin{minipage}[t]{0.16\columnwidth}\raggedleft
4\strut
\end{minipage} & \begin{minipage}[t]{0.20\columnwidth}\raggedleft
0.40 \%\strut
\end{minipage} & \begin{minipage}[t]{0.14\columnwidth}\centering
\strut
\end{minipage}\tabularnewline
\begin{minipage}[t]{0.22\columnwidth}\raggedright
\protect\hyperlink{lymphnodemetastasis}{LymphNodeMetastasis}\strut
\end{minipage} & \begin{minipage}[t]{0.15\columnwidth}\raggedright
character\strut
\end{minipage} & \begin{minipage}[t]{0.16\columnwidth}\raggedleft
3\strut
\end{minipage} & \begin{minipage}[t]{0.20\columnwidth}\raggedleft
0.40 \%\strut
\end{minipage} & \begin{minipage}[t]{0.14\columnwidth}\centering
\strut
\end{minipage}\tabularnewline
\begin{minipage}[t]{0.22\columnwidth}\raggedright
\protect\hyperlink{valid}{Valid}\strut
\end{minipage} & \begin{minipage}[t]{0.15\columnwidth}\raggedright
logical\strut
\end{minipage} & \begin{minipage}[t]{0.16\columnwidth}\raggedleft
3\strut
\end{minipage} & \begin{minipage}[t]{0.20\columnwidth}\raggedleft
0.40 \%\strut
\end{minipage} & \begin{minipage}[t]{0.14\columnwidth}\centering
\strut
\end{minipage}\tabularnewline
\begin{minipage}[t]{0.22\columnwidth}\raggedright
\protect\hyperlink{smoker}{Smoker}\strut
\end{minipage} & \begin{minipage}[t]{0.15\columnwidth}\raggedright
logical\strut
\end{minipage} & \begin{minipage}[t]{0.16\columnwidth}\raggedleft
3\strut
\end{minipage} & \begin{minipage}[t]{0.20\columnwidth}\raggedleft
0.40 \%\strut
\end{minipage} & \begin{minipage}[t]{0.14\columnwidth}\centering
\strut
\end{minipage}\tabularnewline
\begin{minipage}[t]{0.22\columnwidth}\raggedright
\protect\hyperlink{grade_level}{Grade\_Level}\strut
\end{minipage} & \begin{minipage}[t]{0.15\columnwidth}\raggedright
character\strut
\end{minipage} & \begin{minipage}[t]{0.16\columnwidth}\raggedleft
4\strut
\end{minipage} & \begin{minipage}[t]{0.20\columnwidth}\raggedleft
0.40 \%\strut
\end{minipage} & \begin{minipage}[t]{0.14\columnwidth}\centering
\strut
\end{minipage}\tabularnewline
\begin{minipage}[t]{0.22\columnwidth}\raggedright
\protect\hyperlink{surgerydate}{SurgeryDate}\strut
\end{minipage} & \begin{minipage}[t]{0.15\columnwidth}\raggedright
POSIXct\strut
\end{minipage} & \begin{minipage}[t]{0.16\columnwidth}\raggedleft
227\strut
\end{minipage} & \begin{minipage}[t]{0.20\columnwidth}\raggedleft
0.40 \%\strut
\end{minipage} & \begin{minipage}[t]{0.14\columnwidth}\centering
\(\times\)\strut
\end{minipage}\tabularnewline
\begin{minipage}[t]{0.22\columnwidth}\raggedright
\protect\hyperlink{deathtime}{DeathTime}\strut
\end{minipage} & \begin{minipage}[t]{0.15\columnwidth}\raggedright
character\strut
\end{minipage} & \begin{minipage}[t]{0.16\columnwidth}\raggedleft
2\strut
\end{minipage} & \begin{minipage}[t]{0.20\columnwidth}\raggedleft
0.00 \%\strut
\end{minipage} & \begin{minipage}[t]{0.14\columnwidth}\centering
\strut
\end{minipage}\tabularnewline
\bottomrule
\end{longtable}
\hypertarget{variable-list}{%
\chapter{Variable list}\label{variable-list}}
\hypertarget{id}{%
\section{ID}\label{id}}
\begin{itemize}
\tightlist
\item
The variable is a key (distinct values for each observation).
\end{itemize}
\noindent\makebox[\linewidth]{\rule{\textwidth}{0.4pt}}
\hypertarget{name}{%
\section{Name}\label{name}}
\begin{itemize}
\tightlist
\item
The variable is a key (distinct values for each observation).
\end{itemize}
\noindent\makebox[\linewidth]{\rule{\textwidth}{0.4pt}}
\hypertarget{sex}{%
\section{Sex}\label{sex}}
\begin{minipage}{0.75 \textwidth}
\begin{longtable}[]{@{}lr@{}}
\toprule
\begin{minipage}[b]{0.34\columnwidth}\raggedright
Feature\strut
\end{minipage} & \begin{minipage}[b]{0.16\columnwidth}\raggedleft
Result\strut
\end{minipage}\tabularnewline
\midrule
\endhead
\begin{minipage}[t]{0.34\columnwidth}\raggedright
Variable type\strut
\end{minipage} & \begin{minipage}[t]{0.16\columnwidth}\raggedleft
character\strut
\end{minipage}\tabularnewline
\begin{minipage}[t]{0.34\columnwidth}\raggedright
Number of missing obs.\strut
\end{minipage} & \begin{minipage}[t]{0.16\columnwidth}\raggedleft
1 (0.4 \%)\strut
\end{minipage}\tabularnewline
\begin{minipage}[t]{0.34\columnwidth}\raggedright
Number of unique values\strut
\end{minipage} & \begin{minipage}[t]{0.16\columnwidth}\raggedleft
2\strut
\end{minipage}\tabularnewline
\begin{minipage}[t]{0.34\columnwidth}\raggedright
Mode\strut
\end{minipage} & \begin{minipage}[t]{0.16\columnwidth}\raggedleft
``Male''\strut
\end{minipage}\tabularnewline
\bottomrule
\end{longtable}
\end{minipage}
\begin{minipage}{0.25 \textwidth}
\includegraphics{dataMaid_mydata_files/figure-latex/Var-3-Sex-1.pdf}
\end{minipage}
\noindent\makebox[\linewidth]{\rule{\textwidth}{0.4pt}}
\hypertarget{age}{%
\section{Age}\label{age}}
\begin{minipage}{0.75 \textwidth}
\begin{longtable}[]{@{}lr@{}}
\toprule
\begin{minipage}[b]{0.34\columnwidth}\raggedright
Feature\strut
\end{minipage} & \begin{minipage}[b]{0.16\columnwidth}\raggedleft
Result\strut
\end{minipage}\tabularnewline
\midrule
\endhead
\begin{minipage}[t]{0.34\columnwidth}\raggedright
Variable type\strut
\end{minipage} & \begin{minipage}[t]{0.16\columnwidth}\raggedleft
numeric\strut
\end{minipage}\tabularnewline
\begin{minipage}[t]{0.34\columnwidth}\raggedright
Number of missing obs.\strut
\end{minipage} & \begin{minipage}[t]{0.16\columnwidth}\raggedleft
1 (0.4 \%)\strut
\end{minipage}\tabularnewline
\begin{minipage}[t]{0.34\columnwidth}\raggedright
Number of unique values\strut
\end{minipage} & \begin{minipage}[t]{0.16\columnwidth}\raggedleft
49\strut
\end{minipage}\tabularnewline
\begin{minipage}[t]{0.34\columnwidth}\raggedright
Median\strut
\end{minipage} & \begin{minipage}[t]{0.16\columnwidth}\raggedleft
51\strut
\end{minipage}\tabularnewline
\begin{minipage}[t]{0.34\columnwidth}\raggedright
1st and 3rd quartiles\strut
\end{minipage} & \begin{minipage}[t]{0.16\columnwidth}\raggedleft
37; 62\strut
\end{minipage}\tabularnewline
\begin{minipage}[t]{0.34\columnwidth}\raggedright
Min. and max.\strut
\end{minipage} & \begin{minipage}[t]{0.16\columnwidth}\raggedleft
25; 73\strut
\end{minipage}\tabularnewline
\bottomrule
\end{longtable}
\end{minipage}
\begin{minipage}{0.25 \textwidth}
\includegraphics{dataMaid_mydata_files/figure-latex/Var-4-Age-1.pdf}
\end{minipage}
\noindent\makebox[\linewidth]{\rule{\textwidth}{0.4pt}}
\hypertarget{race}{%
\section{Race}\label{race}}
\begin{minipage}{0.75 \textwidth}
\begin{longtable}[]{@{}lr@{}}
\toprule
\begin{minipage}[b]{0.34\columnwidth}\raggedright
Feature\strut
\end{minipage} & \begin{minipage}[b]{0.16\columnwidth}\raggedleft
Result\strut
\end{minipage}\tabularnewline
\midrule
\endhead
\begin{minipage}[t]{0.34\columnwidth}\raggedright
Variable type\strut
\end{minipage} & \begin{minipage}[t]{0.16\columnwidth}\raggedleft
character\strut
\end{minipage}\tabularnewline
\begin{minipage}[t]{0.34\columnwidth}\raggedright
Number of missing obs.\strut
\end{minipage} & \begin{minipage}[t]{0.16\columnwidth}\raggedleft
1 (0.4 \%)\strut
\end{minipage}\tabularnewline
\begin{minipage}[t]{0.34\columnwidth}\raggedright
Number of unique values\strut
\end{minipage} & \begin{minipage}[t]{0.16\columnwidth}\raggedleft
5\strut
\end{minipage}\tabularnewline
\begin{minipage}[t]{0.34\columnwidth}\raggedright
Mode\strut
\end{minipage} & \begin{minipage}[t]{0.16\columnwidth}\raggedleft
``White''\strut
\end{minipage}\tabularnewline
\bottomrule
\end{longtable}
\end{minipage}
\begin{minipage}{0.25 \textwidth}
\includegraphics{dataMaid_mydata_files/figure-latex/Var-5-Race-1.pdf}
\end{minipage}
\noindent\makebox[\linewidth]{\rule{\textwidth}{0.4pt}}
\hypertarget{preinvasivecomponent}{%
\section{PreinvasiveComponent}\label{preinvasivecomponent}}
\begin{minipage}{0.75 \textwidth}
\begin{longtable}[]{@{}lr@{}}
\toprule
\begin{minipage}[b]{0.34\columnwidth}\raggedright
Feature\strut
\end{minipage} & \begin{minipage}[b]{0.16\columnwidth}\raggedleft
Result\strut
\end{minipage}\tabularnewline
\midrule
\endhead
\begin{minipage}[t]{0.34\columnwidth}\raggedright
Variable type\strut
\end{minipage} & \begin{minipage}[t]{0.16\columnwidth}\raggedleft
character\strut
\end{minipage}\tabularnewline
\begin{minipage}[t]{0.34\columnwidth}\raggedright
Number of missing obs.\strut
\end{minipage} & \begin{minipage}[t]{0.16\columnwidth}\raggedleft
1 (0.4 \%)\strut
\end{minipage}\tabularnewline
\begin{minipage}[t]{0.34\columnwidth}\raggedright
Number of unique values\strut
\end{minipage} & \begin{minipage}[t]{0.16\columnwidth}\raggedleft
2\strut
\end{minipage}\tabularnewline
\begin{minipage}[t]{0.34\columnwidth}\raggedright
Mode\strut
\end{minipage} & \begin{minipage}[t]{0.16\columnwidth}\raggedleft
``Absent''\strut
\end{minipage}\tabularnewline
\bottomrule
\end{longtable}
\end{minipage}
\begin{minipage}{0.25 \textwidth}
\includegraphics{dataMaid_mydata_files/figure-latex/Var-6-PreinvasiveComponent-1.pdf}
\end{minipage}
\noindent\makebox[\linewidth]{\rule{\textwidth}{0.4pt}}
\hypertarget{lvi}{%
\section{LVI}\label{lvi}}
\begin{minipage}{0.75 \textwidth}
\begin{longtable}[]{@{}lr@{}}
\toprule
\begin{minipage}[b]{0.34\columnwidth}\raggedright
Feature\strut
\end{minipage} & \begin{minipage}[b]{0.16\columnwidth}\raggedleft
Result\strut
\end{minipage}\tabularnewline
\midrule
\endhead
\begin{minipage}[t]{0.34\columnwidth}\raggedright
Variable type\strut
\end{minipage} & \begin{minipage}[t]{0.16\columnwidth}\raggedleft
character\strut
\end{minipage}\tabularnewline
\begin{minipage}[t]{0.34\columnwidth}\raggedright
Number of missing obs.\strut
\end{minipage} & \begin{minipage}[t]{0.16\columnwidth}\raggedleft
1 (0.4 \%)\strut
\end{minipage}\tabularnewline
\begin{minipage}[t]{0.34\columnwidth}\raggedright
Number of unique values\strut
\end{minipage} & \begin{minipage}[t]{0.16\columnwidth}\raggedleft
2\strut
\end{minipage}\tabularnewline
\begin{minipage}[t]{0.34\columnwidth}\raggedright
Mode\strut
\end{minipage} & \begin{minipage}[t]{0.16\columnwidth}\raggedleft
``Absent''\strut
\end{minipage}\tabularnewline
\bottomrule
\end{longtable}
\end{minipage}
\begin{minipage}{0.25 \textwidth}
\includegraphics{dataMaid_mydata_files/figure-latex/Var-7-LVI-1.pdf}
\end{minipage}
\noindent\makebox[\linewidth]{\rule{\textwidth}{0.4pt}}
\hypertarget{pni}{%
\section{PNI}\label{pni}}
\begin{minipage}{0.75 \textwidth}
\begin{longtable}[]{@{}lr@{}}
\toprule
\begin{minipage}[b]{0.34\columnwidth}\raggedright
Feature\strut
\end{minipage} & \begin{minipage}[b]{0.16\columnwidth}\raggedleft
Result\strut
\end{minipage}\tabularnewline
\midrule
\endhead
\begin{minipage}[t]{0.34\columnwidth}\raggedright
Variable type\strut
\end{minipage} & \begin{minipage}[t]{0.16\columnwidth}\raggedleft
character\strut
\end{minipage}\tabularnewline
\begin{minipage}[t]{0.34\columnwidth}\raggedright
Number of missing obs.\strut
\end{minipage} & \begin{minipage}[t]{0.16\columnwidth}\raggedleft
1 (0.4 \%)\strut
\end{minipage}\tabularnewline
\begin{minipage}[t]{0.34\columnwidth}\raggedright
Number of unique values\strut
\end{minipage} & \begin{minipage}[t]{0.16\columnwidth}\raggedleft
2\strut
\end{minipage}\tabularnewline
\begin{minipage}[t]{0.34\columnwidth}\raggedright
Mode\strut
\end{minipage} & \begin{minipage}[t]{0.16\columnwidth}\raggedleft
``Absent''\strut
\end{minipage}\tabularnewline
\bottomrule
\end{longtable}
\end{minipage}
\begin{minipage}{0.25 \textwidth}
\includegraphics{dataMaid_mydata_files/figure-latex/Var-8-PNI-1.pdf}
\end{minipage}
\noindent\makebox[\linewidth]{\rule{\textwidth}{0.4pt}}
\hypertarget{lastfollowupdate}{%
\section{LastFollowUpDate}\label{lastfollowupdate}}
\begin{itemize}
\tightlist
\item
The variable has class POSIXct which is not supported by dataMaid.
\end{itemize}
\noindent\makebox[\linewidth]{\rule{\textwidth}{0.4pt}}
\hypertarget{death}{%
\section{Death}\label{death}}
\begin{minipage}{0.75 \textwidth}
\begin{longtable}[]{@{}lr@{}}
\toprule
\begin{minipage}[b]{0.34\columnwidth}\raggedright
Feature\strut
\end{minipage} & \begin{minipage}[b]{0.16\columnwidth}\raggedleft
Result\strut
\end{minipage}\tabularnewline
\midrule
\endhead
\begin{minipage}[t]{0.34\columnwidth}\raggedright
Variable type\strut
\end{minipage} & \begin{minipage}[t]{0.16\columnwidth}\raggedleft
logical\strut
\end{minipage}\tabularnewline
\begin{minipage}[t]{0.34\columnwidth}\raggedright
Number of missing obs.\strut
\end{minipage} & \begin{minipage}[t]{0.16\columnwidth}\raggedleft
1 (0.4 \%)\strut
\end{minipage}\tabularnewline
\begin{minipage}[t]{0.34\columnwidth}\raggedright
Number of unique values\strut
\end{minipage} & \begin{minipage}[t]{0.16\columnwidth}\raggedleft
2\strut
\end{minipage}\tabularnewline
\begin{minipage}[t]{0.34\columnwidth}\raggedright
Mode\strut
\end{minipage} & \begin{minipage}[t]{0.16\columnwidth}\raggedleft
``TRUE''\strut
\end{minipage}\tabularnewline
\bottomrule
\end{longtable}
\end{minipage}
\begin{minipage}{0.25 \textwidth}
\includegraphics{dataMaid_mydata_files/figure-latex/Var-10-Death-1.pdf}
\end{minipage}
\noindent\makebox[\linewidth]{\rule{\textwidth}{0.4pt}}
\hypertarget{group}{%
\section{Group}\label{group}}
\begin{minipage}{0.75 \textwidth}
\begin{longtable}[]{@{}lr@{}}
\toprule
\begin{minipage}[b]{0.34\columnwidth}\raggedright
Feature\strut
\end{minipage} & \begin{minipage}[b]{0.18\columnwidth}\raggedleft
Result\strut
\end{minipage}\tabularnewline
\midrule
\endhead
\begin{minipage}[t]{0.34\columnwidth}\raggedright
Variable type\strut
\end{minipage} & \begin{minipage}[t]{0.18\columnwidth}\raggedleft
character\strut
\end{minipage}\tabularnewline
\begin{minipage}[t]{0.34\columnwidth}\raggedright
Number of missing obs.\strut
\end{minipage} & \begin{minipage}[t]{0.18\columnwidth}\raggedleft
1 (0.4 \%)\strut
\end{minipage}\tabularnewline
\begin{minipage}[t]{0.34\columnwidth}\raggedright
Number of unique values\strut
\end{minipage} & \begin{minipage}[t]{0.18\columnwidth}\raggedleft
2\strut
\end{minipage}\tabularnewline
\begin{minipage}[t]{0.34\columnwidth}\raggedright
Mode\strut
\end{minipage} & \begin{minipage}[t]{0.18\columnwidth}\raggedleft
``Treatment''\strut
\end{minipage}\tabularnewline
\bottomrule
\end{longtable}
\end{minipage}
\begin{minipage}{0.25 \textwidth}
\includegraphics{dataMaid_mydata_files/figure-latex/Var-11-Group-1.pdf}
\end{minipage}
\noindent\makebox[\linewidth]{\rule{\textwidth}{0.4pt}}
\hypertarget{grade}{%
\section{Grade}\label{grade}}
\begin{minipage}{0.75 \textwidth}
\begin{longtable}[]{@{}lr@{}}
\toprule
\begin{minipage}[b]{0.34\columnwidth}\raggedright
Feature\strut
\end{minipage} & \begin{minipage}[b]{0.16\columnwidth}\raggedleft
Result\strut
\end{minipage}\tabularnewline
\midrule
\endhead
\begin{minipage}[t]{0.34\columnwidth}\raggedright
Variable type\strut
\end{minipage} & \begin{minipage}[t]{0.16\columnwidth}\raggedleft
character\strut
\end{minipage}\tabularnewline
\begin{minipage}[t]{0.34\columnwidth}\raggedright
Number of missing obs.\strut
\end{minipage} & \begin{minipage}[t]{0.16\columnwidth}\raggedleft
1 (0.4 \%)\strut
\end{minipage}\tabularnewline
\begin{minipage}[t]{0.34\columnwidth}\raggedright
Number of unique values\strut
\end{minipage} & \begin{minipage}[t]{0.16\columnwidth}\raggedleft
3\strut
\end{minipage}\tabularnewline
\begin{minipage}[t]{0.34\columnwidth}\raggedright
Mode\strut
\end{minipage} & \begin{minipage}[t]{0.16\columnwidth}\raggedleft
``3''\strut
\end{minipage}\tabularnewline
\bottomrule
\end{longtable}
\end{minipage}
\begin{minipage}{0.25 \textwidth}
\includegraphics{dataMaid_mydata_files/figure-latex/Var-12-Grade-1.pdf}
\end{minipage}
\noindent\makebox[\linewidth]{\rule{\textwidth}{0.4pt}}
\hypertarget{tstage}{%
\section{TStage}\label{tstage}}
\begin{minipage}{0.75 \textwidth}
\begin{longtable}[]{@{}lr@{}}
\toprule
\begin{minipage}[b]{0.34\columnwidth}\raggedright
Feature\strut
\end{minipage} & \begin{minipage}[b]{0.16\columnwidth}\raggedleft
Result\strut
\end{minipage}\tabularnewline
\midrule
\endhead
\begin{minipage}[t]{0.34\columnwidth}\raggedright
Variable type\strut
\end{minipage} & \begin{minipage}[t]{0.16\columnwidth}\raggedleft
character\strut
\end{minipage}\tabularnewline
\begin{minipage}[t]{0.34\columnwidth}\raggedright
Number of missing obs.\strut
\end{minipage} & \begin{minipage}[t]{0.16\columnwidth}\raggedleft
1 (0.4 \%)\strut
\end{minipage}\tabularnewline
\begin{minipage}[t]{0.34\columnwidth}\raggedright
Number of unique values\strut
\end{minipage} & \begin{minipage}[t]{0.16\columnwidth}\raggedleft
4\strut
\end{minipage}\tabularnewline
\begin{minipage}[t]{0.34\columnwidth}\raggedright
Mode\strut
\end{minipage} & \begin{minipage}[t]{0.16\columnwidth}\raggedleft
``4''\strut
\end{minipage}\tabularnewline
\bottomrule
\end{longtable}
\end{minipage}
\begin{minipage}{0.25 \textwidth}
\includegraphics{dataMaid_mydata_files/figure-latex/Var-13-TStage-1.pdf}
\end{minipage}
\noindent\makebox[\linewidth]{\rule{\textwidth}{0.4pt}}
\hypertarget{anti-x-intensity}{%
\section{Anti-X-intensity}\label{anti-x-intensity}}
\begin{itemize}
\tightlist
\item
Note that this variable is treated as a factor variable below, as it
only takes a few unique values.
\end{itemize}
\begin{minipage}{0.75 \textwidth}
\begin{longtable}[]{@{}lr@{}}
\toprule
\begin{minipage}[b]{0.34\columnwidth}\raggedright
Feature\strut
\end{minipage} & \begin{minipage}[b]{0.16\columnwidth}\raggedleft
Result\strut
\end{minipage}\tabularnewline
\midrule
\endhead
\begin{minipage}[t]{0.34\columnwidth}\raggedright
Variable type\strut
\end{minipage} & \begin{minipage}[t]{0.16\columnwidth}\raggedleft
numeric\strut
\end{minipage}\tabularnewline
\begin{minipage}[t]{0.34\columnwidth}\raggedright
Number of missing obs.\strut
\end{minipage} & \begin{minipage}[t]{0.16\columnwidth}\raggedleft
1 (0.4 \%)\strut
\end{minipage}\tabularnewline
\begin{minipage}[t]{0.34\columnwidth}\raggedright
Number of unique values\strut
\end{minipage} & \begin{minipage}[t]{0.16\columnwidth}\raggedleft
3\strut
\end{minipage}\tabularnewline
\begin{minipage}[t]{0.34\columnwidth}\raggedright
Mode\strut
\end{minipage} & \begin{minipage}[t]{0.16\columnwidth}\raggedleft
``3''\strut
\end{minipage}\tabularnewline
\begin{minipage}[t]{0.34\columnwidth}\raggedright
Reference category\strut
\end{minipage} & \begin{minipage}[t]{0.16\columnwidth}\raggedleft
1\strut
\end{minipage}\tabularnewline
\bottomrule
\end{longtable}
\end{minipage}
\begin{minipage}{0.25 \textwidth}
\includegraphics{dataMaid_mydata_files/figure-latex/Var-14-Anti-X-intensity-1.pdf}
\end{minipage}
\noindent\makebox[\linewidth]{\rule{\textwidth}{0.4pt}}
\hypertarget{anti-y-intensity}{%
\section{Anti-Y-intensity}\label{anti-y-intensity}}
\begin{itemize}
\tightlist
\item
Note that this variable is treated as a factor variable below, as it
only takes a few unique values.
\end{itemize}
\begin{minipage}{0.75 \textwidth}
\begin{longtable}[]{@{}lr@{}}
\toprule
\begin{minipage}[b]{0.34\columnwidth}\raggedright
Feature\strut
\end{minipage} & \begin{minipage}[b]{0.16\columnwidth}\raggedleft
Result\strut
\end{minipage}\tabularnewline
\midrule
\endhead
\begin{minipage}[t]{0.34\columnwidth}\raggedright
Variable type\strut
\end{minipage} & \begin{minipage}[t]{0.16\columnwidth}\raggedleft
numeric\strut
\end{minipage}\tabularnewline
\begin{minipage}[t]{0.34\columnwidth}\raggedright
Number of missing obs.\strut
\end{minipage} & \begin{minipage}[t]{0.16\columnwidth}\raggedleft
1 (0.4 \%)\strut
\end{minipage}\tabularnewline
\begin{minipage}[t]{0.34\columnwidth}\raggedright
Number of unique values\strut
\end{minipage} & \begin{minipage}[t]{0.16\columnwidth}\raggedleft
3\strut
\end{minipage}\tabularnewline
\begin{minipage}[t]{0.34\columnwidth}\raggedright
Mode\strut
\end{minipage} & \begin{minipage}[t]{0.16\columnwidth}\raggedleft
``2''\strut
\end{minipage}\tabularnewline
\begin{minipage}[t]{0.34\columnwidth}\raggedright
Reference category\strut
\end{minipage} & \begin{minipage}[t]{0.16\columnwidth}\raggedleft
1\strut
\end{minipage}\tabularnewline
\bottomrule
\end{longtable}
\end{minipage}
\begin{minipage}{0.25 \textwidth}
\includegraphics{dataMaid_mydata_files/figure-latex/Var-15-Anti-Y-intensity-1.pdf}
\end{minipage}
\noindent\makebox[\linewidth]{\rule{\textwidth}{0.4pt}}
\hypertarget{lymphnodemetastasis}{%
\section{LymphNodeMetastasis}\label{lymphnodemetastasis}}
\begin{minipage}{0.75 \textwidth}
\begin{longtable}[]{@{}lr@{}}
\toprule
\begin{minipage}[b]{0.34\columnwidth}\raggedright
Feature\strut
\end{minipage} & \begin{minipage}[b]{0.16\columnwidth}\raggedleft
Result\strut
\end{minipage}\tabularnewline
\midrule
\endhead
\begin{minipage}[t]{0.34\columnwidth}\raggedright
Variable type\strut
\end{minipage} & \begin{minipage}[t]{0.16\columnwidth}\raggedleft
character\strut
\end{minipage}\tabularnewline
\begin{minipage}[t]{0.34\columnwidth}\raggedright
Number of missing obs.\strut
\end{minipage} & \begin{minipage}[t]{0.16\columnwidth}\raggedleft
1 (0.4 \%)\strut
\end{minipage}\tabularnewline
\begin{minipage}[t]{0.34\columnwidth}\raggedright
Number of unique values\strut
\end{minipage} & \begin{minipage}[t]{0.16\columnwidth}\raggedleft
2\strut
\end{minipage}\tabularnewline
\begin{minipage}[t]{0.34\columnwidth}\raggedright
Mode\strut
\end{minipage} & \begin{minipage}[t]{0.16\columnwidth}\raggedleft
``Absent''\strut
\end{minipage}\tabularnewline
\bottomrule
\end{longtable}
\end{minipage}
\begin{minipage}{0.25 \textwidth}
\includegraphics{dataMaid_mydata_files/figure-latex/Var-16-LymphNodeMetastasis-1.pdf}
\end{minipage}
\noindent\makebox[\linewidth]{\rule{\textwidth}{0.4pt}}
\hypertarget{valid}{%
\section{Valid}\label{valid}}
\begin{minipage}{0.75 \textwidth}
\begin{longtable}[]{@{}lr@{}}
\toprule
\begin{minipage}[b]{0.34\columnwidth}\raggedright
Feature\strut
\end{minipage} & \begin{minipage}[b]{0.16\columnwidth}\raggedleft
Result\strut
\end{minipage}\tabularnewline
\midrule
\endhead
\begin{minipage}[t]{0.34\columnwidth}\raggedright
Variable type\strut
\end{minipage} & \begin{minipage}[t]{0.16\columnwidth}\raggedleft
logical\strut
\end{minipage}\tabularnewline
\begin{minipage}[t]{0.34\columnwidth}\raggedright
Number of missing obs.\strut
\end{minipage} & \begin{minipage}[t]{0.16\columnwidth}\raggedleft
1 (0.4 \%)\strut
\end{minipage}\tabularnewline
\begin{minipage}[t]{0.34\columnwidth}\raggedright
Number of unique values\strut
\end{minipage} & \begin{minipage}[t]{0.16\columnwidth}\raggedleft
2\strut
\end{minipage}\tabularnewline
\begin{minipage}[t]{0.34\columnwidth}\raggedright
Mode\strut
\end{minipage} & \begin{minipage}[t]{0.16\columnwidth}\raggedleft
``TRUE''\strut
\end{minipage}\tabularnewline
\bottomrule
\end{longtable}
\end{minipage}
\begin{minipage}{0.25 \textwidth}
\includegraphics{dataMaid_mydata_files/figure-latex/Var-17-Valid-1.pdf}
\end{minipage}
\noindent\makebox[\linewidth]{\rule{\textwidth}{0.4pt}}
\hypertarget{smoker}{%
\section{Smoker}\label{smoker}}
\begin{minipage}{0.75 \textwidth}
\begin{longtable}[]{@{}lr@{}}
\toprule
\begin{minipage}[b]{0.34\columnwidth}\raggedright
Feature\strut
\end{minipage} & \begin{minipage}[b]{0.16\columnwidth}\raggedleft
Result\strut
\end{minipage}\tabularnewline
\midrule
\endhead
\begin{minipage}[t]{0.34\columnwidth}\raggedright
Variable type\strut
\end{minipage} & \begin{minipage}[t]{0.16\columnwidth}\raggedleft
logical\strut
\end{minipage}\tabularnewline
\begin{minipage}[t]{0.34\columnwidth}\raggedright
Number of missing obs.\strut
\end{minipage} & \begin{minipage}[t]{0.16\columnwidth}\raggedleft
1 (0.4 \%)\strut
\end{minipage}\tabularnewline
\begin{minipage}[t]{0.34\columnwidth}\raggedright
Number of unique values\strut
\end{minipage} & \begin{minipage}[t]{0.16\columnwidth}\raggedleft
2\strut
\end{minipage}\tabularnewline
\begin{minipage}[t]{0.34\columnwidth}\raggedright
Mode\strut
\end{minipage} & \begin{minipage}[t]{0.16\columnwidth}\raggedleft
``TRUE''\strut
\end{minipage}\tabularnewline
\bottomrule
\end{longtable}
\end{minipage}
\begin{minipage}{0.25 \textwidth}
\includegraphics{dataMaid_mydata_files/figure-latex/Var-18-Smoker-1.pdf}
\end{minipage}
\noindent\makebox[\linewidth]{\rule{\textwidth}{0.4pt}}
\hypertarget{grade_level}{%
\section{Grade\_Level}\label{grade_level}}
\begin{minipage}{0.75 \textwidth}
\begin{longtable}[]{@{}lr@{}}
\toprule
\begin{minipage}[b]{0.34\columnwidth}\raggedright
Feature\strut
\end{minipage} & \begin{minipage}[b]{0.16\columnwidth}\raggedleft
Result\strut
\end{minipage}\tabularnewline
\midrule
\endhead
\begin{minipage}[t]{0.34\columnwidth}\raggedright
Variable type\strut
\end{minipage} & \begin{minipage}[t]{0.16\columnwidth}\raggedleft
character\strut
\end{minipage}\tabularnewline
\begin{minipage}[t]{0.34\columnwidth}\raggedright
Number of missing obs.\strut
\end{minipage} & \begin{minipage}[t]{0.16\columnwidth}\raggedleft
1 (0.4 \%)\strut
\end{minipage}\tabularnewline
\begin{minipage}[t]{0.34\columnwidth}\raggedright
Number of unique values\strut
\end{minipage} & \begin{minipage}[t]{0.16\columnwidth}\raggedleft
3\strut
\end{minipage}\tabularnewline
\begin{minipage}[t]{0.34\columnwidth}\raggedright
Mode\strut
\end{minipage} & \begin{minipage}[t]{0.16\columnwidth}\raggedleft
``high''\strut
\end{minipage}\tabularnewline
\bottomrule
\end{longtable}
\end{minipage}
\begin{minipage}{0.25 \textwidth}
\includegraphics{dataMaid_mydata_files/figure-latex/Var-19-Grade-Level-1.pdf}
\end{minipage}
\noindent\makebox[\linewidth]{\rule{\textwidth}{0.4pt}}
\hypertarget{surgerydate}{%
\section{SurgeryDate}\label{surgerydate}}
\begin{itemize}
\tightlist
\item
The variable has class POSIXct which is not supported by dataMaid.
\end{itemize}
\noindent\makebox[\linewidth]{\rule{\textwidth}{0.4pt}}
\hypertarget{deathtime}{%
\section{DeathTime}\label{deathtime}}
\begin{minipage}{0.75 \textwidth}
\begin{longtable}[]{@{}lr@{}}
\toprule
\begin{minipage}[b]{0.34\columnwidth}\raggedright
Feature\strut
\end{minipage} & \begin{minipage}[b]{0.21\columnwidth}\raggedleft
Result\strut
\end{minipage}\tabularnewline
\midrule
\endhead
\begin{minipage}[t]{0.34\columnwidth}\raggedright
Variable type\strut
\end{minipage} & \begin{minipage}[t]{0.21\columnwidth}\raggedleft
character\strut
\end{minipage}\tabularnewline
\begin{minipage}[t]{0.34\columnwidth}\raggedright
Number of missing obs.\strut
\end{minipage} & \begin{minipage}[t]{0.21\columnwidth}\raggedleft
0 (0 \%)\strut
\end{minipage}\tabularnewline
\begin{minipage}[t]{0.34\columnwidth}\raggedright
Number of unique values\strut
\end{minipage} & \begin{minipage}[t]{0.21\columnwidth}\raggedleft
2\strut
\end{minipage}\tabularnewline
\begin{minipage}[t]{0.34\columnwidth}\raggedright
Mode\strut
\end{minipage} & \begin{minipage}[t]{0.21\columnwidth}\raggedleft
``Within1Year''\strut
\end{minipage}\tabularnewline
\bottomrule
\end{longtable}
\end{minipage}
\begin{minipage}{0.25 \textwidth}
\includegraphics{dataMaid_mydata_files/figure-latex/Var-21-DeathTime-1.pdf}
\end{minipage}
\noindent\makebox[\linewidth]{\rule{\textwidth}{0.4pt}}
Report generation information:
\begin{itemize}
\item
Created by Serdar BALCI old (username: \texttt{serdarbalciold}).
\item
Report creation time: Fri Nov 15 2019 19:04:17
\item
Report was run from directory:
\texttt{/Users/serdarbalciold/histopathology-template}
\item
dataMaid v1.3.2 {[}Pkg: 2019-07-27 from CRAN (R 3.6.0){]}
\item
R version 3.6.0 (2019-04-26).
\item
Platform: x86\_64-apple-darwin15.6.0 (64-bit)(macOS 10.15.1).
\item
Function call:
\texttt{dataMaid::makeDataReport(data\ =\ mydata,\ render\ =\ FALSE,\ file\ =\ here::here("out",\ \ "dataMaid\_mydata.Rmd"),\ replace\ =\ TRUE,\ quiet\ =\ TRUE,\ openResult\ =\ FALSE)}
\end{itemize}
\end{document}
\section{Episode 37: The Orcward Hunting Part}
\medskip
\DndDropCapLine{E}xme, Kolo and Burnie move forward, stealthily. Rip comes up and completely distracts Burnie, who promptly falls into a pit. William Wallace orcs hear the trap get sprung and come running, big dicks a-swinging. Kolo hides, Exme gets gang-banged. Myron and Kolo are playing table tennis. Myron hears Exme dying, and runs up to help. Big daddy blue-balls runs up. Rip finally comes along too, and flops his dick out as well. Burnie climbs up a thing and explodes an orc through his dick, dodging attacks as quickly as possible.\medskip
Kolo is all alone. But he’s getting safer, casting spells and cracking vials on people. We slowly murderise the people; Rip shoots one with a big thunderboom at one point. Blue-balls' damned sticky armor keeps blocking. We finally get him (Kolo killing blow!). Kolo goes scouting; we discover there are bats and trolls up ahead, so we stage a place to attack. Rip ruins the whole plan and moves forward, triggering a bunch of bats. Bats swarm, slowly die. Exme discovers Stanri can counter-attack.\medskip
Bats dead, damned trolls show up. A lady with weapons and a man without. Troll man goes down quickly, but the troll-queen is crazy hard to hurt. We finally get her through a huge combo of everyone doing everything. We manage to have a nap, and then go for a wander.
\vspace*{5mm}
\chapter{Dying World 700AF}
\begin{battle}{Noel vs Caius}
\begin{itemize}
\item Check the amount and order of your items
\item Die
\end{itemize}
\end{battle}
\pickup{8 Power Essences}{behind the wall on the right before the cutscene trigger}
\begin{battle}{Gogmagog Gamma}
\begin{flushleft}
\begin{itemize}
\item \sixth
\begin{itemize}
\item Shift Immediately
\end{itemize}
\item \second
\begin{itemize}
\item Auto-chain
\item Feral Link
\end{itemize}
\item \third
\begin{itemize}
\item Auto-debuff
\end{itemize}
\item \textit{Repeat 3 times:}
\begin{itemize}
\item \fifth
\begin{itemize}
\item Fira-Firaga, \comb
\end{itemize}
\item \first
\begin{itemize}
\item Shift
\end{itemize}
\end{itemize}
\end{itemize}
\end{flushleft}
\end{battle}
\pickup{10 Vitality Essences}{behind the trees in the desert wasteland}
\newline
\chapter{Summary}
\label{chap:conclusions}
The purpose of this thesis was to give an overview of arbitrage-free pricing methodology and of the affine short-rate processes used in interest rate modeling and credit risk. These kinds of models have a long history starting in the late 1970s, and their heyday was before the financial crisis of 2007--08. After the crisis, many significant changes have occurred in interest rate markets, for example:
\begin{itemize}
\item IBOR rates are no longer considered riskless, and the relevant spreads have widened.
\item Credit adjustments are required for unsecured positions.
\item Multi-curve pricing has been the industry standard at least for linear products such as swaps.
\item Extraordinary monetary policies of central banks have caused negative interest rates, which were earlier considered impossible. Indeed, the possibility of negative rates in the Va\v{s}\'{i}\v{c}ek--model used to be considered a flaw.
\end{itemize}
Although short-rate models have been surpassed by market models in practice, it is an interesting question to consider how short-rate models fare in the current market structure.
Short-rate models have several theoretical shortcomings. The first is conceptual: there is no actual instantaneous short-rate. It is a purely theoretical concept created to explain how interest rates are formed. Although having an unobservable process as a main ingredient of a theory is troubling, it can be forgiven if the implied theoretical structure is otherwise logically sound and can produce accurate results. The viability of the Black-Scholes option pricing methodology is based on the assumption that the future volatility of the stock process can be accurately inferred, even though it is not actually observable. The same can be true for short-rate modeling of interest rates. If the observed term structure is coherent with the implications of a hypothesized short-rate process, then the model may be useful even if it is fundamentally wrong.
The second main theoretical shortcoming of short-rate modeling is that it is mainly concerned with a single point, the next infinitesimal future time-step. As such, it is not far-fetched to hypothesize that short-rate models have a hard time explaining the complex temporal evolution of the interest rate curve. For example, we have demonstrated that for affine short-rate models with one factor, the long-rates are perfectly correlated. This is a severe limitation, but it can be mitigated with the introduction of multiple factors, time-varying parameters or the technique of dynamic extension.
However, short-rate models are not without merits. They are conceptually simple to understand. Affine models are analytically tractable, with explicit analytical bond pricing formulas. Some models even have explicit analytical bond option pricing formulas, which can be converted into prices of caps and floors.
The calibration of the models to market data was inconclusive in the sense that we did not achieve consistent results. For simple models without credit risk, although there were some precise calibrations, no model was consistently accurate. Since we employed stochastic optimization algorithms for calibration, there is no certainty that global minima were found. The curse of dimensionality makes the optimization problem very hard for multi-factor models, so a bad fit does not indicate that the model is unsuitable for the observed data. Since single-factor models have a manageable number of parameters, we could expect the optimization to be fairly dependable. For the $A(0,1)+$--model, the alternative calibration replicated the original parameter values almost perfectly. For the $A(1,1)+$--model, the alternative calibration produced significantly different parameter values, but the accuracy was very similar. Therefore we can infer with reasonable confidence that the $A(0,1)+$ and $A(1,1)+$--models cannot necessarily fit post-crisis term structures. Since some of the considered multi-factor models of the family $A(M,N)+$ offered decent accuracy, we believe that two- or three-factor models could be used to fit the recently observed data. However, as the alternative calibrations led to significantly different parameters and calibration errors, the inference about the model quality of multi-factor models of type $A(M,N)+$ is not reliable.
Since the calibration data included maturities ranging from the overnight rate to the 30-year rate, the observed rate structure is complex. Calibration to a subset of these maturity ranges will likely produce significantly better fits. This reasoning is supported by the results of the dynamic calibration of Euribor rates ranging from one week to one year, which generally gives significantly better accuracy.
For simple models with credit risk, the calibration errors were large. However, calibration was only attempted with recent data and only models with two factors were considered. As we have seen, single-factor models did not perform well in this environment for interest rate curves alone, and here the two factors had to explain both the interest rate and the spread curves. It is probable that models with more factors would work better.
The calibration methodology employed had severe shortcomings. Although differential evolution has the desired ability to explore the optimization space, it probably wasted many function calls exploring infeasible regions. Moreover, since the alternative optimizations tended not to converge to the original points, it seems that the meta-parameters for the optimization were misspecified. It is also not clear whether differential evolution is the best choice for this kind of calibration; particle swarm optimization or simulated annealing might have been better alternatives. The large number of solutions near the optimization borders suggests that those borders might have been inappropriate. On the other hand, since the shifts were stopped at the borders, the borders were also likely to be hit during the optimization runs.
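To make the preceding discussion concrete, the following sketch shows, purely as an illustration, how a single-factor Va\v{s}\'{i}\v{c}ek--model could be calibrated to a set of observed zero-coupon yields with differential evolution as implemented in SciPy. The yield data, the parameter bounds and the tolerance are hypothetical and do not correspond to the calibrations reported in this thesis.
\begin{verbatim}
# Hypothetical sketch only; not the calibration code behind the reported results.
import numpy as np
from scipy.optimize import differential_evolution

maturities = np.array([0.25, 1.0, 2.0, 5.0, 10.0, 30.0])                # illustrative
observed_yields = np.array([-0.004, -0.002, 0.0, 0.004, 0.008, 0.012])  # illustrative

def vasicek_yields(params, tau):
    # Zero-coupon yields implied by the Vasicek dynamics dr = a(b - r)dt + sigma dW.
    a, b, sigma, r0 = params
    B = (1.0 - np.exp(-a * tau)) / a
    lnA = ((B - tau) * (a**2 * b - 0.5 * sigma**2) / a**2
           - sigma**2 * B**2 / (4.0 * a))
    return -(lnA - B * r0) / tau

def objective(params):
    # Sum of squared yield errors over the observed maturities.
    err = vasicek_yields(params, maturities) - observed_yields
    return np.sum(err ** 2)

# Bounds for (a, b, sigma, r0); solutions piling up at a border would be a warning sign.
bounds = [(0.01, 2.0), (-0.05, 0.15), (0.001, 0.5), (-0.05, 0.15)]
result = differential_evolution(objective, bounds, seed=0, tol=1e-12)
print(result.x, result.fun)
\end{verbatim}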
It could be interesting to test how well these affine models and their dynamic extensions compare against more modern models such as SABR when they are calibrated to recent volatility structures. Since the descendants of LIBOR models are based on the assumption of log-normal distributions, negative interest rates cause problems that require unnatural fixes such as shifts or normality assumptions that may lower model quality. Negative rates are possible for affine models with Gaussian factors, but this probably does not compensate for the inferior volatility structures these models imply.
\input{../utils/slide-preamble1.tex}
\input{../utils/slide-preamble2.tex}
\input{../utils/macros.tex}
% \bibliography{../bib/references}
\bibliography{references}
\input{../utils/title-info.tex}
\newcommand{\Y}{{\tikz\draw[black,fill=yellow] (0,0) circle (1ex);}\xspace}
\newcommand{\G}{{\tikz\draw[black,fill=green] (0,0) circle (1ex);}\xspace}
\title[Statistics]{Data Analysis and Statistics}
% \date{\today}
\date{April 20, 2015}
\begin{document}
\begin{noheadline}
\maketitle
\end{noheadline}
\nopost{
\begin{noheadline}
\begin{frame}[c]
\vspace{-1.3cm}
\begin{center}
\includegraphics[height=1.3\textheight]{../images/seating-chart.pdf}
\end{center}
\end{frame}
\end{noheadline}
}
\begin{noheadline}
\begin{frame}
\frametitle{Today's issues:}
\tableofcontents
\end{frame}
\end{noheadline}
\section{Basics of probability}
\clickerslide{
\begin{frame}
\begin{clickerquestion}
\item If I flip a fair coin and roll a fair 6-sided die, what is the
probability that I get a heads and a 1 \textit{\textbf{OR}} a tails
and a 6?
\begin{clickeroptions}
\item 1/12
\item 1/144
\item 4/9
\item \clickeranswer{1/6}
\item 0.25
\end{clickeroptions}
\end{clickerquestion}
\end{frame}
}
\clickerslide{
\begin{frame}
\begin{clickerquestion}
\item What is the probability of getting at least two heads in three
flips of a fair coin?
\begin{clickeroptions}
\item \clickeranswer{0.5}
\item 0.25
\item 1/8
\item 3/8
\item 0.75
\end{clickeroptions}
\end{clickerquestion}
\end{frame}
}
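% Added illustration (hedged): short worked solutions for the two warm-up
% questions above, using only the multiplication and addition rules.
\begin{frame}
    \frametitle{Worked solutions to the warm-up questions}
    \begin{itemize}
        \item Coin and die: the two outcomes are mutually exclusive, so
            \[
                P(\text{H and } 1) + P(\text{T and } 6)
                = \tfrac{1}{2}\cdot\tfrac{1}{6} + \tfrac{1}{2}\cdot\tfrac{1}{6}
                = \tfrac{1}{12} + \tfrac{1}{12} = \tfrac{1}{6}.
            \]
        \item At least two heads in three flips: count the favorable outcomes,
            \[
                P(\text{at least 2 heads})
                = \frac{\binom{3}{2} + \binom{3}{3}}{2^{3}}
                = \frac{3 + 1}{8} = 0.5.
            \]
    \end{itemize}
\end{frame}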
\section{Frequentist inference: hypothesis testing}
\begin{frame}
\begin{itemize}[<+->]
\item We find a species of plant in which some individuals have yellow
seeds and others have green seeds.
\item This species of plant is closely related to the garden peas that
Mendel used.
\item Luckily enough, some plant breeders have already bred pure lines
for each seed color.
\item We cross individuals from yellow-seeded and green-seeded pure
lines, and all of the \f{1} progeny have yellow seeds.
\item The plant breeders tell us that when they cross \f{1}s with
pure-line green-seeded individuals, they sometimes see more yellow
seeds than they expect.
\item The breeders hypothesize that heterozygotes produce more gametes
with the yellow allele than the green allele.
\end{itemize}
\end{frame}
\clickerslide{
\begin{frame}
\begin{clickerquestion}
\item
Based on Mendel's work, what is a good \highlight{null} hypothesis
for how this trait is inherited?
\begin{clickeroptions}
\item Seed color is controlled by a single gene with two alleles
that assort independently during meiosis.
\item Seed color is controlled by two genes that segregate during
meiosis.
\item Seed color is controlled by two genes that assort
independently during meiosis.
\item \clickeranswer{Seed color is controlled by a single gene with
two alleles that segregate during meiosis.}
\end{clickeroptions}
\end{clickerquestion}
\end{frame}
}
\clickerslide{
\begin{frame}
\begin{clickerquestion}
\item
If we cross an \f{1} male to a female from the green-seeded pure
line, what ratio of yellow-seeded to green-seeded progeny do we
expect to find if our null hypothesis is correct?
\begin{clickeroptions}
\item 3:1
\item \clickeranswer{1:1}
\item 1:3
\item 9:3:3:1
\end{clickeroptions}
\end{clickerquestion}
\end{frame}
}
\begin{frame}
\begin{itemize}
\item<1-> We cross an \f{1} male to a female from the green-seeded pure
line, and we get 3 yellow-seeded and 1 green-seeded offspring
\vspace{0.5cm}
\item<2-> Our null hypothesis predicted a 1:1 ratio, and we found a 3:1
ratio. What should we do? Is the null wrong?
\vspace{0.5cm}
\item<3-> How likely were we to get our results due to chance?
\end{itemize}
\end{frame}
\begin{frame}
We need to figure out the probability of outcomes under the null
hypothesis.
\vspace{1cm}
Was our null hypothesis a good null hypothesis?
\nbox{Yes---It makes precise predictions, which allow us to calculate
the probability of all possible outcomes of our experiment.}
\end{frame}
\begin{frame}
\frametitle{All possible results}
\begin{columns}
\column{0.33\textwidth}
\Y,\Y,\Y,\Y
\vspace{0.5cm}
\Y,\Y,\Y,\G \\
\Y,\Y,\G,\Y \\
\Y,\G,\Y,\Y \\
\G,\Y,\Y,\Y
\column{0.33\textwidth}
\Y,\Y,\G,\G \\
\Y,\G,\Y,\G \\
\Y,\G,\G,\Y \\
\G,\Y,\G,\Y \\
\G,\G,\Y,\Y \\
\G,\Y,\Y,\G
\column{0.33\textwidth}
\Y,\G,\G,\G \\
\G,\Y,\G,\G \\
\G,\G,\Y,\G \\
\G,\G,\G,\Y
\vspace{0.5cm}
\G,\G,\G,\G
\end{columns}
\vspace{1cm}
\uncover<2->{Which results are as ``weird'' as the results we found (3
yellow: 1 green)? What's the total probability of these unexpected
results?}
\end{frame}
\clickerslide{
\begin{frame}
\begin{clickerquestion}
\item What does the probability 0.625 (10/16) represent regarding our
experiment?
\begin{clickeroptions}
\item The probability of getting results as weird as what we
actually observed, assuming the null hypothesis is incorrect.
\item The probability of the null hypothesis.
\item \clickeranswer{The probability of getting results as weird as
what we actually observed, under the null hypothesis (i.e.,
assuming the null hypothesis is correct).}
\item The probability that the null hypothesis is incorrect.
\end{clickeroptions}
\end{clickerquestion}
\vspace{1cm}
What do we call this probability?
\nbox{The p-value}
\end{frame}
}
\begin{frame}
\frametitle{All possible results}
\begin{columns}
\column{0.33\textwidth}
\Y,\Y,\Y,\Y
\vspace{0.5cm}
\Y,\Y,\Y,\G \\
\Y,\Y,\G,\Y \\
\Y,\G,\Y,\Y \\
\G,\Y,\Y,\Y
\column{0.33\textwidth}
\Y,\Y,\G,\G \\
\Y,\G,\Y,\G \\
\Y,\G,\G,\Y \\
\G,\Y,\G,\Y \\
\G,\G,\Y,\Y \\
\G,\Y,\Y,\G
\column{0.33\textwidth}
\Y,\G,\G,\G \\
\G,\Y,\G,\G \\
\G,\G,\Y,\G \\
\G,\G,\G,\Y
\vspace{0.5cm}
\G,\G,\G,\G
\end{columns}
\vspace{1cm}
\uncover<1->{If we consider a p-value of 0.05 or less as significant, what
was wrong with our experiment?}
\nbox{Our sample size is too small to ever get a significant result}
\uncover<1->{What can we do?}
\nbox{Increase our sample size or perform more replicates.}
\end{frame}
\clickerslide{
\begin{frame}
\begin{clickerquestion}
\item We repeat our experiment and get the same ratio (3 yellow: 1
green), but this time we sampled 20 progeny. What will happen to
our p-value relative to our previous result?
\begin{clickeroptions}
\item \clickeranswer{It will decrease.}
\item It will stay the same.
\item It will increase.
\item It will prove the null hypothesis is wrong.
\end{clickeroptions}
\end{clickerquestion}
\end{frame}
}
\begin{frame}
\uncover<1->{
The probability of getting a result as weird as 15 yellow: 5 green,
assuming the null hypothesis is correct, is 0.041.}
\vspace{4mm}
\uncover<1->{
What do we do?
\nbox{Given our results are so unlikely under the null hypothesis,
we can assume it is not a good explanation of what is going on
(we ``reject'' the null hypothesis).}}
\vspace{0.3cm}
\uncover<1->{
What is the probability of the null hypothesis?
\nbox{We don't know; in frequentist statistical tests, we can only make
probability statements about the data (or summary statistics
calculated from the data), \highlight{NOT} the hypotheses.}}
\uncover<1->{
What is the probability of our results under the ``alternative''
hypothesis?
\nbox{We don't know; statistical tests always test the null hypothesis.
If we reject the null, we can often conclude our alternative
hypothesis is a better explanation. However, we usually cannot
quantify how well it explains the data.}}
\end{frame}
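% Added illustration (hedged): the binomial calculation behind the p-values
% quoted above (10/16 for 4 progeny and 0.041 for 20 progeny), assuming the
% null-hypothesis ratio of 1 yellow : 1 green.
\begin{frame}
    \frametitle{Where the p-values come from}
    Under the null hypothesis each offspring has yellow seeds with probability
    $1/2$, so the number of yellow-seeded progeny $X$ out of $n$ is binomial,
    and results at least as ``weird'' as ours make up the two tails:
    \[
        \text{p-value} = P(X \geq n_{\text{yellow}}) + P(X \leq n_{\text{green}}).
    \]
    For 3 yellow : 1 green ($n = 4$):
    \[
        P(X \geq 3) + P(X \leq 1) = \frac{4 + 1 + 4 + 1}{2^{4}} = \frac{10}{16} = 0.625.
    \]
    For 15 yellow : 5 green ($n = 20$):
    \[
        2 \sum_{k=15}^{20} \binom{20}{k} \left(\frac{1}{2}\right)^{20} \approx 0.041.
    \]
\end{frame}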
\begin{frame}[t]
We were able to reject the null hypothesis of simple Mendelian
inheritance. So, what now? \\
\vspace{8mm}
Our alternative hypothesis was that heterozygotes produce more gametes with
the yellow allele than the green allele. \\
\vspace{8mm}
What is the next step? How do we continue to advance science? \\
\nbox{We need to come up with a \highlight{mechanism} for the alternative
hypothesis that allows us to make precise predictions about the data we
should see if it is correct. This would make the alternative hypothesis
into a good null hypothesis that we can test quantitatively.}
\end{frame}
% \section{Bayesian inference teaser}
% \begin{frame}[t]
% \begin{itemize}
% \item<1->The proportion of people in the USA with HIV is 0.003 (3/1000).
% \item<2->The test for HIV is very accurate:
% \begin{itemize}
% \item<3-> The probability of a positive test if a person has
% HIV is 0.998.
% \item<4-> The probability of a positive test if a person is
% healthy is 0.015.
% \end{itemize}
% \end{itemize}
% \end{frame}
% \begin{frame}[t]
% \begin{itemize}
% \item The proportion of people in the USA with HIV is 0.003 (3/1000).
% \item The test for HIV is very accurate:
% \begin{itemize}
% \item The probability of a positive test if a person has
% HIV is 0.998.
% \item The probability of a positive test if a person is
% healthy is 0.015.
% \end{itemize}
% \end{itemize}
% \begin{clickerquestion}
% \item If we randomly sample someone from the population, test them, and
% find a positive result, what is the probability this individual has
% HIV?
% \begin{clickeroptions}
% \item 0.998
% \item 0.985
% \item \clickeranswer{0.167}
% \item 0.881
% \end{clickeroptions}
% \end{clickerquestion}
% \end{frame}
\end{document}
\documentclass[a4paper]{amsart}
\usepackage[utf8]{inputenc}
\usepackage{datetime2}
\usepackage{enumitem}
\usepackage{float}
\usepackage{graphicx} \graphicspath{{fig/}}
\usepackage{minted}
\begin{document}
\title[]{\lowercase{\texttt{rv32i}}\\---\\RISC-V RV32I emulator}
\author[]{\lowercase{selendym\\([email protected])}}
\date{\DTMnow}
\maketitle
\bigbreak
\section{Introduction}
\noindent
The subject of this project is the emulation of the RISC-V instruction-set architecture%
\footnote{https://riscv.org/specifications/}
(ISA), more precisely, the ``RV32I base integer instruction set'' variant, which is a 32-bit ISA.
The current implementation does what it should: it emulates RV32I and is able to run programs compiled with the GCC cross-compilation toolset for RISC-V bare-metal targets%
\footnote{https://aur.archlinux.org/packages/riscv64-unknown-elf-gcc/}.
As the implementation is ``bare-metal''%
\footnote{There is no support from the runtime environment, no operating system to handle system calls.},
the full C standard library cannot be used to write test programs.
However, to support \texttt{malloc}, the implementation does emulate handling of \texttt{sbrk} system calls, although very primitively.
Also, input-output (IO) is handled with custom \texttt{ecall} environment calls that are mapped to the standard input and output of the emulator.
The custom \texttt{ecall}s the test program uses are implemented with inline assembly, because C does not support executing arbitrary instructions directly.
The implementation provides a rudimentary command-line interface (CLI) for the emulator; this is described in more detail in subsection \ref{subsec:main}.
\bigbreak
\section{Structure}
\begin{figure}
\centering
\includegraphics[width=1\columnwidth]{class_diag}
\caption{Class diagram of the project.}
\label{fig:class_diag}
\end{figure}
\noindent
A class diagram of the project is given in Figure \ref{fig:class_diag}.
Arrows represent dependencies and point from the requiring entity to the required one.
\medbreak
\subsection{Cpu}
\begin{listing}
\begin{minted}[autogobble]{cpp}
const reg_arr_t &get_reg_arr() const;
const word_t &get_reg( reg_idx_t index ) const;
const word_t &get_sp_reg() const;
const word_t &get_pc_reg() const;
void set_reg_arr( const reg_arr_t ®_arr );
void set_reg( reg_idx_t index, word_t value );
void set_sp_reg( word_t value );
void set_pc_reg( word_t value );
void step();
\end{minted}
\caption{Public interface of class \texttt{Cpu}.}
\label{list:cpu_public_interface}
\end{listing}
\noindent
Class \texttt{Cpu} (\texttt{src/cpu/cpu.\{h,c\}pp}) implements the main interface of the emulator proper.
Its public interface is given in Listing \ref{list:cpu_public_interface}.
\texttt{Cpu} implements the \texttt{get\_*} and \texttt{set\_*} member functions for getting and setting, and the \texttt{step} member function for advancing, the state of the emulation run.
Each invocation of \texttt{step} corresponds to the execution of the instruction (in emulated memory) pointed to by the current value of the \texttt{pc\_reg} data member.
With each step, \texttt{pc\_reg} is updated automatically; thus, to advance the emulation run, only \texttt{step} needs to be called.
Each invocation of \texttt{step} proceeds by executing in succession the functions \texttt{fetch}, \texttt{decode}, and \texttt{execute}.
\texttt{fetch} fetches the instruction to be executed from the (emulated) memory address corresponding to \texttt{pc\_reg}.
\texttt{decode} decodes the fetched instruction and returns a struct comprising the decoded fields.
\texttt{execute} executes the decoded instruction and changes the state of the emulation run accordingly.
\texttt{fetch} has a trivial implementation, and \texttt{decode} is described in detail in subsection \ref{subsec:decoder}.
The implementation of \texttt{execute} composes the body of \texttt{Cpu}.
Instructions are first divided into groups corresponding to their major opcodes.
These groups are then executed by their namesake functions; for example, \texttt{arith\_i} is responsible for executing the instructions belonging to the group ``immediate arithmetic''.
\medbreak
\subsection{Decoder}\label{subsec:decoder}
\begin{listing}
\begin{minted}[autogobble]{cpp}
instr_t decode( iword_t instr_word );
\end{minted}
\caption{``Public interface'' of namespace \texttt{decoder}.}
\label{list:decoder_public_interface}
\end{listing}
\begin{listing}
\begin{minted}[autogobble]{cpp}
struct instr_t
{
// raw instruction word
iword_t instr_word;
// instruction-word bit fields
opcode_e opcode; // instr_word[ 6: 0]
iword_t funct3; // instr_word[14:12]
iword_t funct7; // instr_word[31:25]
iword_t rd; // instr_word[11: 7]
iword_t rs1; // instr_word[19:15]
iword_t rs2; // instr_word[24:20]
word_t imm; // variant; depends on opcode format type; sign-extended
// decoded instruction and type
opcode_type_e opcode_type; // opcode format type
mnem_e mnem; // mnemonic; 'fully resolved opcode'; e.g., 'XOR'
}; // END struct instr_t
\end{minted}
\caption{Struct \texttt{instr\_t}.}
\label{list:instr_t}
\end{listing}
\noindent
Namespace \texttt{decoder} (\texttt{src/cpu/decoder.\{h,c\}pp}) implements the instruction decoder of the emulator.%
\footnote{\texttt{decoder} can be considered as a stateless class, comprising only (static) member functions.}
Its ``public interface'' is given in Listing \ref{list:decoder_public_interface}.
\texttt{decoder} implements the \texttt{decode} function for decoding a raw instruction word into an \texttt{instr\_t} struct (Listing \ref{list:instr_t}) comprising the decoded fields.
\texttt{decode} serves as the interface for decoding and performs some preliminary steps before calling more specific decoding functions.
As with \texttt{execute}, instructions are first divided into groups corresponding to their major opcodes.
These groups are then decoded by their namesake functions with \texttt{decode\_mnem\_} prepended; for example, \texttt{decode\_mnem\_arith\_i} is responsible for decoding the instructions belonging to the group ``immediate arithmetic''.
\texttt{decode\_imm} differs from the other functions by not belonging to any single group and is responsible for decoding immediate values for all groups.
\medbreak
\subsection{Memory}
\begin{listing}
\begin{minted}[autogobble]{cpp}
const uint8_t *get_mem_carr( addr_t addr = 0 ) const;
uint8_t *get_mem_carr_nc( addr_t addr = 0 ); // non-const
const addr_t &get_mem_size() const;
uint8_t lb( addr_t addr ) const;
uint16_t lh( addr_t addr ) const;
uint32_t lw( addr_t addr ) const;
uint64_t ld( addr_t addr ) const; // rv64i
void sb( addr_t addr, uint8_t word8 );
void sh( addr_t addr, uint16_t word16 );
void sw( addr_t addr, uint32_t word32 );
void sd( addr_t addr, uint64_t word64 ); // rv64i
\end{minted}
\caption{Public interface of class \texttt{Memory}.}
\label{list:memory_public_interface}
\end{listing}
\noindent
Class \texttt{Memory} (\texttt{src/cpu/memory.\{h,c\}pp}) implements the emulated memory system that \texttt{Cpu} uses.
Its public interface is given in Listing \ref{list:memory_public_interface}.
The \texttt{get\_mem\_size} member function returns the memory array size in (8-bit) bytes.
The \texttt{get\_mem\_carr} member function and its non-constant variant are used to provide direct access to the underlying raw memory array; these can be considered as a rudimentary form of direct memory access (DMA) and are used to provide a simple interface to IO functions.
The \texttt{l\{b,h,w,d\}} member functions allow addressed loads (reads) from a given memory location and return values of 8 (byte), 16 (half-word), 32 (word), and 64 (double-word) bits, respectively.
The \texttt{s\{b,h,w,d\}} member functions allow addressed stores (writes) to a given memory location of values of 8 (byte), 16 (half-word), 32 (word), and 64 (double-word) bits, respectively.
\texttt{Memory} implements the raw memory array using Linux system library call \texttt{mmap} to memory-map a file, serving as the memory image, onto the emulated memory address space.
The semantics of \texttt{mmap} allow any modifications made by the emulation run to transparently and automatically be mirrored in the memory image file, which can then be examined during or after the run to help in troubleshooting possible problems.
\medbreak
\subsection{ISA}
\begin{listing}
\begin{minted}[autogobble]{cpp}
using std::uint8_t, std::int8_t;
using std::uint16_t, std::int16_t;
using std::uint32_t, std::int32_t;
using std::uint64_t, std::int64_t;
constexpr unsigned iword_length = 32; // instruction bit width
using iword_t = uint32_t; // instruction word type
using siword_t = int32_t; // instruction word type; signed
constexpr unsigned word_length = 32; // register bit width
using word_t = uint32_t; // register word type
using sword_t = int32_t; // register word type; signed
constexpr unsigned addr_length = word_length; // memory address bit width
using addr_t = word_t; // memory address type
constexpr unsigned reg_count = 32; // register count
constexpr word_t reg_0_value = 0; // register-0 fixed value
using reg_arr_t = std::array< word_t, reg_count >;
using reg_idx_t = reg_arr_t::size_type;
constexpr auto iword_mask = util::bit_mask< iword_t, iword_length >;
constexpr auto iword_extract = util::bit_extract< iword_t, iword_length >;
constexpr auto word_mask = util::bit_mask< word_t, word_length >;
constexpr auto word_extract = util::bit_extract< word_t, word_length >;
constexpr auto addr_mask = word_mask;
constexpr auto addr_extract = word_extract;
namespace reg_idx_ns;
enum class opcode_type_e;
enum class opcode_e;
enum class mnem_e;
struct instr_t;
\end{minted}
\caption{Contents of namespace \texttt{isa}.}
\label{list:isa_contents}
\end{listing}
\noindent
Namespace \texttt{isa} (\texttt{src/cpu/isa.hpp}) implements the shared type definitions of the emulator.
Its contents are given in Listing \ref{list:isa_contents}.
\medbreak
\subsection{Main}
\noindent
Function \texttt{main} (\texttt{src/cpu/main.cpp}) implements a simple CLI for the emulator.
\medbreak
\subsection{Test}
\noindent
Function \texttt{main} (\texttt{src/test/test.c}) implements a test program for the emulator to run.
\bigbreak
\section{Instructions for building and usage}
\medbreak
\subsection{Main}\label{subsec:main}
\noindent
The CLI for the emulator, \texttt{main}, is built by invoking \texttt{make [all]} (the brackets signify optionality) in the \texttt{src/cpu} directory.
After a successful compilation, the \texttt{main} executable is located at \texttt{src/test/main}.
Note that a GCC version supporting \texttt{-std=c++17} is required.
Usage: \texttt{./main [<step\_count>] [<pc>] [<sp>]}
If \texttt{step\_count} is not given, the largest possible value, $-1$, is used (with unsigned arithmetic, this will wrap around).
Note that unless required, \texttt{pc} and \texttt{sp} should not be set explicitly; these correspond to the initial program counter (pc), which should point to the address of \texttt{\_start}, and the initial stack pointer (sp), which by default points to just past the end of the memory image.
\medbreak
\subsection{Test}
\noindent
The test program for the emulator, \texttt{test}, is built by invoking \texttt{make [all]} in the \texttt{src/test} directory.
After a successful compilation, two memory images, \texttt{mem.img.clean} and \texttt{mem.img} are created (the latter is simply a copy of the former).
Note that this step is optional, as a precompiled \texttt{mem.img.clean} is provided; otherwise, a GCC version supporting RISC-V bare-metal cross-compilation is required.
After an emulation run, the memory image file, \texttt{mem.img}, has most likely been modified by the run.
If desired, the image can be restored to a clean state by invoking \texttt{make reset-mem} in the \texttt{src/test} directory; this will also create the image if it does not exist.
\bigbreak
\section{Testing}
\noindent
Testing of the emulator proper was mostly realized by having the emulator run a test program compiled for the implemented ISA, RV32I, using GCC as a cross-compiler.
As the number of distinct instructions in RV32I is only about 50 in total, a slightly more extensive test program should be able to cover them many times over.
In case bugs are present, it is very likely that the test program will not run correctly, if at all.
The lack of any unit tests was mostly due to a lack of time, and also of need in this particular case --- the coverage of the test program might not be 100\%, but it is likely reasonably close to it.
The test program is written mostly in C, with start-files and some utilities written in the RV32I subset of RISC-V assembly language.
As the target for compilation is bare-metal, custom start-files are required to set up and tear down the emulated environment to allow correct program execution.
Namely, the \texttt{\_start} and \texttt{\_exit} functions set up and tear down the emulation, respectively, and print diagnostic messages along the way to ease troubleshooting.
Due to the way the memory system is implemented, examining the contents of the emulated memory during or after an emulation run is made easy.
There are two files for the memory image, \texttt{mem.img.clean} and \texttt{mem.img}.
The former is the untouched, ``clean'', image, while the latter is the actually used memory image and may have changes made by the emulator.
The made changes are permanent, so if a ``clean'' emulation run is required, invoking \texttt{make reset-mem} in the test directory allows resetting the used memory image to the clean state.
To see the changes made in the memory image without great pains, a tool with hex diff capability is recommended%
\footnote{One such tool is \texttt{binwalk}, called with \texttt{binwalk --hexdump --red mem.img.clean mem.img}.}.
\end{document}
\chapter{Sequence objects}
\label{chapter:seq_objects}
Biological sequences are arguably the central object in Bioinformatics, and in this chapter we'll introduce the Biopython mechanism for dealing with sequences, the \verb|Seq| object.
Chapter~\ref{chapter:seq_annot} will introduce the related \verb|SeqRecord| object, which combines the sequence information with any annotation, used again in Chapter~\ref{chapter:seqio} for Sequence Input/Output.
Sequences are essentially strings of letters like \verb|AGTACACTGGT|, which seems very natural since this is the most common way that sequences are seen in biological file formats.
The most important difference between \verb|Seq| objects and standard Python strings is they have different methods.
Although the \verb|Seq| object supports many of the same methods as a plain string, its \verb|translate()| method differs by doing biological translation, and there are also additional biologically relevant methods like \verb|reverse_complement()|.
\section{Sequences act like strings}
In most ways, we can deal with Seq objects as if they were normal Python strings, for example getting the length, or iterating over the elements:
%doctest
\begin{minted}{pycon}
>>> from Bio.Seq import Seq
>>> my_seq = Seq("GATCG")
>>> for index, letter in enumerate(my_seq):
... print("%i %s" % (index, letter))
0 G
1 A
2 T
3 C
4 G
>>> print(len(my_seq))
5
\end{minted}
You can access elements of the sequence in the same way as for strings (but remember, Python counts from zero!):
%cont-doctest
\begin{minted}{pycon}
>>> print(my_seq[0]) #first letter
G
>>> print(my_seq[2]) #third letter
T
>>> print(my_seq[-1]) #last letter
G
\end{minted}
The \verb|Seq| object has a \verb|.count()| method, just like a string.
Note that this means that like a Python string, this gives a
\emph{non-overlapping} count:
%doctest
\begin{minted}{pycon}
>>> from Bio.Seq import Seq
>>> "AAAA".count("AA")
2
>>> Seq("AAAA").count("AA")
2
\end{minted}
\noindent For some biological uses, you may actually want an overlapping count
(i.e. $3$ in this trivial example); see the aside after the next example. When
searching for single letters, this makes no difference:
%doctest
\begin{minted}{pycon}
>>> from Bio.Seq import Seq
>>> my_seq = Seq("GATCGATGGGCCTATATAGGATCGAAAATCGC")
>>> len(my_seq)
32
>>> my_seq.count("G")
9
>>> 100 * float(my_seq.count("G") + my_seq.count("C")) / len(my_seq)
46.875
\end{minted}
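As an aside (this assumes a reasonably recent Biopython release, which provides a dedicated \verb|count_overlap| method for exactly this situation), the overlapping count mentioned above can be obtained directly:
\begin{minted}{pycon}
>>> from Bio.Seq import Seq
>>> Seq("AAAA").count_overlap("AA")
3
\end{minted}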
While you could use the counting approach above to calculate a GC\%, note that the \verb|Bio.SeqUtils| module has several GC functions already built. For example:
%doctest
\begin{minted}{pycon}
>>> from Bio.Seq import Seq
>>> from Bio.SeqUtils import GC
>>> my_seq = Seq("GATCGATGGGCCTATATAGGATCGAAAATCGC")
>>> GC(my_seq)
46.875
\end{minted}
\noindent Note that using the \verb|Bio.SeqUtils.GC()| function should automatically cope with mixed case sequences and the ambiguous nucleotide S which means G or C.
Also note that just like a normal Python string, the \verb|Seq| object is in some ways ``read-only''. If you need to edit your sequence, for example simulating a point mutation, look at the Section~\ref{sec:mutable-seq} below which talks about the \verb|MutableSeq| object.
\section{Slicing a sequence}
For a more complicated example, let's get a slice of the sequence:
%doctest
\begin{minted}{pycon}
>>> from Bio.Seq import Seq
>>> my_seq = Seq("GATCGATGGGCCTATATAGGATCGAAAATCGC")
>>> my_seq[4:12]
Seq('GATGGGCC')
\end{minted}
Note that \verb|Seq| objects follow the usual indexing conventions for Python strings, with the first element of the sequence numbered 0. When you do a slice, the first item is included (i.e.~4 in this case) and the last is excluded (12 in this case).
Also like a Python string, you can do slices with a start, stop and \emph{stride} (the step size, which defaults to one). For example, we can get the first, second and third codon positions of this DNA sequence:
%cont-doctest
\begin{minted}{pycon}
>>> my_seq[0::3]
Seq('GCTGTAGTAAG')
>>> my_seq[1::3]
Seq('AGGCATGCATC')
>>> my_seq[2::3]
Seq('TAGCTAAGAC')
\end{minted}
Another stride trick you might have seen with a Python string is the use of a -1 stride to reverse the string. You can do this with a \verb|Seq| object too:
%cont-doctest
\begin{minted}{pycon}
>>> my_seq[::-1]
Seq('CGCTAAAAGCTAGGATATATCCGGGTAGCTAG')
\end{minted}
\section{Turning Seq objects into strings}
\label{sec:seq-to-string}
If you really do just need a plain string, for example to write to a file, or insert into a database, then this is very easy to get:
%cont-doctest
\begin{minted}{pycon}
>>> str(my_seq)
'GATCGATGGGCCTATATAGGATCGAAAATCGC'
\end{minted}
Since calling \verb|str()| on a \verb|Seq| object returns the full sequence as a string,
you often don't actually have to do this conversion explicitly.
Python does this automatically in the print function:
%cont-doctest
\begin{minted}{pycon}
>>> print(my_seq)
GATCGATGGGCCTATATAGGATCGAAAATCGC
\end{minted}
You can also use the \verb|Seq| object directly with a \verb|%s| placeholder when using the Python string formatting or interpolation operator (\verb|%|):
%cont-doctest
\begin{minted}{pycon}
>>> fasta_format_string = ">Name\n%s\n" % my_seq
>>> print(fasta_format_string)
>Name
GATCGATGGGCCTATATAGGATCGAAAATCGC
<BLANKLINE>
\end{minted}
\noindent This line of code constructs a simple FASTA format record (without worrying about line wrapping).
Section~\ref{sec:SeqRecord-format} describes a neat way to get a FASTA formatted
string from a \verb|SeqRecord| object, while the more general topic of reading and
writing FASTA format sequence files is covered in Chapter~\ref{chapter:seqio}.
\section{Concatenating or adding sequences}
Two \verb|Seq| objects can be concatenated by adding them:
%doctest
\begin{minted}{pycon}
>>> from Bio.Seq import Seq
>>> seq1 = Seq("ACGT")
>>> seq2 = Seq("AACCGG")
>>> seq1 + seq2
Seq('ACGTAACCGG')
\end{minted}
Biopython does not check the sequence contents and will not raise an exception if for example you concatenate a protein sequence and a DNA sequence (which is likely a mistake):
%doctest
\begin{minted}{pycon}
>>> from Bio.Seq import Seq
>>> protein_seq = Seq("EVRNAK")
>>> dna_seq = Seq("ACGT")
>>> protein_seq + dna_seq
Seq('EVRNAKACGT')
\end{minted}
You may often have many sequences to add together, which can be done with a for loop like this:
%doctest
\begin{minted}{pycon}
>>> from Bio.Seq import Seq
>>> list_of_seqs = [Seq("ACGT"), Seq("AACC"), Seq("GGTT")]
>>> concatenated = Seq("")
>>> for s in list_of_seqs:
... concatenated += s
...
>>> concatenated
Seq('ACGTAACCGGTT')
\end{minted}
Like Python strings, Biopython \verb|Seq| also has a \verb|.join| method:
%doctest
\begin{minted}{pycon}
>>> from Bio.Seq import Seq
>>> contigs = [Seq("ATG"), Seq("ATCCCG"), Seq("TTGCA")]
>>> spacer = Seq("N"*10)
>>> spacer.join(contigs)
Seq('ATGNNNNNNNNNNATCCCGNNNNNNNNNNTTGCA')
\end{minted}
\section{Changing case}
Python strings have very useful \verb|upper| and \verb|lower| methods for changing the case.
For example,
%doctest
\begin{minted}{pycon}
>>> from Bio.Seq import Seq
>>> dna_seq = Seq("acgtACGT")
>>> dna_seq
Seq('acgtACGT')
>>> dna_seq.upper()
Seq('ACGTACGT')
>>> dna_seq.lower()
Seq('acgtacgt')
\end{minted}
These are useful for doing case insensitive matching:
%cont-doctest
\begin{minted}{pycon}
>>> "GTAC" in dna_seq
False
>>> "GTAC" in dna_seq.upper()
True
\end{minted}
\section{Nucleotide sequences and (reverse) complements}
\label{sec:seq-reverse-complement}
For nucleotide sequences, you can easily obtain the complement or reverse
complement of a \verb|Seq| object using its built-in methods:
%doctest
\begin{minted}{pycon}
>>> from Bio.Seq import Seq
>>> my_seq = Seq("GATCGATGGGCCTATATAGGATCGAAAATCGC")
>>> my_seq
Seq('GATCGATGGGCCTATATAGGATCGAAAATCGC')
>>> my_seq.complement()
Seq('CTAGCTACCCGGATATATCCTAGCTTTTAGCG')
>>> my_seq.reverse_complement()
Seq('GCGATTTTCGATCCTATATAGGCCCATCGATC')
\end{minted}
As mentioned earlier, an easy way to just reverse a \verb|Seq| object (or a
Python string) is slice it with -1 step:
%cont-doctest
\begin{minted}{pycon}
>>> my_seq[::-1]
Seq('CGCTAAAAGCTAGGATATATCCGGGTAGCTAG')
\end{minted}
If you do accidentally end up trying to do something weird like taking the
(reverse) complement of a protein sequence, the results are biologically
meaningless:
%doctest
\begin{minted}{pycon}
>>> from Bio.Seq import Seq
>>> protein_seq = Seq("EVRNAK")
>>> protein_seq.complement()
Seq('EBYNTM')
\end{minted}
Here the letter ``E'' is not a valid IUPAC ambiguity code for nucleotides,
so was not complemented. However, ``V'' means ``A'', ``C'' or ``G'' and
has complement ``B'', and so on.
The example in Section~\ref{sec:SeqIO-reverse-complement} combines the \verb|Seq|
object's reverse complement method with \verb|Bio.SeqIO| for sequence input/output.
\section{Transcription}
Before talking about transcription, I want to try to clarify the strand issue.
Consider the following (made up) stretch of double stranded DNA which
encodes a short peptide:
\begin{tabular}{rcl}
\\
& {\small DNA coding strand (aka Crick strand, strand $+1$)} & \\
5' & \texttt{ATGGCCATTGTAATGGGCCGCTGAAAGGGTGCCCGATAG} & 3' \\
& \texttt{|||||||||||||||||||||||||||||||||||||||} & \\
3' & \texttt{TACCGGTAACATTACCCGGCGACTTTCCCACGGGCTATC} & 5' \\
& {\small DNA template strand (aka Watson strand, strand $-1$)} & \\
\\
& {\LARGE $|$} &\\
& Transcription & \\
& {\LARGE $\downarrow$} &\\
\\
5' & \texttt{AUGGCCAUUGUAAUGGGCCGCUGAAAGGGUGCCCGAUAG} & 3' \\
& {\small Single stranded messenger RNA} & \\
\\
\end{tabular}
The actual biological transcription process works from the template strand, doing a reverse complement (TCAG $\rightarrow$ CUGA) to give the mRNA. However, in Biopython and bioinformatics in general, we typically work directly with the coding strand because this means we can get the mRNA sequence just by switching T $\rightarrow$ U.
Now let's actually get down to doing a transcription in Biopython. First, let's create \verb|Seq| objects for the coding and template DNA strands:
%doctest
\begin{minted}{pycon}
>>> from Bio.Seq import Seq
>>> coding_dna = Seq("ATGGCCATTGTAATGGGCCGCTGAAAGGGTGCCCGATAG")
>>> coding_dna
Seq('ATGGCCATTGTAATGGGCCGCTGAAAGGGTGCCCGATAG')
>>> template_dna = coding_dna.reverse_complement()
>>> template_dna
Seq('CTATCGGGCACCCTTTCAGCGGCCCATTACAATGGCCAT')
\end{minted}
\noindent These should match the figure above - remember by convention nucleotide sequences are normally read from the 5' to 3' direction, while in the figure the template strand is shown reversed.
Now let's transcribe the coding strand into the corresponding mRNA, using the \verb|Seq| object's built in \verb|transcribe| method:
%cont-doctest
\begin{minted}{pycon}
>>> coding_dna
Seq('ATGGCCATTGTAATGGGCCGCTGAAAGGGTGCCCGATAG')
>>> messenger_rna = coding_dna.transcribe()
>>> messenger_rna
Seq('AUGGCCAUUGUAAUGGGCCGCUGAAAGGGUGCCCGAUAG')
\end{minted}
\noindent As you can see, all this does is to replace T by U.
If you do want to do a true biological transcription starting with the template strand, then this becomes a two-step process:
%cont-doctest
\begin{minted}{pycon}
>>> template_dna.reverse_complement().transcribe()
Seq('AUGGCCAUUGUAAUGGGCCGCUGAAAGGGUGCCCGAUAG')
\end{minted}
The \verb|Seq| object also includes a back-transcription method for going from the mRNA to the coding strand of the DNA. Again, this is a simple U $\rightarrow$ T substitution:
%doctest
\begin{minted}{pycon}
>>> from Bio.Seq import Seq
>>> messenger_rna = Seq("AUGGCCAUUGUAAUGGGCCGCUGAAAGGGUGCCCGAUAG")
>>> messenger_rna
Seq('AUGGCCAUUGUAAUGGGCCGCUGAAAGGGUGCCCGAUAG')
>>> messenger_rna.back_transcribe()
Seq('ATGGCCATTGTAATGGGCCGCTGAAAGGGTGCCCGATAG')
\end{minted}
\emph{Note:} The \verb|Seq| object's \verb|transcribe| and \verb|back_transcribe| methods
were added in Biopython 1.49. For older releases you would have to use the \verb|Bio.Seq|
module's functions instead, see Section~\ref{sec:seq-module-functions}.
\section{Translation}
\label{sec:translation}
Sticking with the same example discussed in the transcription section above,
now let's translate this mRNA into the corresponding protein sequence - again taking
advantage of one of the \verb|Seq| object's biological methods:
%doctest
\begin{minted}{pycon}
>>> from Bio.Seq import Seq
>>> messenger_rna = Seq("AUGGCCAUUGUAAUGGGCCGCUGAAAGGGUGCCCGAUAG")
>>> messenger_rna
Seq('AUGGCCAUUGUAAUGGGCCGCUGAAAGGGUGCCCGAUAG')
>>> messenger_rna.translate()
Seq('MAIVMGR*KGAR*')
\end{minted}
You can also translate directly from the coding strand DNA sequence:
%doctest
\begin{minted}{pycon}
>>> from Bio.Seq import Seq
>>> coding_dna = Seq("ATGGCCATTGTAATGGGCCGCTGAAAGGGTGCCCGATAG")
>>> coding_dna
Seq('ATGGCCATTGTAATGGGCCGCTGAAAGGGTGCCCGATAG')
>>> coding_dna.translate()
Seq('MAIVMGR*KGAR*')
\end{minted}
You should notice in the above protein sequences that in addition to the end stop character, there is an internal stop as well. This was a deliberate choice of example, as it gives an excuse to talk about some optional arguments, including different translation tables (Genetic Codes).
The translation tables available in Biopython are based on those \href{https://www.ncbi.nlm.nih.gov/Taxonomy/Utils/wprintgc.cgi}{from the NCBI} (see the next section of this tutorial). By default, translation will use the \emph{standard} genetic code (NCBI table id 1).
Suppose we are dealing with a mitochondrial sequence. We need to tell the translation function to use the relevant genetic code instead:
%cont-doctest
\begin{minted}{pycon}
>>> coding_dna.translate(table="Vertebrate Mitochondrial")
Seq('MAIVMGRWKGAR*')
\end{minted}
You can also specify the table using the NCBI table number which is shorter, and often included in the feature annotation of GenBank files:
%cont-doctest
\begin{minted}{pycon}
>>> coding_dna.translate(table=2)
Seq('MAIVMGRWKGAR*')
\end{minted}
Now, you may want to translate the nucleotides up to the first in frame stop codon,
and then stop (as happens in nature):
%cont-doctest
\begin{minted}{pycon}
>>> coding_dna.translate()
Seq('MAIVMGR*KGAR*')
>>> coding_dna.translate(to_stop=True)
Seq('MAIVMGR')
>>> coding_dna.translate(table=2)
Seq('MAIVMGRWKGAR*')
>>> coding_dna.translate(table=2, to_stop=True)
Seq('MAIVMGRWKGAR')
\end{minted}
\noindent Notice that when you use the \verb|to_stop| argument, the stop codon itself
is not translated - and the stop symbol is not included at the end of your protein
sequence.
You can even specify the stop symbol if you don't like the default asterisk:
%cont-doctest
\begin{minted}{pycon}
>>> coding_dna.translate(table=2, stop_symbol="@")
Seq('MAIVMGRWKGAR@')
\end{minted}
Now, suppose you have a complete coding sequence CDS, which is to say a
nucleotide sequence (e.g. mRNA -- after any splicing) which is a whole number
of codons (i.e. the length is a multiple of three), commences with a start
codon, ends with a stop codon, and has no internal in-frame stop codons.
In general, given a complete CDS, the default translate method will do what
you want (perhaps with the \verb|to_stop| option). However, what if your
sequence uses a non-standard start codon? This happens a lot in bacteria --
for example the gene yaaX in \texttt{E. coli} K12:
%TODO - handle line wrapping in doctest?
\begin{minted}{pycon}
>>> from Bio.Seq import Seq
>>> gene = Seq("GTGAAAAAGATGCAATCTATCGTACTCGCACTTTCCCTGGTTCTGGTCGCTCCCATGGCA"
... "GCACAGGCTGCGGAAATTACGTTAGTCCCGTCAGTAAAATTACAGATAGGCGATCGTGAT"
... "AATCGTGGCTATTACTGGGATGGAGGTCACTGGCGCGACCACGGCTGGTGGAAACAACAT"
... "TATGAATGGCGAGGCAATCGCTGGCACCTACACGGACCGCCGCCACCGCCGCGCCACCAT"
... "AAGAAAGCTCCTCATGATCATCACGGCGGTCATGGTCCAGGCAAACATCACCGCTAA")
>>> gene.translate(table="Bacterial")
Seq('VKKMQSIVLALSLVLVAPMAAQAAEITLVPSVKLQIGDRDNRGYYWDGGHWRDH...HR*')
>>> gene.translate(table="Bacterial", to_stop=True)
Seq('VKKMQSIVLALSLVLVAPMAAQAAEITLVPSVKLQIGDRDNRGYYWDGGHWRDH...HHR')
\end{minted}
\noindent In the bacterial genetic code \texttt{GTG} is a valid start codon,
and while it does \emph{normally} encode Valine, if used as a start codon it
should be translated as methionine. This happens if you tell Biopython your
sequence is a complete CDS:
%TODO - handle line wrapping in doctest?
\begin{minted}{pycon}
>>> gene.translate(table="Bacterial", cds=True)
Seq('MKKMQSIVLALSLVLVAPMAAQAAEITLVPSVKLQIGDRDNRGYYWDGGHWRDH...HHR')
\end{minted}
In addition to telling Biopython to translate an alternative start codon as
methionine, using this option also makes sure your sequence really is a valid
CDS (you'll get an exception if not).
The example in Section~\ref{sec:SeqIO-translate} combines the \verb|Seq| object's
translate method with \verb|Bio.SeqIO| for sequence input/output.
\section{Translation Tables}
In the previous sections we talked about the \verb|Seq| object translation method (and mentioned the equivalent function in the \verb|Bio.Seq| module -- see
Section~\ref{sec:seq-module-functions}).
Internally these use codon table objects derived from the NCBI information at
\url{ftp://ftp.ncbi.nlm.nih.gov/entrez/misc/data/gc.prt}, also shown on
\url{https://www.ncbi.nlm.nih.gov/Taxonomy/Utils/wprintgc.cgi} in a much more readable layout.
As before, let's just focus on two choices: the Standard translation table, and the
translation table for Vertebrate Mitochondrial DNA.
%doctest
\begin{minted}{pycon}
>>> from Bio.Data import CodonTable
>>> standard_table = CodonTable.unambiguous_dna_by_name["Standard"]
>>> mito_table = CodonTable.unambiguous_dna_by_name["Vertebrate Mitochondrial"]
\end{minted}
Alternatively, these tables are labeled with ID numbers 1 and 2, respectively:
%cont-doctest
\begin{minted}{pycon}
>>> from Bio.Data import CodonTable
>>> standard_table = CodonTable.unambiguous_dna_by_id[1]
>>> mito_table = CodonTable.unambiguous_dna_by_id[2]
\end{minted}
You can compare the actual tables visually by printing them:
%TODO - handle <BLANKLINE> automatically in doctest?
\begin{minted}{pycon}
>>> print(standard_table)
Table 1 Standard, SGC0
| T | C | A | G |
--+---------+---------+---------+---------+--
T | TTT F | TCT S | TAT Y | TGT C | T
T | TTC F | TCC S | TAC Y | TGC C | C
T | TTA L | TCA S | TAA Stop| TGA Stop| A
T | TTG L(s)| TCG S | TAG Stop| TGG W | G
--+---------+---------+---------+---------+--
C | CTT L | CCT P | CAT H | CGT R | T
C | CTC L | CCC P | CAC H | CGC R | C
C | CTA L | CCA P | CAA Q | CGA R | A
C | CTG L(s)| CCG P | CAG Q | CGG R | G
--+---------+---------+---------+---------+--
A | ATT I | ACT T | AAT N | AGT S | T
A | ATC I | ACC T | AAC N | AGC S | C
A | ATA I | ACA T | AAA K | AGA R | A
A | ATG M(s)| ACG T | AAG K | AGG R | G
--+---------+---------+---------+---------+--
G | GTT V | GCT A | GAT D | GGT G | T
G | GTC V | GCC A | GAC D | GGC G | C
G | GTA V | GCA A | GAA E | GGA G | A
G | GTG V | GCG A | GAG E | GGG G | G
--+---------+---------+---------+---------+--
\end{minted}
\noindent and:
\begin{minted}{pycon}
>>> print(mito_table)
Table 2 Vertebrate Mitochondrial, SGC1
| T | C | A | G |
--+---------+---------+---------+---------+--
T | TTT F | TCT S | TAT Y | TGT C | T
T | TTC F | TCC S | TAC Y | TGC C | C
T | TTA L | TCA S | TAA Stop| TGA W | A
T | TTG L | TCG S | TAG Stop| TGG W | G
--+---------+---------+---------+---------+--
C | CTT L | CCT P | CAT H | CGT R | T
C | CTC L | CCC P | CAC H | CGC R | C
C | CTA L | CCA P | CAA Q | CGA R | A
C | CTG L | CCG P | CAG Q | CGG R | G
--+---------+---------+---------+---------+--
A | ATT I(s)| ACT T | AAT N | AGT S | T
A | ATC I(s)| ACC T | AAC N | AGC S | C
A | ATA M(s)| ACA T | AAA K | AGA Stop| A
A | ATG M(s)| ACG T | AAG K | AGG Stop| G
--+---------+---------+---------+---------+--
G | GTT V | GCT A | GAT D | GGT G | T
G | GTC V | GCC A | GAC D | GGC G | C
G | GTA V | GCA A | GAA E | GGA G | A
G | GTG V(s)| GCG A | GAG E | GGG G | G
--+---------+---------+---------+---------+--
\end{minted}
You may find the following properties useful -- for example, if you are trying
to do your own gene finding:
%cont-doctest
\begin{minted}{pycon}
>>> mito_table.stop_codons
['TAA', 'TAG', 'AGA', 'AGG']
>>> mito_table.start_codons
['ATT', 'ATC', 'ATA', 'ATG', 'GTG']
>>> mito_table.forward_table["ACG"]
'T'
\end{minted}
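For instance, the tables printed above show that \texttt{AGA} is a stop codon in the vertebrate mitochondrial code but encodes arginine in the standard code, which you can confirm directly (a short added check, not one of this tutorial's doctests):
\begin{minted}{pycon}
>>> "AGA" in mito_table.stop_codons
True
>>> "AGA" in standard_table.stop_codons
False
>>> standard_table.forward_table["AGA"]
'R'
\end{minted}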
\section{Comparing Seq objects}
\label{sec:seq-comparison}
Sequence comparison is actually a very complicated topic, and there is no easy
way to decide if two sequences are equal. The basic problem is that the meaning of
the letters in a sequence is context dependent -- the letter ``A'' could be part
of a DNA, RNA or protein sequence. Biopython can track the molecule type, so
comparing two \verb|Seq| objects could mean considering this too.
Should a DNA fragment ``ACG'' and an RNA fragment ``ACG'' be equal? What about
the peptide ``ACG''? Or the Python string ``ACG''?
In everyday use, your sequences will generally all be the same type of molecule
(all DNA, all RNA, or all protein).
Well, as of Biopython 1.65, sequence comparison only looks at the sequence itself
and compares it just like a Python string:
%doctest
\begin{minted}{pycon}
>>> from Bio.Seq import Seq
>>> seq1 = Seq("ACGT")
>>> "ACGT" == seq1
True
>>> seq1 == "ACGT"
True
\end{minted}
As an extension to this, using sequence objects as keys in a Python dictionary
is equivalent to using the sequence as a plain string for the key.
See also Section~\ref{sec:seq-to-string}.
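As a minimal added illustration of this equivalence, a value stored under a \verb|Seq| key can be looked up using the equivalent plain string:
\begin{minted}{pycon}
>>> from Bio.Seq import Seq
>>> sequences = {Seq("ACGT"): "my favourite sequence"}
>>> sequences["ACGT"]
'my favourite sequence'
\end{minted}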
\section{Sequences with unknown sequence contents}
In some cases, the length of a sequence may be known but not the actual letters constituting it. For example, GenBank and EMBL files may represent a genomic DNA sequence only by its contig information, without specifying the sequence contents explicitly. Such sequences can be represented by creating a \verb|Seq| object with the argument \verb|None|, followed by the sequence length:
%doctest
\begin{minted}{pycon}
>>> from Bio.Seq import Seq
>>> unknown_seq = Seq(None, 10)
\end{minted}
The \verb|Seq| object thus created has a well-defined length. Any attempt to access the sequence contents, however, will raise an \verb|UndefinedSequenceError|:
%cont-doctest
\begin{minted}{pycon}
>>> unknown_seq
Seq(None, length=10)
>>> len(unknown_seq)
10
>>> print(unknown_seq)
Traceback (most recent call last):
...
Bio.Seq.UndefinedSequenceError: Sequence content is undefined
>>>
\end{minted}
\section{MutableSeq objects}
\label{sec:mutable-seq}
Just like the normal Python string, the \verb|Seq| object is ``read only'', or in Python terminology, immutable. Apart from wanting the \verb|Seq| object to act like a string, this is also a useful default since in many biological applications you want to ensure you are not changing your sequence data:
%doctest
\begin{minted}{pycon}
>>> from Bio.Seq import Seq
>>> my_seq = Seq("GCCATTGTAATGGGCCGCTGAAAGGGTGCCCGA")
\end{minted}
Observe what happens if you try to edit the sequence:
%cont-doctest
\begin{minted}{pycon}
>>> my_seq[5] = "G"
Traceback (most recent call last):
...
TypeError: 'Seq' object does not support item assignment
\end{minted}
However, you can convert it into a mutable sequence (a \verb|MutableSeq| object) and do pretty much anything you want with it:
%cont-doctest
\begin{minted}{pycon}
>>> from Bio.Seq import MutableSeq
>>> mutable_seq = MutableSeq(my_seq)
>>> mutable_seq
MutableSeq('GCCATTGTAATGGGCCGCTGAAAGGGTGCCCGA')
\end{minted}
Alternatively, you can create a \verb|MutableSeq| object directly from a string:
%doctest
\begin{minted}{pycon}
>>> from Bio.Seq import MutableSeq
>>> mutable_seq = MutableSeq("GCCATTGTAATGGGCCGCTGAAAGGGTGCCCGA")
\end{minted}
Either way will give you a sequence object which can be changed:
%cont-doctest
\begin{minted}{pycon}
>>> mutable_seq
MutableSeq('GCCATTGTAATGGGCCGCTGAAAGGGTGCCCGA')
>>> mutable_seq[5] = "C"
>>> mutable_seq
MutableSeq('GCCATCGTAATGGGCCGCTGAAAGGGTGCCCGA')
>>> mutable_seq.remove("T")
>>> mutable_seq
MutableSeq('GCCACGTAATGGGCCGCTGAAAGGGTGCCCGA')
>>> mutable_seq.reverse()
>>> mutable_seq
MutableSeq('AGCCCGTGGGAAAGTCGCCGGGTAATGCACCG')
\end{minted}
Do note that unlike the \verb|Seq| object, the \verb|MutableSeq| object's methods like \verb|reverse_complement()| and \verb|reverse()| act in-situ!
An important technical difference between mutable and immutable objects in Python means that you can't use a \verb|MutableSeq| object as a dictionary key, but you can use a Python string or a \verb|Seq| object in this way.
Once you have finished editing your \verb|MutableSeq| object, it's easy to get back to a read-only \verb|Seq| object should you need to:
%cont-doctest
\begin{minted}{pycon}
>>> from Bio.Seq import Seq
>>> new_seq = Seq(mutable_seq)
>>> new_seq
Seq('AGCCCGTGGGAAAGTCGCCGGGTAATGCACCG')
\end{minted}
You can also get a string from a \verb|MutableSeq| object just like from a \verb|Seq| object (Section~\ref{sec:seq-to-string}).
\section{UnknownSeq objects}
\textbf{Note that }\texttt{UnknownSeq} \textbf{is deprecated. To represent a sequence of known length but unknown sequence contents, please use } \texttt{Seq(None, length)}\textbf{.}
The \verb|UnknownSeq| object is a subclass of the basic \verb|Seq| object
and its purpose is to represent a
sequence where we know the length, but not the actual letters making it up.
You could of course use a normal \verb|Seq| object in this situation, but it wastes
rather a lot of memory to hold a string of a million ``N'' characters when you could
just store a single letter ``N'' and the desired length as an integer.
%doctest
\begin{minted}{pycon}
>>> from Bio.Seq import UnknownSeq
>>> unk = UnknownSeq(20)
>>> unk
UnknownSeq(20, character='?')
>>> print(unk)
????????????????????
>>> len(unk)
20
\end{minted}
For DNA or RNA sequences, unknown nucleotides are commonly denoted by the letter ``N'', while for proteins ``X'' is commonly used for unknown amino acids. When creating an \verb|UnknownSeq|, you can specify the character to be used instead of ``?'' to represent unknown letters. For example:
%cont-doctest
\begin{minted}{pycon}
>>> from Bio.Seq import UnknownSeq
>>> unk_dna = UnknownSeq(20, character="N")
>>> unk_dna
UnknownSeq(20, character='N')
>>> print(unk_dna)
NNNNNNNNNNNNNNNNNNNN
\end{minted}
You can use all the usual \verb|Seq| object methods too; note that these give back
memory-saving \verb|UnknownSeq| objects where appropriate, as you might expect:
%cont-doctest
\begin{minted}{pycon}
>>> unk_dna
UnknownSeq(20, character='N')
>>> unk_dna.complement()
UnknownSeq(20, character='N')
>>> unk_dna.reverse_complement()
UnknownSeq(20, character='N')
>>> unk_dna.transcribe()
UnknownSeq(20, character='N')
>>> unk_protein = unk_dna.translate()
>>> unk_protein
UnknownSeq(6, character='X')
>>> print(unk_protein)
XXXXXX
>>> len(unk_protein)
6
\end{minted}
You may be able to find a use for the \verb|UnknownSeq| object in your own
code, but it is more likely that you will first come across one in a
\verb|SeqRecord| object created by \verb|Bio.SeqIO|
(see Chapter~\ref{chapter:seqio}).
Some sequence file formats don't always include the actual sequence, for
example GenBank and EMBL files may include a list of features but for the
sequence just present the contig information. Alternatively, the QUAL files
used in sequencing work hold quality scores but they \emph{never} contain a
sequence -- instead there is a partner FASTA file which \emph{does} have the
sequence.
\section{Working with strings directly}
\label{sec:seq-module-functions}
To close this chapter, for those of you who \emph{really} don't want to use the sequence
objects (or who prefer a functional programming style to an object-oriented one),
there are module-level functions in \verb|Bio.Seq| that will accept plain Python strings,
\verb|Seq| objects (including \verb|UnknownSeq| objects) or \verb|MutableSeq| objects:
%doctest
\begin{minted}{pycon}
>>> from Bio.Seq import reverse_complement, transcribe, back_transcribe, translate
>>> my_string = "GCTGTTATGGGTCGTTGGAAGGGTGGTCGTGCTGCTGGTTAG"
>>> reverse_complement(my_string)
'CTAACCAGCAGCACGACCACCCTTCCAACGACCCATAACAGC'
>>> transcribe(my_string)
'GCUGUUAUGGGUCGUUGGAAGGGUGGUCGUGCUGCUGGUUAG'
>>> back_transcribe(my_string)
'GCTGTTATGGGTCGTTGGAAGGGTGGTCGTGCTGCTGGTTAG'
>>> translate(my_string)
'AVMGRWKGGRAAG*'
\end{minted}
\noindent You are, however, encouraged to work with \verb|Seq| objects by default.
\documentclass[13pt,onlymath]{beamer}
\usefonttheme{serif}
\usepackage{graphicx,amsmath,amssymb,tikz,psfrag,epstopdf,fancyvrb}
\usepackage[lighttt]{lmodern}
%\usepackage{graphicx,psfrag}
\input defs.tex
%% formatting
\mode<presentation>
{
\usetheme{default}
}
\setbeamertemplate{navigation symbols}{}
\usecolortheme[rgb={0.13,0.28,0.59}]{structure}
\setbeamertemplate{itemize subitem}{--}
\setbeamertemplate{frametitle} {
\begin{center}
{\large\bf \insertframetitle}
\end{center}
}
\newcommand\footlineon{
\setbeamertemplate{footline} {
\begin{beamercolorbox}[ht=2.5ex,dp=1.125ex,leftskip=.8cm,rightskip=.6cm]{structure}
\footnotesize \insertsection
\hfill
{\insertframenumber}
\end{beamercolorbox}
\vskip 0.45cm
}
}
\footlineon
\AtBeginSection[]
{
\begin{frame}<beamer>
\frametitle{Outline}
\tableofcontents[currentsection,currentsubsection]
\end{frame}
}
%% begin presentation
\title{\large \bfseries Combinatorial Games}
\author{Jaehyun Park\\[3ex]
CS 97SI\\
Stanford University}
\date{\today}
\begin{document}
\frame{
\thispagestyle{empty}
\titlepage
}
\begin{frame}{Combinatorial Games}
\BIT
\item Turn-based competitive multi-player games
\item Can be a simple win-or-lose game, or can involve points
\item Everyone has perfect information
\item Each turn, the player changes the current ``state'' using a valid ``move''
\item At some states, there are no valid moves
\BIT
\item The current player immediately loses at these states
\EIT
\EIT
\end{frame}
\section{Simple Games}
\begin{frame}{Combinatorial Game Example}
\BIT
\item Settings: There are $n$ stones in a pile. Two players take turns and remove 1 or 3 stones at a time. The one who takes the last stone wins. Find out the winner if both players play perfectly
\item State space: Each state can be represented by the number of remaining stones in the pile
\item Valid moves from state $x$: $x \rightarrow (x-1)$ or $x \rightarrow (x-3)$, as long as the resulting number is nonnegative
\item State 0 is the losing state
\EIT
\end{frame}
\begin{frame}{Example (continued)}
\BIT
\item No cycles in the state transitions
\BIT
\item Can solve the problem bottom-up (DP)
\EIT
\item A player wins if there is a way to force the opponent to lose
\BIT
\item Conversely, we lose if there is no such way
\EIT
\item State $x$ is a winning state (W) if
\BIT
\item $(x-1)$ is a losing state,
\item OR $(x-3)$ is a losing state
\EIT
\item Otherwise, state $x$ is a losing state (L)
\EIT
\end{frame}
\begin{frame}{Example (continued)}
\BIT
\item DP table for small values of $n$:
\begin{center}
\begin{tabular}{|c|cccccccc|}
\hline
$n$&0&1&2&3&4&5&6&7 \\ \hline
W/L&L&W&L&W&L&W&L&W \\
\hline
\end{tabular}
\end{center}
\vfill
\item See a pattern?
\vfill
\item Let's prove our conjecture
\EIT
\end{frame}
\begin{frame}{Example (continued)}
\BIT
\item Conjecture: If $n$ is odd, the first player wins. If $n$ is even, the second player wins.
\vfill
\item Holds true for the base case $n=0$
\item In general,
\BIT
\item If $n$ is odd, we can remove one stone and give the opponent an even number of stones
\item If $n$ is even, no matter what we choose, we have to give an odd number of stones to the opponent
\EIT
\EIT
\end{frame}
\section{Minimax Algorithm}
\begin{frame}{More Complex Games}
\BIT
\item Settings: a competitive zero-sum two-player game
\item Zero-sum: if the first player's score is $x$, then the other player gets $-x$
\item Each player tries to maximize his/her own score
\item Both players play perfectly
\vfill
\item Can be solved using a \emph{minimax} algorithm
\EIT
\end{frame}
\begin{frame}{Minimax Algorithm}
\BIT
\item Recursive algorithm that decides the best move for the current player at a given state
\item Define $f(S)$ as the optimal score of the current player who starts at state $S$
\item Let $T_1, T_2, \ldots, T_m$ be the states that can be reached from $S$ using a single move
\item Let $T$ be the state that minimizes $f(T_i)$
\item Then, $f(S) = -f(T)$
\BIT
\item Intuition: minimizing the opponent's score maximizes my score
\EIT
\EIT
\end{frame}
\begin{frame}{Memoization}
\BIT
\item (Not \emph{memorization} but \emph{memoization})
\item A technique used to avoid repeated calculations in recursive functions
\item High-level idea: take a note (memo) of the return value of a function call. When the function is called with the same argument again, return the stored result
\item Each subproblem is solved at most once
\BIT
\item Some may not be solved at all!
\EIT
\EIT
\end{frame}
\begin{frame}[fragile]{Recursive Function without Memoization}
\begin{Verbatim}[xleftmargin=25pt]
int fib(int n)
{
if(n <= 1) return n;
return fib(n - 1) + fib(n - 2);
}
\end{Verbatim}
\vfill
\BIT
\item How many times is \verb,fib(1), called?
\EIT
\end{frame}
\begin{frame}[fragile]{Memoization using \texttt{std::map}}
\begin{Verbatim}[xleftmargin=25pt]
map<int, int> memo;
int fib(int n)
{
if(memo.count(n)) return memo[n];
if(n <= 1) return n;
return memo[n] = fib(n - 1) + fib(n - 2);
}
\end{Verbatim}
\vfill
\BIT
\item How many times is \verb,fib(1), called?
\EIT
\end{frame}
\begin{frame}{Minimax Algorithm Pseudocode}
\BIT
\item Given state $S$, want to compute $f(S)$
\vfill
\item If we know $f(S)$ already, return it
\item Set return value $x \leftarrow -\infty$
\item For each valid next state $T$:
\BIT
\item Update return value $x \leftarrow \max\{x, -f(T)\}$
\EIT
\item Write a memo $f(S) = x$ and return $x$
\EIT
\end{frame}
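\begin{frame}[fragile]{Minimax Pseudocode in C++ (Sketch)}
\BIT
\item A minimal sketch of the pseudocode above for the ``remove 1 or 3 stones'' game, scoring $+1$ for a win and $-1$ for a loss; names and the \texttt{std::map} memo are illustrative, mirroring the earlier \texttt{fib} example
\EIT
\begin{Verbatim}[xleftmargin=25pt]
map<int, int> memo; // state -> f(S)
int f(int s)
{
    if(memo.count(s)) return memo[s];
    int x = -1; // no valid move: current player loses
    if(s >= 1) x = max(x, -f(s - 1));
    if(s >= 3) x = max(x, -f(s - 3));
    return memo[s] = x;
}
\end{Verbatim}
\end{frame}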
\begin{frame}{Possible Extensions}
\BIT
\item The game is not zero-sum
\BIT
\item Each player wants to maximize his own score
\item Each player wants to maximize the difference between his score and the opponent's
\EIT
\item There are more than two players
\vfill
\item All of the above can be solved using a similar idea
\EIT
\end{frame}
\section{Nim Game}
\begin{frame}{Nim Game}
\BIT
\item Settings: There are $n$ piles of stones. Two players take turns. Each player chooses a pile, and removes any number of stones from the pile. The one who takes the last stone wins. Find out the winner if both players play perfectly
\vfill
\item Can't really use DP if there are many piles, because the state space is huge
\EIT
\end{frame}
\begin{frame}{Nim Game Example}
\BIT
\item Starts with heaps of 3, 4, 5 stones
\BIT
\item We will call them heap A, heap B, and heap C
\EIT
\vfill
\item Alice takes 2 stones from A: $(1, 4, 5)$
\item Bob takes 4 from C: $(1, 4, 1)$
\item Alice takes 4 from B: $(1, 0, 1)$
\item Bob takes 1 from A: $(0, 0, 1)$
\item Alice takes 1 from C and wins: $(0, 0, 0)$
\EIT
\end{frame}
\begin{frame}{Solution to Nim}
\BIT
\item Given heaps of size $n_1, n_2, \ldots, n_m$
\item The first player wins if and only if the \emph{nim-sum} $n_1 \oplus n_2 \oplus \cdots \oplus n_m$ is nonzero ($\oplus$ is bitwise XOR operator)
\vfill
\item Why?
\BIT
\item If the nim-sum is zero, then whatever the current player does, the nim-sum of the next state is nonzero
\item If the nim-sum is nonzero, it is possible to force it to become zero (not obvious, but true)
\EIT
\EIT
\end{frame}
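\begin{frame}[fragile]{Nim-sum in Code (Sketch)}
\BIT
\item A small added sketch of the winner test; for the earlier example $(3, 4, 5)$ the nim-sum is $3 \oplus 4 \oplus 5 = 2 \neq 0$, so the first player (Alice) wins
\EIT
\begin{Verbatim}[xleftmargin=25pt]
int piles[3] = {3, 4, 5};
int nimsum = 0;
for(int i = 0; i < 3; i++) nimsum ^= piles[i];
// first player wins iff nimsum != 0
\end{Verbatim}
\end{frame}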
\section{Grundy Numbers (Nimbers)}
\begin{frame}{Playing Multiple Games at Once}
\BIT
\item Suppose that multiple games are played at the same time. At each turn, the player chooses a game and makes a move. You lose if there is no possible move. We want to determine the winner
\EIT
\begin{center}
\includegraphics[height=0.5\textheight]{figures/games}
Figure from \url{http://sps.nus.edu.sg/~limchuwe/cgt/}
\end{center}
\end{frame}
\begin{frame}{Grundy Numbers (Nimbers)}
\BIT
\item For each game, we compute its \emph{Grundy number}
\item The first player wins if and only if the XOR of all the Grundy numbers is nonzero
\BIT
\item For example, the Grundy number of a one-pile version of the nim game is equal to the number of stones in the pile (we will see this again later)
\EIT
\vfill
\item Let's see how to compute the Grundy numbers for general games
\EIT
\end{frame}
\begin{frame}{Grundy Numbers}
\BIT
\item Let $S$ be a state, and $T_1, T_2, \ldots, T_m$ be the states that can be reached from $S$ using a single move
\vfill
\item The Grundy number $g(S)$ of $S$ is the smallest nonnegative integer that doesn't appear in $\{g(T_1), g(T_2), \ldots, g(T_m)\}$
\BIT
\item Note: the Grundy number of a losing state is 0
\item Note: I made up the notation $g(\cdot)$. Don't use it in other places
\EIT
\EIT
\end{frame}
\begin{frame}{Grundy Numbers Example}
\BIT
\item Consider a one-pile nim game
\item $g(0) = 0$, because it is a losing state
\item State 0 is the only state reachable from state 1, so $g(1)$ is the smallest nonnegative integer not appearing in $\{g(0)\} = \{0\}$. Thus, $g(1) = 1$
\item Similarly, $g(2) = 2$, $g(3) = 3$, and so on
\item The Grundy number for this game is then $g(n) = n$
\BIT
\item That's how we got the nim-sum solution
\EIT
\EIT
\end{frame}
\begin{frame}{Another Example}
\BIT
\item Let's consider a variant of the game we considered before; only 1 or 2 stones can be removed at each turn
\item Now we're going to play many copies of this game at the same time
\item Grundy number table:
\begin{center}
\begin{tabular}{|c|cccccccc|}
\hline
$n$&0&1&2&3&4&5&6&7 \\ \hline
$g(n)$&0&1&2&0&1&2&0&1 \\
\hline
\end{tabular}
\end{center}
\EIT
\end{frame}
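\begin{frame}[fragile]{Computing the Grundy Table (Sketch)}
\BIT
\item A short added sketch that reproduces the table above for the ``remove 1 or 2 stones'' game, directly from the mex definition (names are illustrative)
\EIT
\begin{Verbatim}[xleftmargin=25pt]
int g[8];
for(int n = 0; n < 8; n++)
{
    set<int> seen; // Grundy numbers of reachable states
    if(n >= 1) seen.insert(g[n - 1]);
    if(n >= 2) seen.insert(g[n - 2]);
    int mex = 0; // smallest nonnegative integer not seen
    while(seen.count(mex)) mex++;
    g[n] = mex; // gives 0 1 2 0 1 2 0 1
}
\end{Verbatim}
\end{frame}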
\begin{frame}{Another Example (continued)}
\BIT
\item Grundy number table:
\begin{center}
\begin{tabular}{|c|cccccccc|}
\hline
$n$&0&1&2&3&4&5&6&7 \\ \hline
$g(n)$&0&1&2&0&1&2&0&1 \\
\hline
\end{tabular}
\end{center}
\vfill
\item Who wins if there are three piles of stones $(2, 4, 5)$?
\item What if we start with $(5, 11, 13, 16)$?
\item What if we start with $(10^{100}, 10^{200})$?
\EIT
\end{frame}
\begin{frame}{Tips for Solving Game Problems}
\BIT
\item If the state space is small, use memoization
\item If not, print out the result of the game for small test data and look for a pattern
\BIT
\item This actually works really well!
\EIT
\item Try to convert the game into some nim-variant
\item If multiple games are played at once, use Grundy numbers
\EIT
\end{frame}
\end{document}
\section{Discussion}
In these experiments, we assessed the role of motor cortical structures by making targeted lesions to areas responsible for forelimb control \cite{Kawai2015,Otchy2015}. Consistent with previous studies, we did not observe any conspicuous deficits in movement execution for rats with bilateral motor cortex lesions when negotiating a stable environment. Even when exposed to a sequence of unstable obstacles, animals were able to learn an efficient strategy for crossing these more challenging environments, with or without motor cortex. These movement strategies also include a preparatory component that might reflect the state of the world an animal expected to encounter. Surprisingly, these preparatory responses also did not require the presence of motor cortex.
It was only when the environment did not conform to expectation, and demanded a rapid adjustment, that a difference between the lesion and control groups was obvious. Animals with extensive damage to the motor cortex did not deploy a change in strategy. Rather, they halted their progression for several seconds, unable to robustly respond to the new motor challenge. In an ecological setting, such hesitation could easily prove fatal. Control animals, on the other hand, were able to rapidly and flexibly reorganize their motor response to an entirely unexpected change in the environment.
Our preliminary investigations of the neurophysiological basis of these robust responses with ECoG have revealed the presence of large amplitude evoked potentials in the motor cortex arising specifically in response to an unexpected collapse of the steps during locomotion. Compared with evoked responses obtained from normal stepping under stable conditions (\SI{-100}{\micro\volt} peak at \SI{10}{\milli\second}), these potentials are both much larger (\SI{-300}{\micro\volt}) and delayed in time (peak at \SI{70}{\milli\second}). Still, they preceded any overt behaviour corrections from the animal following the perturbation, as observed in the high-speed video recordings. The onset of these evoked potentials is in the range of the long-latency stretch reflex, which has been suggested to involve a transcortical loop through the motor cortex \cite{Phillips1969,Matthews1990,Capaday1991}. However, the simultaneous complexity and rapidity of adaptive motor responses we observed in control animals is striking, as they appear to go beyond simple corrective responses to reach a predetermined goal and include a fast switch to entirely different investigatory or compensatory motor strategies adapted to the novel situation. What is the nature of these robust responses that animals without motor cortex seem unable to deploy? What do they allow an animal to achieve? Why are cortical structures necessary for their successful and rapid deployment?
\section{Linear Classifier}
Feature vectors $x$, labels $y$
\begin{align*}
x \in \mathbb{R}^d\\
y \in \{-1,1\}
\end{align*}
Training set
\begin{align*}
S_n = \{(x^{(i)}, y^{(i)}), i=1,...,n\}
\end{align*}
Classifier
\begin{align*}
h: \mathbb{R}^d \rightarrow \{-1,1\}\\
\chi^{+} = \{x \in \mathbb{R}^d: h(x) =1\}\\
\chi^{-} = \{x \in \mathbb{R}^d: h(x) =-1\}
\end{align*}
Training error
\begin{align*}
\varepsilon_n(h)= \frac{1}{n} \sum_{i=1}^n \textbf{1}\{h(x^{(i)}) \neq y^{(i)} \}
\end{align*}
Test error (over disjoint set of examples)
\begin{align*}
\varepsilon(h)
\end{align*}
Set of classifiers
\begin{align*}
h \in H
\end{align*}
\subsection{Linear classifiers through origin}
Set of all points that satisfy the equation of a line through the origin.
\begin{align*}
\theta &= \begin{bmatrix}
\theta_{1} \\
\theta_{2}
\end{bmatrix}\\
X &= \begin{bmatrix}
x_{1} \\
x_{2}
\end{bmatrix}\\
\end{align*}
Decision Boundary
\begin{align*}
\{x&: \theta_1 x_1 + \theta_2 x_2 = 0\}\\
\{x&: \theta \cdot X=0\}
\end{align*}
Linear Classifier through origin
\begin{align*}
h(x,\theta)=\operatorname{sign}(\theta \cdot X)\\
\theta \in \mathbb{R}^d
\end{align*}
\subsection{Linear classifiers}
General linear Classifier (with Intercept)
\begin{align*}
\theta &= \begin{bmatrix}
\theta_{1} \\
\theta_{2}
\end{bmatrix}\\
X &= \begin{bmatrix}
x_{1} \\
x_{2}
\end{bmatrix}\\
\end{align*}
Decision Boundary
\begin{align*}
\{x&: \theta \cdot X + \theta_0 = 0\}
\end{align*}
Linear Classifier with offset
\begin{align*}
h(x,\theta, \theta_0)=\operatorname{sign}(\theta \cdot X + \theta_0)\\
\theta \in \mathbb{R}^d\\
\theta_0 \in \mathbb{R}
\end{align*}
\subsection{Linear Separation}
Training examples $S_n = \{(x^{(i)}, y^{(i)}), i=1,...,n\}$ are linearly separable if there exists a parameter vector $\hat{\theta}$ and offset parameter $\hat{\theta}_0$ such that $y^{(i)}(\hat{\theta} \cdot x^{(i)} + \hat{\theta}_0 )>0$ for all $i=1,\cdots,n$.
\begin{align*}
y^{(i)}(\theta \cdot x^{(i)})>0 \iff
\begin{cases}
y^{(i)}>0 \text{ and } \theta \cdot x^{(i)} >0\\
y^{(i)}<0 \text{ and } \theta \cdot x^{(i)} <0
\end{cases}
\end{align*}
$y^{(i)}(\theta \cdot x^{(i)})>0$ if label and classified result match. This leads to a new definition of the \textbf{Training error}:
\begin{align*}
\varepsilon_n(\theta)&= \frac{1}{n} \sum_{i=1}^n \textbf{1}\{y^{(i)}(\theta \cdot x^{(i)}) \leq 0\}\\
\varepsilon_n(\theta,\theta_0)&= \frac{1}{n} \sum_{i=1}^n \textbf{1}\{y^{(i)}(\theta \cdot x^{(i)} + \theta_0) \leq 0\}
\end{align*}
\subsection{Perceptron through Origin}
\textbf{Perceptron}$\displaystyle \left(\big \{ (x^{(i)}, y^{(i)}), i=1,...,n\big \} , T \right):$
\begin{enumerate}[\indent {}]
\item initialize $\theta=0$ (vector);
\begin{enumerate}[\indent {}]
\item for $t=1,\cdots,T$ do
\begin{enumerate}[\indent {}]
\item for $i=1,\cdots,n$ do
\begin{enumerate}[\indent {}]
\item if $y^{(i)}(\theta \cdot x^{(i)})\leq 0$ then
\item update $\theta = \theta + y^{(i)}x^{(i)}$
\end{enumerate}
\end{enumerate}
\end{enumerate}
\end{enumerate}
\subsection{Perceptron with Offset}
\textbf{Perceptron}$\displaystyle \left(\big \{ (x^{(i)}, y^{(i)}), i=1,...,n\big \} , T \right):$
\begin{enumerate}[\indent {}]
\item initialize $\theta=0$ (vector); $\theta_0=0$ (scalar)
\begin{enumerate}[\indent {}]
\item for $t=1,\cdots,T$ do
\begin{enumerate}[\indent {}]
\item for $i=1,\cdots,n$ do
\begin{enumerate}[\indent {}]
\item if $y^{(i)}(\theta \cdot x^{(i)} + \theta_0)\leq 0$ then
\item update $\theta = \theta + y^{(i)}x^{(i)}$
\item update $\theta_0=\theta_0 + y^{(i)}$
\end{enumerate}
\end{enumerate}
\end{enumerate}
\end{enumerate}
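A minimal NumPy sketch of the perceptron with offset above; the function and variable names are illustrative additions, not part of the original notes.
\begin{verbatim}
import numpy as np

def perceptron(X, y, T):
    # X: (n, d) feature matrix, y: labels in {-1, +1}, T: passes over the data
    n, d = X.shape
    theta = np.zeros(d)
    theta_0 = 0.0
    for t in range(T):
        for i in range(n):
            if y[i] * (X[i] @ theta + theta_0) <= 0:  # mistake: update
                theta = theta + y[i] * X[i]
                theta_0 = theta_0 + y[i]
    return theta, theta_0
\end{verbatim}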
\subsection{Margin Boundary}
The Margin Boundary is the set of points $x$ which satisfy $\theta \cdot x + \theta _0= \pm 1$. So, the distance (with signed direction) from the decision boundary to the margin boundary is $\displaystyle \frac{1}{\mid \mid \theta \mid \mid }$.
For a point $x^{(i)}$ lying on the margin boundary, this signed distance is
$\frac{y^{(i)}(\theta \cdot x^{(i)} + \theta _0)}{\mid \mid \theta \mid \mid }=\frac{1}{\mid \mid \theta \mid \mid }.$
\textbf{Hinge Loss (agreement)}
\begin{align*}
Agreement &= z = y^{(i)}(\theta \cdot x^{(i)} + \theta _0)\\
Loss_h(z) &=
\begin{cases}
0 \text{ if } z \geq 1\\
1-z \text{ if } z < 1
\end{cases}
\end{align*}
\textbf{Regularization} means pushing out the margin boundaries by maximizing $\frac{1}{\mid \mid \theta \mid \mid }$, or equivalently minimizing $\frac{1}{2}\mid \mid \theta \mid \mid^2$, which is added to the objective function.
The total hinge loss over the training set is
$\sum _{i=1}^{n} \max \{ 0, 1 - y^{(i)} (\theta \cdot x^{(i)} + \theta _0)\}$
\textbf{Objective Function}
Objective function = average loss + regularization\\
The objective function is minimized, so learning becomes an optimization problem. Using the hinge loss and margin boundaries in this way is called a \textbf{Support Vector Machine} or \textbf{Large margin linear classification}:
\begin{align*}
J(\theta , \theta _0) = \frac{1}{n} \sum _{i=1}^{n} \text {Loss}_ h (z) + \frac{\lambda }{2} \mid \mid \theta \mid \mid ^2.
\end{align*}
Where $\lambda > 0$ is called the regularization parameter that regulates how important the margin boundaries are in comparison to the average hinge loss.
\textbf{Cost:} the cost is the loss averaged over the training set.
\subsection{Gradient Descent}
Assume $\theta \in \mathbb{R}$; the goal is to find $\theta$ that minimizes $J(\theta , \theta _0) = \frac{1}{n} \sum _{i=1}^{n} \text {Loss}_ h (y^{(i)} (\theta \cdot x^{(i)} + \theta _0 )) + \frac{\lambda }{2} \mid \mid \theta \mid \mid ^2$ through gradient descent.\\
In other words, we will
\begin{itemize}
\item Start $\theta$ at an arbitrary location: $\theta \leftarrow \theta _{start}$
\item Update $\theta$ repeatedly with $\theta \leftarrow \theta - \eta \frac{\partial J(\theta , \theta _0)}{\partial \theta }$ until $\theta$ does not change significantly.
\end{itemize}
Where $\eta >0$ is called the stepsize or \textbf{learning parameter}.
\subsection{Stochastic Gradient Descent}
\begin{align*}
J(\theta , \theta _0) &= \frac{1}{n} \sum _{i=1}^{n} \text {Loss}_ h (z) + \frac{\lambda }{2} \mid \mid \theta \mid \mid ^2\\
&= \frac{1}{n} \sum _{i=1}^{n}\big [ \text {Loss}_ h (z) + \frac{\lambda }{2} \mid \mid \theta \mid \mid ^2 \big ]
\end{align*}
With stochastic gradient descent, we choose $i \in \big \{ 1,...,n \big \}$ at random and update $\theta$ such that\\
\begin{align*}
\theta \leftarrow \theta - \eta \nabla _{\theta } \big [\text {Loss}_ h(z) + \frac{\lambda }{2}\mid \mid \theta \mid \mid ^2 \big ]
\end{align*}
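A minimal NumPy sketch of this update rule for the classifier through the origin (hinge loss plus regularization); the function name, step size and other defaults are illustrative additions, not part of the original notes.
\begin{verbatim}
import numpy as np

def svm_sgd(X, y, lam=0.1, eta=0.01, epochs=10, seed=0):
    rng = np.random.default_rng(seed)
    n, d = X.shape
    theta = np.zeros(d)
    for _ in range(epochs):
        for i in rng.permutation(n):
            z = y[i] * (X[i] @ theta)
            grad = lam * theta            # gradient of (lambda/2)*||theta||^2
            if z < 1:                     # hinge loss is active
                grad = grad - y[i] * X[i]
            theta = theta - eta * grad
    return theta
\end{verbatim}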
\section{Sequential games}
\filetitle{dbclip}{Clip all tseries entries in database down to specified date range}{dbase/dbclip}
\paragraph{Syntax}\label{syntax}
\begin{verbatim}
D = dbclip(D,Range)
\end{verbatim}
\paragraph{Input arguments}\label{input-arguments}
\begin{itemize}
\item
\texttt{D} {[} struct {]} - Database or nested databases with tseries
objects.
\item
\texttt{Range} {[} numeric \textbar{} cell {]} - Range or a cell array
of ranges to which all tseries objects will be clipped; multiple
ranges can be specified, each for a different date
frequency/periodicity.
\end{itemize}
\paragraph{Output arguments}\label{output-arguments}
\begin{itemize}
\itemsep1pt\parskip0pt\parsep0pt
\item
\texttt{D} {[} struct {]} - Database with tseries objects cut down to
\texttt{Range}.
\end{itemize}
\paragraph{Description}\label{description}
This function looks up all tseries objects within the database
\texttt{D}, including tseries objects nested in sub-databases, and cuts
off any values preceding the start date of \texttt{Range} or following
the end date of \texttt{Range}. The tseries object comments, if any, are
preserved in the new database.
If a tseries entry does not match the date frequency of the input range,
a warning is thrown.
Multiple ranges can be specified in \texttt{Range} (as a cell array),
each for a different date frequency/periodicity (i.e.~one or more of the
following: monthly, bi-monthly, quarterly, half-yearly, yearly,
indeterminate). Each tseries entry will be clipped to the range that
matches its date frequency.
\paragraph{Example}\label{example}
\begin{verbatim}
d = struct();
d.x = tseries(qq(2005,1):qq(2010,4),@rand);
d.y = tseries(qq(2005,1):qq(2010,4),@rand)
d =
x: [24x1 tseries]
y: [24x1 tseries]
dbclip(d,qq(2007,1):qq(2007,4))
ans =
x: [4x1 tseries]
y: [4x1 tseries]
\end{verbatim}
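A further illustrative example (an addition, not from the original help text) clips a database mixing quarterly and monthly series by passing a cell array of ranges, as described above (using the quarterly and monthly date constructors \texttt{qq} and \texttt{mm}):
\begin{verbatim}
d = struct();
d.q = tseries(qq(2005,1):qq(2010,4),@rand);
d.m = tseries(mm(2005,1):mm(2010,12),@rand);
d = dbclip(d,{qq(2007,1):qq(2007,4),mm(2007,1):mm(2007,12)});
\end{verbatim}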
\documentclass[a4paper]{article}
\usepackage[english]{babel}
\usepackage[utf8]{inputenc}
\usepackage{amsmath}
\usepackage{graphicx}
\usepackage[colorinlistoftodos]{todonotes}
\title{CSE 3211 : Operating System Assignment 2} %title of the report
% author name: you and your partner
\author{Saif Mahmud\\
2015-116-815 \& SH-54
\and
Tauhid Tanjim\\
2015-716-819 \& SH-58
}
\date{January 30, 2019}
\begin{document}
\maketitle
\section{Introduction}
\label{sec:intro}
We have implemented the following file-system-related system calls in OS161:\\
\textbf{open, close, read, write, lseek and dup2}, as part of the assigned task. In order to implement these syscalls, we have modified the following 7 files, listed below:
\begin{itemize}
\item kern/syscall/file.c
\item kern/include/file.h
\item kern/include/syscall.h
\item kern/arch/mips/syscall/syscall.c
\item kern/include/uio.h
\item kern/lib/uio.c
\item kern/include/proc.h
\end{itemize}
\section{Description of Implementation}
\label{sec:details}
\subsection{kern/include/syscall.h}
In this file we have included the header \textit{\textbf{``file.h''}}.
\subsection{kern/arch/mips/syscall/syscall.c}
In this C file, we have used the function calls needed for each system call related to the implemented file system. We have obtained the syscall numbers from the file \textit{\textbf{kern/include/kern/syscall.h}}. For the system calls discussed above, we have used the corresponding functions SYS\_open(), SYS\_close(), SYS\_write(), SYS\_read(), SYS\_lseek() and SYS\_dup2(). We obtained the arguments from the trapframe structure, which is passed as a parameter to the syscall functions. The result of each system call is assigned to the ret\_val variable, which is returned through the v0 and v1 variables in the trapframe. In the case of SYS\_lseek() we used another type of variable in order to return the result: an off\_t variable named retval\_off.
\subsection{kern/include/proc.h}
In this header file, we have initialized a file descriptor table in the proc structure. We have included structure :
\textbf{\textit{file\_t *file\_descriptor\_table [MAX\_PROCESS\_OPEN\_FILES];}}\\Therefore, the file descriptor table is an array of ``file\_t” type and is of size MAX\_PROCESS\_OPEN\_FILES.
\subsection{kern/include/uio.h}
In this file, we implemented a function to initialize input-output buffers.
\subsection{kern/lib/uio.c}
In this C file, we have added the function that initializes buffers to carry blocks of data after executing file system calls. It creates a buffer, pointer to the created buffer, its offset, its size and other information that construct the uio structure before allowing the transfer of data.
\subsection{kern/include/file.h}
In this header file for the system calls of OS161 file system, we have defined a structure named \textbf{\textit{``file\_t”}}. This structure keeps all necessary pieces of information for a file. The variables of this structure are listed below :
\begin{itemize}
\item \textbf{off\_t offset} : To keep track of the offset. It is modified after read, write and seek operation.
\item \textbf{int openflags} : To store the flags of the file.
\item \textbf{int32\_t references} : To store the references count. Its purpose is same as unix file systems reference count.
\item \textbf{struct lock *file\_lock} : A lock type pointer used to provide mutually exclusive access to files.
\item \textbf{struct vnode *v\_ptr} : A vnode pointer required for vfs function calls.
\end{itemize}
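A simplified sketch of this structure as it could appear in the header (the include lines, typedef style and exact layout here are illustrative and may differ from the actual submitted \textit{file.h}):
\begin{verbatim}
#include <types.h>   /* off_t, int32_t */
#include <synch.h>   /* struct lock    */
#include <vnode.h>   /* struct vnode   */

typedef struct file {
    off_t offset;            /* current file offset                  */
    int openflags;           /* flags the file was opened with       */
    int32_t references;      /* reference count                      */
    struct lock *file_lock;  /* mutual exclusion on the open file    */
    struct vnode *v_ptr;     /* vnode pointer for vfs function calls */
} file_t;
\end{verbatim}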
In addition to this, we have declared the function prototypes we used in \textit{file.c}.
\subsection{kern/syscall/file.c}
The basic requirement of the implementation was to initialize per process file descriptor tables with a global open file table. The implementation of the per process file descriptor tables was done by including a reference
to a table in the process structure. Hence we were able to access each
process' fd table through \textit{curproc}. The table itself is simply an array of pointers to the entries in the global open file table and new slots are assigned through a linear search for empty slots.
The entries of the global open file table contained :
\begin{itemize}
\item Reference to the vnode that was acquired from the vfs
\item Lock for mutual exclusion when reading and writing. This would protect against two entries writing at once and also prevents two reads from messing with the offset file pointer
\item File pointer to keep track of the current offset
\item Flags that this file was opened with number of handles that reference this entry
\end{itemize}
In the case of the global open file table, we decided not to keep the entries in any dedicated data structure but to leave them in the kernel heap. We made this decision because there is no need to keep the entries in a separate structure, which would only complicate the implementation unnecessarily, if we simply keep references to them in the file descriptor tables.
The implementation details of the system calls function for the file system is described below :
\begin{itemize}
\item \textbf{sys\_open} : This function essentially passes most of the work off to vfs\_open and creates the new open\_file entry in the process descriptor table, subsequently creating the lock, initializing the offset to 0, etc. To prevent security issues with the userptr to the filename, the first thing that needed to be done was to use copyinstr to transfer the filename into kernel memory safely before passing it to vfs\_open. We also check first that the userptr filename isn't NULL. We have also reused most of the code of this function to bind STDOUT and STDERR to the 2 and 3 descriptors respectively.
\item \textbf{sys\_close} : For this function we had to consider what would happen if sys\_close was called on a descriptor that had been cloned whether by fork() or dup2(). Since we need to free all the memory allocations only when we delete the last reference, we decided to keep a count of references on the open\_file data structure, only freeing the memory when it reached 0.
\item \textbf{sys\_read} : We first do some user input checking, for example checking that the file has not been opened in O\_WRONLY mode. We then acquire the open file lock and create a uio in UIO\_USERSPACE and UIO\_READ mode and pass this to vop\_read with the open file pointer to read the data directly into the userptr buffer. We then compute the amount of data read by subtracting the initial file offset before reading from the new offset returned by the uio. The reading is done with the lock acquired to ensure mutual exclusion when multiple processes read the same open file and advance the offset file pointer.
\item \textbf{sys\_write} : Write is essentially a mirror image of read: we call vop\_write instead, initialize the UIO in UIO\_WRITE mode, and check that the file hasn't been opened in O\_RDONLY mode.
\item \textbf{sys\_lseek} : This system call requires more than 4 registers, since the 2nd argument is a 64 bit value, causing the arguments to be assigned to a2 and a3. This subsequently required us to fetch the last argument from the stack. Hence the implementation of the function incorporates modifying the offset field in the open\_file entry and taking concurrency into account.
\item \textbf{sys\_dup2} : This function simply (after error checking) copies the reference in oldfd to newfd, calling sys\_close on newfd if it was already populated. We also increment the reference counter, taking concurrency into account through lock.
\end{itemize}
\section{Conclusion}
According to the above discussion, we have successfully implemented the basic system calls for the file system as per the assigned task, in order to access and edit the files organised in the file system of OS161, following the design described above. It should be noted that we encountered a number of system inconsistencies while framing the syscalls, which we troubleshot with the debugger before fixing the issues in the file system call implementation.
\end{document}
\chapter[My first chapter]{My first chapter}
\label{chap:chap1}
\chapterabstract
{
Read this chapter.
}
\pagestyle{plain}
\section{Introduction}
This is an interesting chapter~\citep{bradford1976rapid}.
\section{Conclusion}
That chapter was interesting.
\subsubsection{ComparisonStatistics}
\label{ComparisonStatistics}
The \textbf{ComparisonStatistics} post-processor computes statistics
for comparing two different dataObjects. This is an experimental
post-processor, and it will definitely change as it is further
developed.
There are four nodes that are used in the post-processor.
\begin{itemize}
\item \xmlNode{kind}: specifies information to use for comparing the
data that is provided. This takes either uniformBins which makes
the bin width uniform or equalProbability which makes the number
of counts in each bin equal. It can take the following attributes:
\begin{itemize}
\item \xmlAttr{numBins} which takes a number that directly
specifies the number of bins
\item \xmlAttr{binMethod} which takes a string that specifies the
method used to calculate the number of bins. This can be either
square-root or sturges.
\end{itemize}
\item \xmlNode{compare}: specifies the data to use for comparison.
This can either be a normal distribution or a dataObjects:
\begin{itemize}
\item \xmlNode{data}: This will specify the data that is used. The
different parts are separated by $|$'s.
  \item \xmlNode{reference}: This specifies a reference distribution
    to be used. It takes a distribution that is defined in the
    distributions block; a name parameter is used to tell which
    distribution is used.
\end{itemize}
\item \xmlNode{fz}: If the text is true, then extra comparison
statistics for using the $f_z$ function are generated. These take
extra time, so are not on by default.
\item \xmlNode{interpolation}: This switches the interpolation used
for the cdf and the pdf functions between the default of quadratic
or linear.
\end{itemize}
The \textbf{ComparisonStatistics} post-processor generates a variety
of data. First, for each data set provided, it calculates bin boundaries
and counts the number of data points in each bin. From the numbers
in each bin, it creates a cdf function numerically, and from the cdf
takes the derivative to generate a pdf. It also calculates statistics
of the data such as mean and standard deviation. The post-processor
can generate a CSV file only.
The post-processor uses the generated pdf and cdf function to
calculate various statistics. The first is the cdf area difference which is:
\begin{equation}
cdf\_area\_difference = \int_{-\infty}^{\infty}{\|CDF_a(x)-CDF_b(x)\|dx}
\end{equation}
This gives an idea about how far apart the two pieces of data are, and
it will have units of $x$.
The common area between the two pdfs is calculated. If there is
perfect overlap, this will be 1.0, if there is no overlap, this will
be 0.0. The formula used is:
\begin{equation}
pdf\_common\_area = \int_{-\infty}^{\infty}{\min(PDF_a(x),PDF_b(x))}dx
\end{equation}
The difference pdf between the two pdfs is calculated as:
\begin{equation}
f_Z(z) = \int_{-\infty}^{\infty}f_X(x)f_Y(x-z)dx
\end{equation}
This produces a pdf that contains information about the difference
between the two pdfs. The mean can be calculated as (and will be
calculated only if fz is true):
\begin{equation}
\bar{z} = \int_{-\infty}^{\infty}{z f_Z(z)dz}
\end{equation}
The mean can be used to get a signed difference between the pdfs,
which shows how their means compare.
The variance of the difference pdf can be calculated as (and will be
calculated only if fz is true):
\begin{equation}
var = \int_{-\infty}^{\infty}{(z-\bar{z})^2 f_Z(z)dz}
\end{equation}
The sum of the difference function is calculated if fz is true, and is:
\begin{equation}
sum = \int_{-\infty}^{\infty}{f_z(z)dz}
\end{equation}
This should be 1.0, and if it is different, that points to
approximations in the calculation.
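As a rough numerical illustration of the quantities above (outside of RAVEN, using plain NumPy; the function and variable names are illustrative and not part of the RAVEN API):
\begin{lstlisting}[language=Python]
import numpy as np

def comparison_stats(a, b, num_bins=20):
    # common grid covering both samples
    lo, hi = min(a.min(), b.min()), max(a.max(), b.max())
    edges = np.linspace(lo, hi, num_bins + 1)
    dx = edges[1] - edges[0]
    pdf_a, _ = np.histogram(a, bins=edges, density=True)
    pdf_b, _ = np.histogram(b, bins=edges, density=True)
    cdf_a = np.cumsum(pdf_a) * dx
    cdf_b = np.cumsum(pdf_b) * dx
    cdf_area_difference = np.sum(np.abs(cdf_a - cdf_b)) * dx
    pdf_common_area = np.sum(np.minimum(pdf_a, pdf_b)) * dx
    return cdf_area_difference, pdf_common_area
\end{lstlisting}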
\textbf{Example:}
\begin{lstlisting}[style=XML]
<Simulation>
...
<Models>
...
<PostProcessor name="stat_stuff" subType="ComparisonStatistics">
<kind binMethod='sturges'>uniformBins</kind>
<compare>
<data>OriData|Output|tsin_TEMPERATURE</data>
<reference name='normal_410_2' />
</compare>
<compare>
<data>OriData|Output|tsin_TEMPERATURE</data>
<data>OriData|Output|tsout_TEMPERATURE</data>
</compare>
</PostProcessor>
<PostProcessor name="stat_stuff2" subType="ComparisonStatistics">
<kind numBins="6">equalProbability</kind>
<compare>
<data>OriData|Output|tsin_TEMPERATURE</data>
</compare>
<Distribution class='Distributions' type='Normal'>normal_410_2</Distribution>
</PostProcessor>
...
</Models>
...
<Distributions>
<Normal name='normal_410_2'>
<mean>410.0</mean>
<sigma>2.0</sigma>
</Normal>
</Distributions>
</Simulation>
\end{lstlisting}
%========================================================
\section{Dividends History}\label{sec:dividends}
%========================================================
Historical dividend payments data in the {\tt dividends.txt} file is given in three columns: Ex-Dividend Date, Equity Name and
Dividend Amount in the Currency of the Equity Curve. Columns are separated by semicolons ";" or blanks.
Dividends are used in some trades with path dependent features.
\begin{itemize}
\item Ex-Dividend Date: The day the stock starts trading without the value of the dividend payment.
Allowable values: See \lstinline!Date! in Table \ref{tab:allow_stand_data}.
\item Equity Name: The name of the dividend paying equity.
Allowable values are the names of the Equity Curves defined in {\tt curveconfig.xml}.
\item Dividend Amount: The amount of the dividend payment
date.
Allowable values: Any real number (not expressed as a percentage).
\end{itemize}
An excerpt of a dividends file is shown in Listing \ref{lst:dividends_file}.
\begin{listing}[H]
%\hrule\medskip
\begin{minted}[fontsize=\footnotesize]{xml}
20130411 DAI:GR 2.2
20140410 DAI:GR 2.25
20150402 DAI:GR 2.45
20160407 DAI:GR 3.25
20170330 DAI:GR 3.25
20180406 DAI:GR 3.65
20190523 DAI:GR 3.25
20120815 HSBA:LN 5.5538
20121024 HSBA:LN 5.604
20130320 HSBA:LN 11.585
20130522 HSBA:LN 6.58
20130821 HSBA:LN 6.2033
20131023 HSBA:LN 6.102
20140312 HSBA:LN 11.2919
20140521 HSBA:LN 5.8768
20140820 HSBA:LN 6.1622
20141023 HSBA:LN 6.3633
20150305 HSBA:LN 13.4
20150521 HSBA:LN 6.3709
20150813 HSBA:LN 6.4436
20151022 HSBA:LN 6.6015
20160303 HSBA:LN 14.7908
20160519 HSBA:LN 7.5421
20160811 HSBA:LN 7.6633
20161020 HSBA:LN 8.0417
20170223 HSBA:LN 16.6757
20170518 HSBA:LN 7.8636
20170803 HSBA:LN 7.577
20171012 HSBA:LN 7.6405
20180222 HSBA:LN 14.762
20180517 HSBA:LN 7.5502
20180816 HSBA:LN 7.632
20181011 HSBA:LN 7.78
20190221 HSBA:LN 15.9271
20190516 HSBA:LN 7.8368
\end{minted}
\caption{Excerpt of a dividends file}
\label{lst:dividends_file}
\end{listing}
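For illustration only (this is a sketch, not part of ORE), a dividends file in the layout shown in Listing \ref{lst:dividends_file} can be read as follows; the column order (date, equity name, amount) and the file name are taken from the example above, and either semicolon or blank separators are accepted.
\begin{minted}[fontsize=\footnotesize]{python}
# Illustrative reader for a dividends file (sketch only, not part of ORE).
# Assumes the date / equity name / amount layout shown above.
import re

def read_dividends(path="dividends.txt"):
    dividends = []
    with open(path) as f:
        for line in f:
            line = line.strip()
            if not line:
                continue
            # columns may be separated by semicolons or blanks
            date, name, amount = re.split(r"[;\s]+", line)[:3]
            dividends.append((date, name, float(amount)))
    return dividends

for date, name, amount in read_dividends():
    print(date, name, amount)
\end{minted}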
\subsection{Open regions}
\chapter{Introduction}
The ``Python library'' contains several different kinds of components.
It contains data types that would normally be considered part of the
``core'' of a language, such as numbers and lists. For these types,
the Python language core defines the form of literals and places some
constraints on their semantics, but does not fully define the
semantics. (On the other hand, the language core does define
syntactic properties like the spelling and priorities of operators.)
The library also contains built-in functions and exceptions ---
objects that can be used by all Python code without the need of an
\code{import} statement. Some of these are defined by the core
language, but many are not essential for the core semantics and are
only described here.
The bulk of the library, however, consists of a collection of modules.
There are many ways to dissect this collection. Some modules are
written in C and built in to the Python interpreter; others are
written in Python and imported in source form. Some modules provide
interfaces that are highly specific to Python, like printing a stack
trace; some provide interfaces that are specific to particular
operating systems, like socket I/O; others provide interfaces that are
specific to a particular application domain, like the World-Wide Web.
Some modules are available in all versions and ports of Python; others
are only available when the underlying system supports or requires
them; yet others are available only when a particular configuration
option was chosen at the time when Python was compiled and installed.
This manual is organized ``from the inside out'': it first describes
the built-in data types, then the built-in functions and exceptions,
and finally the modules, grouped in chapters of related modules. The
ordering of the chapters as well as the ordering of the modules within
each chapter is roughly from most relevant to least important.
This means that if you start reading this manual from the start, and
skip to the next chapter when you get bored, you will get a reasonable
overview of the available modules and application areas that are
supported by the Python library. Of course, you don't \emph{have} to
read it like a novel --- you can also browse the table of contents (in
front of the manual), or look for a specific function, module or term
in the index (in the back). And finally, if you enjoy learning about
random subjects, you can choose a random page number (see module
\code{rand}) and read a section or two.
Let the show begin!
\documentclass[12pt]{article}
\author{David Alves}
\usepackage{amsfonts}
\usepackage{amsmath}
\usepackage{amsthm}
\usepackage{dirtytalk}
\usepackage[a4paper, total={6.5in, 8.5in}]{geometry}
\usepackage{forest}
\usepackage{skak}
\usepackage{tikz}
\usepackage{titling}
\usepackage{wrapfig}
\def\multichoose#1#2{\ensuremath{\left(\kern-.3em\left(\genfrac{}{}{0pt}{}{#1}{#2}\right)\kern-.3em\right)}}
\title{Math 142 Problem Set 5}
\author{David Alves}
\date{2016-09-27}
\begin{document}
\pagenumbering{gobble}
\begin{center}
\Large \thetitle \\
\large \theauthor \\
\thedate
\end{center}
\subsection*{Sources}
\begin{itemize}
\item http://tex.stackexchange.com and https://www.sharelatex.com for help with \LaTeX
\end{itemize}
\section{Edges in a Complete Graph }
\subsection*{Problem Statement}
Consider a complete graph on 6 vertices. This means that every pair of vertices has exactly one edge between them. How many edges are there in the graph? What if the graph has $n$ vertices?
\subsection*{Solution}
Consider a complete undirected graph $G$ with $n$ vertices. Each edge connects two distinct vertices from the vertex set, and since the graph is undirected the order of those vertices does not matter. Therefore there is a bijection between the set of edges in $G$ and the set of two-element subsets of the vertex set, so there are $\binom{n}{2}$ edges in a complete graph with $n$ vertices. For $n=6$, there are $\binom{6}{2} = 15$ such edges.
\section{Triangles in a Grid}
\subsection*{Problem Statement}
How many triangles can be made with vertices from a $4\times4$ square grid of points? A $2\times2$ square grid would allow 4 triangles. Triangles must use three distinct points and the points cannot be colinear.
\subsection*{Solution}
516 valid triangles can be formed from a grid of $4 \times 4$ points.
\begin{proof}
In a $4 \times 4$ grid there are 16 points. A valid triangle is formed by three distinct noncolinear points. There are $\binom{16}{3} = 560$ ways to choose three distinct points for a triangle. By the sum principle, 560 equals the number of valid triangles plus the number of ``colinear triangles'' (triples of distinct colinear points). Therefore we can subtract the number of colinear triangles from 560 to determine the number of valid triangles.
The number of colinear triangles is equal to the number of colinear triangles on rows plus the number of colinear triangles on columns plus the number of colinear triangles on NW-SE diagonals plus the number of colinear triangles on NE-SW diagonals.
Each of the four rows contains $\binom{4}{3} = 4$ ways of choosing three colinear points, giving a total of 16 colinear triangles in the rows. By symmetry, there are also 16 colinear triangles in columns. The long NE-SW diagonal also has $\binom{4}{3} = 4$ ways of choosing three colinear points. There are two length-3 NE-SW diagonals, shown here:
\begin{center}
\begin{tikzpicture}
\foreach \x in {0,1,2,3}
\foreach \y in {0,1,2,3}
{
\fill (\x,\y) circle (0.1cm);
}
\draw[rounded corners=8pt,rotate around={45:(0,1)}] (-.3,.7) rectangle (3.242,1.3);
\draw[rounded corners=8pt,rotate around={45:(1,0)}] (.7,-.3) rectangle (4.242,.3);
\end{tikzpicture}
\end{center}
Each of the length-3 diagonals has $\binom{3}{3} = 1$ way of choosing points for a colinear triangle. This gives $4+1+1 = 6$ colinear triangles on NE-SW diagonals, and therefore 6 on the NW-SE diagonals by symmetry. Thus we have $560 - 16 - 16 - 6 - 6 = 516$ noncolinear triangles on a $4 \times 4$ grid of points.
\end{proof}
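As a sanity check (not part of the original solution), the count can be confirmed by a brute-force enumeration:
\begin{verbatim}
# Brute-force check of the 4x4 grid triangle count (illustrative sketch).
from itertools import combinations

points = [(x, y) for x in range(4) for y in range(4)]

def colinear(a, b, c):
    # three points are colinear iff the cross product of (b-a) and (c-a) is 0
    return (b[0]-a[0])*(c[1]-a[1]) - (b[1]-a[1])*(c[0]-a[0]) == 0

print(sum(1 for a, b, c in combinations(points, 3) if not colinear(a, b, c)))
# prints 516
\end{verbatim}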
\section{Stirling Numbers of the Second Kind}
\subsection*{Problem Statement}
Prove that
\[
S(n, m) = \sum_{i=m}^{n}m^{n-i}S(i-1, m-1)
\]
\subsection*{Solution 1}
\begin{proof}
Consider $n$ distinguishable balls being placed into $k$ indistinguishable bins such that each bin contains at least one ball. Without loss of generality, number the balls $b_1, b_2, \ldots, b_n$. Let $b_i$ be the first ball such that $b_1, b_2, \ldots, b_i$ put at least one ball into each bin. We have $k \leq i \leq n$ because we need at least $k$ balls to place one in each of $k$ bins. Thus we know that $b_1, b_2, \ldots, b_{i-1}$ fill exactly $k-1$ bins, and $b_i$ can only be placed into one bin (the empty one). All balls $b_{i+1}, b_{i+2}, \ldots, b_n$ can be placed into any bin, since every bin already contains at least one ball.
The number of ways to place $n$ distinguishable balls into $k$ indistinguishable nonempty bins is defined as $S(n,k)$. Thus we can sum over all possible choices of $i$ from $k$ to $n$. For each value of $i$, there are $S(i-1, k-1)$ ways to place the balls before $b_i$ into $k-1$ bins, and $k^{n-i}$ ways to place the balls that come after $b_i$, since they can go into any bin. Thus $S(n,k) = \sum_{i=k}^{n}k^{n-i}S(i-1, k-1)$.
\end{proof}
\subsection*{Solution 2}
\begin{proof}
The Stirling number of the second kind $S(n,k)$ is defined as the number of ways to divide $n$ items into $k$ groups. Without loss of generality, consider a single item. If that item is in a group by itself, then there are $S(n-1, k-1)$ to partition the remaining items. If that item is not in a group by itself, there are $S(n-1, k)$ ways to partition the remaining items, and the item can go into any of the existing $k$ groups. This leads to the following recurrence relation:
\[
S(n,k) = S(n-1, k-1) + kS(n-1,k)
\]
This recurrence relation can be applied recursively to the $kS(n-1,k)$ term, giving
\[
S(n,k) = S(n-1, k-1) + k\Big( S(n-2, k-1) + k\big( S(n-3, k-1) + k(\ldots ) \big) \Big)
\]
Multiplying through gives
\begin{multline*}
S(n,k) = S(n-1, k-1) + kS(n-2, k-1) + k^2S(n-3, k-1) + \ldots + k^{n-k}S(k-1,k-1)
\end{multline*}
which can be written as:
\[
\sum_{i=k}^{n}k^{n-i}S(i-1, k-1)
\]
\end{proof}
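As a numerical cross-check (not part of the original solutions), the identity can be verified for small $n$ and $k$ using the standard recurrence:
\begin{verbatim}
# Check S(n,k) = sum_{i=k}^{n} k^(n-i) S(i-1,k-1) for small n, k (sketch).
from functools import lru_cache

@lru_cache(maxsize=None)
def S(n, k):
    # Stirling numbers of the second kind via the standard recurrence
    if n == k:
        return 1
    if k == 0 or k > n:
        return 0
    return S(n - 1, k - 1) + k * S(n - 1, k)

for n in range(1, 10):
    for k in range(1, n + 1):
        rhs = sum(k**(n - i) * S(i - 1, k - 1) for i in range(k, n + 1))
        assert S(n, k) == rhs
print("identity holds for all n < 10")
\end{verbatim}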
\section{Squares and Cubes}
\subsection*{Problem Statement}
Show that for all positive integers $n$,
\[
\big(1 + 2 + \ldots + n\big)^2 = 1^3 + 2^3 + \ldots + n^3
\]
\subsection*{Solution}
\begin{proof}
We prove this by induction on the statement $p(k) = \big(1 + 2 + \ldots + k\big)^2 = 1^3 + 2^3 + \ldots + k^3$. For $k = 1$, $p(1)$ gives $1^2 = 1^3$, which is true. For $k > 1$, we show that if $p(k-1)$ is true, then $p(k)$ is true. First note that the sum of integers $1 + 2 + \ldots + x = \frac{x(x+1)}{2} $. Thus $p(k-1)$ can be written as
\begin{equation}\label{eq:squares1}
\left(\frac{(k-1)k}{2}\right)^2 = 1^3 + 2^3 + \ldots + (k-1)^3
\end{equation}
and $p(k)$ can be written as
\begin{equation}\label{eq:squares2}
\left(\frac{k(k+1)}{2}\right)^2 = 1^3 + 2^3 + \ldots + k^3
\end{equation}
Subtracting equation \ref{eq:squares1} from equation \ref{eq:squares2} gives
\begin{equation}\label{eq:squares3}
\frac{k^2(k+1)^2 - k^2(k-1)^2}{4} = k^3
\end{equation}
Dividing both sides by $k^2$ and expanding the squares gives
\begin{equation}
\frac{(k^2+2k+1) - (k^2-2k+1)}{4} = k
\end{equation}
or $\frac{4k}{4} = k$. Thus we have shown that if $p(k-1)$ is true, then $p(k)$ is true, which completes the inductive proof.
\end{proof}
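A quick numerical spot-check of the identity (again, not part of the proof):
\begin{verbatim}
# Verify (1 + 2 + ... + n)^2 = 1^3 + 2^3 + ... + n^3 for n up to 100 (sketch).
for n in range(1, 101):
    assert sum(range(1, n + 1))**2 == sum(i**3 for i in range(1, n + 1))
print("identity holds for n up to 100")
\end{verbatim}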
\section{Multisets}
\subsection*{Problem Statement}
Define a \emph{multiset} to be a set that allows multiple copies of any element, knows how many copies of each element are present, but does not know the order. (Example: $\{2,3,2\} = \{2,2,3\}$, but $\{2,2,3\} \neq \{2,3\}$). What is the number of multisets of size 3 using elements from $[5]$? What about in general (size $m$ using elements from $[n]$?)
\subsection*{Solution}
There are $\binom{7}{3} = 35$ multisets on $[5]$ with 3 elements. More generally, there are $\binom{m+n-1}{m}$ multisets on $[n]$ with $m$ elements.
\begin{proof}
There is a bijection between multisets on $[n]$ with $m$ elements and lists of length $m+n-1$ whose entries are 0 or 1 with exactly $n-1$ entries equal to 1. A multiset can be represented as a list of $m$ \say{stars} (0s) and $n-1$ \say{bars} (1s) such that the number of stars before the first bar is the number of 1s in the multiset, the number of stars between the first and second bars is the number of 2s in the multiset, etc. For example, the multiset $\{1,3,3\}$ on $[5]$ would be represented as $0,1,1,0,0,1,1$. Choosing such a list amounts to choosing which $m$ of the $m+n-1$ positions hold stars, so there are $\binom{m+n-1}{m}$ multisets on $[n]$ with $m$ elements.
\end{proof}
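The count can also be checked by direct enumeration (an illustrative sketch, not part of the original solution):
\begin{verbatim}
# Enumerate multisets of size 3 drawn from [5] directly (sketch).
from itertools import combinations_with_replacement
from math import comb

multisets = list(combinations_with_replacement(range(1, 6), 3))
print(len(multisets), comb(3 + 5 - 1, 3))  # both print 35
\end{verbatim}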
\section{Time Spent}
I spent about six hours on this problem set. I got stuck for a while on how to intuitively understand the Stirling numbers recurrence. Overall this wasn't a very difficult problem set, but it was useful practice.
\end{document}
% Chapter X
\chapter{Introduction} % Chapter title
\label{ch:introduction} % For referencing the chapter elsewhere, use \autoref{ch:name}
%----------------------------------------------------------------------------------------
This report details the design and implementation of \emph{Handy}, a Haskell-based simulator of the ARM4T Instruction Set Architecture as seen in the ARM7TDMI microprocessor. \emph{Handy} implements a great majority of the instructions present in the ARM4T ISA in a clean functional style. \emph{Handy} consists of a small virtual machine, complete with registers and a true Von Neumann memory architecture, including an instruction encoder and decoder capable of assembling or disassembling ARM machine code.
While first and foremost a simulator that implements the operational semantics of the ARM4T Instruction Set Architecture, \emph{Handy} also implements several features inspired by the physical ARM7TDMI microprocessor. These are:
\begin{itemize}
\item Instruction pipelining into three stages:
\begin{itemize}
\item Fetch, where an instruction is retrieved from memory.
\item Decode, where a previously fetched instruction is prepared for execution.
\item Execute, where an instruction is executed and processor state updated based on its result.
\end{itemize}
\item Stalling of execution, where some instructions require the processor to halt for a number of cycles while they complete.
\end{itemize}
The inclusion of these features allows for a richer and more accurate simulation, allowing \emph{Handy} to emulate an ARM7TDMI for simple purposes. \emph{Handy} permits a programmer to execute simple ARM assembly programs that do not require interrupts or physical hardware in a lightweight environment, removing the need for a physical development board or a full-featured emulator, both of which are typically costly in terms of money and effort.
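To make the pipelining and stalling behaviour concrete, the following toy model (a conceptual sketch only, not \emph{Handy}'s Haskell implementation; the instruction names and cycle counts are invented) steps a three-stage fetch/decode/execute pipeline and holds the pipeline whenever an instruction requires extra cycles.
\begin{verbatim}
# Toy three-stage pipeline with stalling (conceptual sketch, not Handy itself).
# Each "instruction" is just a (name, stall_cycles) pair for illustration.
program = [("MOV", 0), ("LDR", 2), ("ADD", 0), ("MUL", 1), ("STR", 0)]

fetched, decoded = None, None   # pipeline registers between stages
pc, stall, cycle = 0, 0, 0

while pc < len(program) or fetched or decoded:
    cycle += 1
    if stall > 0:
        # Execute stage is busy: the whole pipeline holds its state this cycle.
        stall -= 1
        print(f"cycle {cycle}: stall")
        continue
    # Execute: retire the previously decoded instruction.
    if decoded:
        name, extra = decoded
        stall = extra           # multi-cycle instructions stall the pipeline
        print(f"cycle {cycle}: execute {name}")
    # Decode: pass the previously fetched instruction forward.
    decoded = fetched
    # Fetch: read the next instruction from "memory".
    fetched = program[pc] if pc < len(program) else None
    pc += 1 if pc < len(program) else 0
\end{verbatim}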
%------------------------------------------------
\subsection{Motivation}
The original motivation for development of \emph{Handy} was to serve as a platform for use in teaching Compiler Design to students in Trinity College Dublin. Students of Trinity College Dublin study the ARM4T Instruction Set through use of the ARM7TDMI processor in their first and second years of university. In their third year they undertake a mandatory class in Compiler Design in which they implement a simple programming language on a virtual machine that executes its own bytecode and utilises an assembly language contrived specifically for that purpose. While this solution is adequate, the potential to use the ARM assembly language as a target language is very appealing, as it leverages two years' worth of study already undertaken by students.
The main obstacle to using the ARM assembly language is the lack of a platform on which to execute it. While Trinity College has a significant number of ARM7TDMI development boards, they are in high demand, and requiring their use by students studying Compiler Design might be considered excessive. ARM provide their own development environment, $\mu$Vision, which includes a full emulator, but this is a very substantial program for which students must apply for an educational license before they can download and use it. Additionally, both of these options are very challenging to integrate seamlessly into a workflow where they are not the primary focus.
The solution, given the unsuitability of the two major options already available, was to forge a new path: implement a virtual machine that executes only the ARM assembly language, without simulating the full range of physical components, as a single-purpose binary that can easily be included in a tool chain provided to students.
%------------------------------------------------
\subsection{Literature Review}
The use of Haskell to implement an ARM4T simulator is not without precedent. A package in the Haskell ``Hackage'' repository entitled HARM, written in 2001 for use in a University of Connecticut Computer Science class (``CSE240 Intermediate Computer Systems''), seeks to accomplish a similar goal. The possible use or extension of this project was investigated but found not to be desirable: the style of its implementation is unidiomatic to the point that many of the benefits of using Haskell for this purpose are lost. It was quickly dismissed.
Excluding HARM, we were unable to find any other extant projects in this vein, and so turned to implementation from scratch. The primary source of information throughout this endeavour was the ARM Architecture Reference Manual\citep{armarm:2005}. When this proved too dense or required further explanation, we made reference to ARM Assembly Language: Fundamentals and Techniques\citep{hohl:2009} and ARM System-on-Chip Architecture\citep{furber2000arm}. Between these three texts more than enough information for a full implementation could be found, and in particular the ARM Architecture Reference Manual\citep{armarm:2005} will be referenced many times throughout this report.
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% "ModernCV" CV and Cover Letter
% LaTeX Template
% Version 1.1 (9/12/12)
%
% This template has been downloaded from:
% http://www.LaTeXTemplates.com
%
% Original author:
% Xavier Danaux ([email protected])
%
% License:
% CC BY-NC-SA 3.0 (http://creativecommons.org/licenses/by-nc-sa/3.0/)
%
% Important note:
% This template requires the moderncv.cls and .sty files to be in the same
% directory as this .tex file. These files provide the resume style and themes
% used for structuring the document.
%
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%----------------------------------------------------------------------------------------
% PACKAGES AND OTHER DOCUMENT CONFIGURATIONS
%----------------------------------------------------------------------------------------
\documentclass[11pt,letterpaper,sans]{moderncv} % Font sizes: 10, 11, or 12; paper sizes: a4paper, letterpaper, a5paper, legalpaper, executivepaper or landscape; font families: sans or roman
\moderncvstyle{banking} % CV theme - options include: 'casual' (default), 'classic', 'oldstyle' and 'banking'
\moderncvcolor{blue} % CV color - options include: 'blue' (default), 'orange', 'green', 'red', 'purple', 'grey' and 'black'
%\usepackage{lipsum} % Used for inserting dummy 'Lorem ipsum' text into the template
\usepackage[scale=0.80]{geometry} % Reduce document margins
%\setlength{\hintscolumnwidth}{3cm} % Uncomment to change the width of the dates column
%\setlength{\makecvtitlenamewidth}{10cm} % For the 'classic' style, uncomment to adjust
%the width of the space allocated to your name
%----------------------------------------------------------------------------------------
% NAME AND CONTACT INFORMATION SECTION
%----------------------------------------------------------------------------------------
\firstname{Philip M.} % Your first name
\familyname{Johnson} % Your last name
% All information in this block is optional, comment out any lines you don't need
\title{Curriculum Vitae}
\address{Information and Computer Sciences, University of Hawaii}{Honolulu, HI 96822}
\phone{808.956.3489}
\email{[email protected]}
\homepage{philipmjohnson.org}{philipmjohnson.org}
%\extrainfo{additional information}
%\photo[70pt][0.4pt]{pictures/picture.jpg} % The first bracket is the picture height, the second is the thickness of the frame around the picture (0pt for no frame)
%\quote{"A witty and playful quotation" - John Smith}
\extrainfo{\homepagesymbol \httplink[github.com/philipmjohnson]{github.com/philipmjohnson}}
%----------------------------------------------------------------------------------------
\begin{document}
\makecvtitle % Print the CV title
%----------------------------------------------------------------------------------------
% EDUCATION SECTION
%----------------------------------------------------------------------------------------
\section{Education}
\cventry{1990}{Ph.D., Computer Science}{University of Massachusetts}{Amherst, MA}{}{}
\cventry{1985}{M.S., Computer Science}{University of Massachusetts}{Amherst, MA}{}{}
\cventry{1980}{B.S., Computer Science}{University of Michigan}{Ann Arbor, MI}{}{}
\cventry{1980}{B.S., Biology}{University of Michigan}{Ann Arbor, MI}{}{}
%----------------------------------------------------------------------------------------
% WORK EXPERIENCE SECTION
%----------------------------------------------------------------------------------------
\section{Academic Experience}
\cventry{1990--Present}{Professor}{University of Hawaii}{Honolulu, HI}{}{}
\begin{itemize}
\item Professor \hfill {\em 2001--Present}
\item Associate Chair \hfill {\em 2010--2016}
\item Director, \homepagesymbol \href{http://csdl.ics.hawaii.edu}{Collaborative Software
Development Laboratory} \hfill {\em 1990--Present}
\item Associate Professor \hfill {\em 1995--2001}
\item Assistant Professor \hfill {\em 1990--1995}
\end{itemize}
%\medskip
\cventry{2006}{Visiting Professor}{School of Engineering, Blekinge Institute of Technology}{Karlskrona, Sweden}{}{}
%\medskip
\cventry{1997}{Senior Research Fellow}{Distributed Systems Technology Centre, University
of Queensland}{Brisbane, Australia}{}{}
%\medskip
\cventry{1984--1986, 1987--1990}{Research Assistant}{Department of Computer Science,
University of Massachusetts}{Amherst, MA}{}{}
%\medskip
\cventry{1981--1982}{Lecturer}{Department of Computer Science,
University of Michigan}{Ann Arbor, MI}{}{}
%\newpage
%------------------------------------------------
\section{Industry Experience}
\cventry{2013--2019}{Co-Founder}{OpenPowerQuality.com}{Honolulu, HI}{}{}
\cventry{2006--2009}{Member, Technical Advisory Board}{Sixth Sense Analytics}{Raleigh, NC}{}{}
\cventry{2006--2009}{Member, Board of Directors}{Hawaii Strategic Development Corporation}{Honolulu, HI}{}{}
\cventry{2003--2005}{Member, Board of Directors}{Tiki Technologies, Inc.}{Honolulu, HI}{}{}
\cventry{2002--2005}{Member, Board of Directors}{LavaNet, Inc.}{Honolulu, HI}{}{}
\cventry{2000--Present}{Member, Professional Advisory Board}{BreastCancer.org}{Philadelphia, PA}{}{}
\cventry{2000--2004}{Member, Board of Directors}{High Technology Development Corporation}{Honolulu, HI}{}{}
\cventry{2000}{Co-Founder}{hotU, Inc.}{Honolulu, HI}{}{}
\cventry{2000--2002}{Member, Professional Advisory Board}{Referentia, Inc.}{Honolulu, HI}{}{}
\cventry{1986--1987}{Lisp Hacker}{Department of Computer Science, University of Massachusetts}{Amherst, MA}{}{}
\cventry{1982--1983}{Systems Programmer}{Software Services Corporation}{Ann Arbor, MI}{}{}
\cventry{1981--1983}{Systems Analyst}{Veterans Hospital}{Ann Arbor, MI}{}{}
\cventry{1978}{Programmer}{Great Lakes Software Systems}{Ann Arbor, MI}{}{}
%----------------------------------------------------------------------------------------
% AWARDS SECTION
%----------------------------------------------------------------------------------------
\section{Publications}
\subsection{Journal articles}
\cvpubitemlink{Is GPA enough? A platform for promoting computer science undergraduates' pursuit of career related extracurricular activities}
{S. Paek, P. Leong, P. Johnson, and C. Moore}
{International Journal of Technology in Education and Science}
{Volume 5, Number 1, 2021}
{http://csdl.ics.hawaii.edu/techreports/2020/20-09/20-09.pdf}
\cvpubitemlink{Beyond course work: expanding what's valued in computer science degree programs}
{S. Paek, P. Leong, P. Johnson, and C. Moore}
{Journal of Applied Research in Higher Education}
{Volume 12, 2020}
{http://csdl.ics.hawaii.edu/techreports/2020/20-08/20-08.pdf}
\cvpubitemlink{Design, Implementation, and Evaluation of Open Power Quality}
{A. Christe, S. Negrashov, and P. Johnson}
{Energies}
{Volume 13, Issue 15, August, 2020}
{http://csdl.ics.hawaii.edu/techreports/2020/20-07/20-07.pdf}
\cvpubitemlink{Design and evaluation of an ``athletic" approach to software engineering education}
{P. Johnson}
{ACM Transactions on Computing Education}
{Volume 19, Issue 4, August, 2019}
{http://csdl.ics.hawaii.edu/techreports/2019/19-03/19-03.pdf}
\cvpubitemlink{Is an athletic approach the future of software engineering?}
{E.Hill, P. Johnson, D.Port}
{IEEE Software}
{January 2016}
{http://csdl.ics.hawaii.edu/techreports/2015/15-03/15-03.pdf}
\cvpubitemlink{Three principals for the design of energy feedback visualizations}
{R.Brewer, Y. Xu, G. Lee, M. Katchuck, C. Moore, P. Johnson}
{International Journal on Advances in Intelligent Systems}
{Volume 6, No. 3, December 2013}
{http://csdl.ics.hawaii.edu/techreports/2013/13-05/13-05.pdf}
\cvpubitemlink{Searching under the streetlight for useful software analytics}
{P. Johnson}
{IEEE Software}
{Volume 30, No. 4, July 2013}
{http://csdl.ics.hawaii.edu/techreports/2012/12-11/12-11.pdf}
\cvpubitemlink{Operational definition and automated inference of test-driven development with Zorro}
{H. Kou, P. Johnson, H. Erdogmus}
{Automated Software Engineering}
{Volume 16, Number 4, December 2009}
{http://csdl.ics.hawaii.edu/techreports/09-01/09-01.pdf}
\cvpubitemlink{Protocols in the use of empirical software engineering artifacts}
{V. Basili, M. Zelkowitz, D. Sjoberg, P. Johnson, T. Cowling}
{Empirical Software Engineering}
{Volume 12, February 2007}
{http://csdl.ics.hawaii.edu/techreports/06-07/06-07.pdf}
\cvpubitemlink{Experiments to understand HPC time to development}
{L. Hochstein, T. Nakamura, V. Basili, S. Asgari, M. Zelkowitz, J. Hollingsworth, F. Shull, J. Carver, M. Voelp, N. Zazworka, P. Johnson}
{CTWatch Quarterly}
{November 2006}
{http://csdl.ics.hawaii.edu/techreports/06-08/06-08.pdf}
\cvpubitemlink{Improving software development management through software project telemetry}
{P. Johnson, H.~Kou, M.~Paulding, Q.~Zhang, A.~Kagawa, T.~Yamashita}
{IEEE Software}
{Volume 22, No. 4, July 2005}
{http://csdl.ics.hawaii.edu/techreports/04-11/04-11.pdf}
\cvpubitemlink{Measuring {HPC} productivity}
{S.~Faulk, J.~Gustafson, P.~Johnson, A.~Porter, W.~Tichy, L.~Votta}
{International Journal of High Performance Computing Applications}
{December 2004}
{http://csdl.ics.hawaii.edu/techreports/04-04/04-04.pdf}
\cvpubitemlink{Lessons learned from VCommerce: A virtual environment for interdisciplinary learning about software entrepreneurship}
{P.~Johnson, M.~Moffett, B.~Pentland}
{Communications of the ACM}
{Volume 46, No. 12, December 2003}
{http://csdl.ics.hawaii.edu/techreports/01-02/01-02.pdf}
\cvpubitemlink{Empirically guided software effort guesstimation}
{P.~Johnson, C.~Moore, J.~Dane, R.~Brewer}
{IEEE Software}
{Volume 17, No. 6, December 2000}
{http://csdl.ics.hawaii.edu/techreports/00-03/00-03.pdf}
\cvpubitemlink{A critical analysis of PSP data quality: Results from a case study}
{P.~Johnson, A.~Disney}
{Journal of Empirical Software Engineering}
{Volume 4, December 1999}
{http://csdl.ics.hawaii.edu/techreports/98-13/98-13.pdf}
\cvpubitem{The Personal Software Process: A cautionary case study}
{P.~Johnson, A.~Disney}
{IEEE Software}
{Volume 15, No. 6, November 1998}
\cvpubitem{Reengineering inspection}
{P.~Johnson}
{Communications of the ACM}
{Volume 41, No. 2, February 1998}
\cvpubitem{Does every inspection really need a meeting?}
{P.~Johnson, D.~Tjahjono}
{Journal of Empirical Software Engineering}
{Volume 4, No. 1, January 1998}
\cvpubitem{Assessing software review meetings: Results of a comparative analysis of two experimental studies}
{A.~Porter, P.~Johnson}
{IEEE Transactions on Software Engineering}
{Volume 23, No. 3, March 1997}
\cvpubitem{Design for instrumentation: High quality measurement of formal technical review}
{P.~Johnson}
{Software Quality Journal}
{Volume 5, March 1996}
\cvpubitem{Experiences with CLARE: a computer-supported collaborative learning environment}
{D.~Wan, P.~Johnson}
{International Journal of Human-Computer Studies}
{Volume 41, December 1994}
\cvpubitem{Experiences with EGRET: An exploratory group work environment}
{P. Johnson}
{Collaborative Computing, Volume 1, No. 1}
{March, 1994}
\cvpubitem{The clinical significance of electrogastrography}
{B.~Walker, M.~Walker, S.~Achem, P.~Johnson, R.~Gregg}
{Psychophysiology}
{Volume 20, 1983}
\subsection{Book chapters}
\cvpubitem{An instrumented approach to improving software quality through formal technical review}
{P. Johnson}
{Software Inspection: An Industry Best Practice}
{David A. Wheeler, Bill Brykczynski, Reginald N. Meeson, Jr., Editors, IEEE Computer Society Press, 1996.
Also appearing in the Proceedings of the 16th International Conference on Software Engineering, Sorrento, Italy, 1994}
\cvpubitem{Beyond exploratory programming: A methodology and environment for natural language processing}
{P.~Johnson, W.~Lehnert}
{Artificial Intelligence and Software Engineering}
{D.~Partridge, editor, Ablex 1990. Also appearing in Proceedings of the Fifth National Conference on
Artificial Intelligence (AAAI-86), Philadelphia, PA.}
\cvpubitem{Achieving flexibility, efficiency, and generality in blackboard architectures}
{D.~Corkill, K.~Gallagher, and P.~Johnson}
{Readings in Distributed Artificial Intelligence}
{A.~Bond and L.~Gasser, editors. Morgan-Kaufman, 1988. Also appearing in Proceedings of the Sixth National Conference on Artificial Intelligence (AAAI-87), Seattle, WA.}
\cvpubitem{Design and evaluation of the Makahiki open source serious game framework for sustainability education}
{Y. Xu, P. Johnson, G. Lee, C. Moore, and R. Brewer}
{Sustainability, Green IT and Education Strategies in the 21st Century}
{T. Issa, editor. Springer, 2016. An earlier version appears in Proceedings of the 2014 International Conference on Sustainability, Technology, and Education, Taipei, Taiwan.}
\subsection{Conference publications}
\cvpubitemlink{RadGrad: Removing the `Extra' from Extracurricular to Improve Student Engagement, Retention, and Diversity}
{Philip M. Johnson, Carleton A. Moore, Peter Leong, and Seungoh Paek}
{Proceedings of the 51st ACM Technical Symposium on Computer Science Education (SIGCSE 2020)}
{Portland, OR, March, 2020}
{http://csdl.ics.hawaii.edu/techreports/2019/19-04/19-04.pdf}
\cvpubitemlink{A transient classification system implementation on an open source distributed power quality network,}
{Charles Dickens, Anthony J. Christe, and Philip M. Johnson}
{Proceedings of the Ninth International Conference on Smart Grids, Green Communications and IT Energy-aware Technologies}
{Athens, Greece, June, 2019}
{http://csdl.ics.hawaii.edu/techreports/2019/19-02/19-02.pdf}
\cvpubitemlink{OPQ Version 2: An Architecture for Distributed, Real-Time, High Performance Power Data Acquisition, Analysis, and Visualization}
{A. Christe, S. Negrashov, P. Johnson, D. Nakahodo, D. Badke, D. Aghalarpour}
{Proceedings of the Seventh Annual IEEE International Conference on CYBER Technology in Automation, Control, and Intelligent Systems}
{Honolulu, HI, USA, July, 2017}
{http://csdl.ics.hawaii.edu/techreports/2017/17-03/17-03.pdf}
\cvpubitemlink{OpenPowerQuality: An Open Source Framework for Power Quality Collection, Analysis, Visualization, and Privacy}
{A. Christe, S. Negrashov, P. Johnson}
{Proceedings of the Seventh Conference on Innovative Smart Grid Technologies (ISGT2016)}
{Minneapolis, MN, USA, September, 2016}
{http://csdl.ics.hawaii.edu/techreports/2016/16-02/16-02.pdf}
\cvpubitemlink{An athletic approach to software engineering education}
{P.Johnson, D.Port, E.Hill}
{Proceedings of the 29th IEEE Conference on Software Engineering Education and Training}
{Dallas, Texas, USA, April, 2016}
{http://csdl.ics.hawaii.edu/techreports/2016/16-01/16-01.pdf}
\cvpubitemlink{Makahiki: An open source serious game framework for sustainability education and conservation}
{Y. Xu, P. Johnson, G. Lee, C. Moore, R. Brewer}
{Proceedings of the 2014 International Conference on Sustainability, Technology, and Education}
{New Taipei City, Taiwan, December, 2014}
{http://csdl.ics.hawaii.edu/techreports/2014/14-10/14-10.pdf}
\cvpubitemlink{SGSEAM: Assessing serious game frameworks from a stakeholder experience perspective}
{Y. Xu, P. Johnson, C. Moore, R. Brewer, J. Takayama}
{Proceedings of the First International Conference on Gameful Design, Research, and Applications}
{Stratford, Ontario, Canada, October 2013}
{http://csdl.ics.hawaii.edu/techreports/2013/13-03/13-03.pdf}
\cvpubitemlink{Energy feedback for smart grid consumers: Lessons learned from the Kukui Cup}
{R. Brewer, Y. Xu, G. Lee, M. Katchuck, C. Moore, P. Johnson}
{Proceedings of Energy 2013}
{Lisbon, Portugal, March 2013}
{http://csdl.ics.hawaii.edu/techreports/2012/12-12/12-12.pdf}
\cvpubitemlink{Makahiki+WattDepot: An open source software stack for next generation energy research and education}
{P. Johnson, Y. Xu, R. Brewer, C. Moore, G. Lee, A. Connell}
{Proceedings of the 2012 Conference on Information and Communication Technologies for Sustainability}
{Zurich, Switzerland, February 2013}
{http://csdl.ics.hawaii.edu/techreports/2012/12-06/12-06.pdf}
\cvpubitemlink{Beyond kWh: Myths and fixes for energy competition game design}
{P. Johnson, Y. Xu, R. Brewer, G. Lee, M. Katchuck, C. Moore}
{Proceedings of Meaningful Play 2012}
{Lansing, Michigan, October 2012}
{http://csdl.ics.hawaii.edu/techreports/2012/12-08/12-08.pdf}
\cvpubitemlink{The Kukui Cup: A dorm energy competition focused on sustainable behavior change and energy literacy}
{R. Brewer, G. Lee, P. Johnson}
{Proceedings of the 43rd Hawaii International Conference on System Sciences}
{Poipu, Hawaii, January 2011}
{http://csdl.ics.hawaii.edu/techreports/2010/10-07/10-07.pdf}
\cvpubitemlink{WattDepot: An open source software ecosystem for enterprise-scale energy data collection, storage, analysis, and visualization}
{R. Brewer and P. Johnson}
{Proceedings of the First IEEE International Conference on Smart Grid Communications}
{Gaithersburg, Maryland, October 2010}
{http://csdl.ics.hawaii.edu/techreports/2010/10-05/10-05.pdf}
\cvpubitemlink{We need more coverage, stat! Experiences with the Software ICU}
{P. Johnson and S. Zhang}
{Proceedings of the 2009 Conference on Empirical Software Engineering and Measurement}
{Orlando, Florida, October 2009}
{http://csdl.ics.hawaii.edu/techreports/2009/09-02/09-02.pdf}
\cvpubitemlink{Requirement and design trade-offs in Hackystat: An in-process software engineering measurement and analysis system}
{P. Johnson}
{Proceedings of the 2007 International Symposium on Empirical Software Engineering and Measurement}
{Madrid, Spain, September 2007}
{http://csdl.ics.hawaii.edu/techreports/2006/06-06/06-06.pdf}
\cvpubitem{Generalizing fault contents from a few classes}
{H. Scott and P. Johnson}
{Proceedings of the 2007 International Symposium on Empirical Software Engineering and Measurement}
{Madrid, Spain, September 2007}
\cvpubitemlink{Automated recognition of test-driven development with Zorro}
{P. Johnson and H. Kou}
{Proceedings of Agile 2007}
{Washington, D.C., August 2007}
{http://csdl.ics.hawaii.edu/techreports/2006/06-13/06-13.pdf}
\cvpubitemlink{Practical automated process and product metric collection and analysis in a classroom setting: {L}essons learned from {Hackystat-UH}}
{P.~Johnson, H.~Kou, J.~Agustin, Q.~Zhang, A.~Kagawa, T.~Yamashita}
{Proceedings of the 2004 Symposium on Empirical Software Engineering}
{Los Angeles, CA., August 2004}
{http://csdl.ics.hawaii.edu/techreports/2003/03-12/03-12.pdf}
\cvpubitemlink{Beyond the Personal Software Process: Metrics collection and analysis for the differently disciplined}
{P.~Johnson, H.~Kou, J.~Agustin, C.~Chan, C.~Moore, J.~Miglani, S.~Zhen, and W.~Doane}
{Proceedings of the 2003 International Conference on Software Engineering}
{Portland, OR., May 2003}
{http://csdl.ics.hawaii.edu/techreports/2002/02-07/02-07.pdf}
\cvpubitemlink{Leap: A ``Personal Information Environment'' for software engineers}
{P.~Johnson}
{Proceedings of the 1999 International Conference on Software Engineering}
{Los Angeles, CA., May 1999}
{http://csdl.ics.hawaii.edu/techreports/1999/99-08/99-08.pdf}
\cvpubitemlink{Investigating data quality problems in the PSP}
{A.~Disney, P.~Johnson}
{Proceedings of the Sixth International Symposium on the Foundations of Software Engineering}
{Orlando, FL., November 1998}
{http://csdl.ics.hawaii.edu/techreports/1998/98-04/98-04.pdf}
\cvpubitemlink{Assessing software review meetings: A controlled experimental study using CSRS}
{P.~Johnson, D.~Tjahjono}
{Proceedings of the 1997 International Conference on Software Engineering}
{Boston, MA., May 1997}
{http://csdl.ics.hawaii.edu/techreports/1996/96-06/96-06.pdf}
\cvpubitem{Computer supported collaborative learning using CLARE: The approach and experimental findings}
{D.~Wan and P.~Johnson}
{Proceedings of the 1994 ACM Conference on Computer Supported Cooperative Work}
{Chapel Hill, NC., 1994}
\cvpubitem{Supporting technology transfer of formal technical review through a computer supported collaborative review system}
{P.~Johnson}
{Proceedings of the Fourth International Conference on Software Quality}
{Reston, VA. 1994}
\cvpubitem{An instrumented approach to improving software quality through formal technical review}
{P.~Johnson}
{Proceedings of the 16th International Conference on Software Engineering}
{Sorrento, Italy, May 1994}
\cvpubitem{Experiences with CSRS: An instrumented software review environment}
{P.~Johnson, D.~Tjahjono, D.~Wan, R.~Brewer}
{Proceedings of the 11th Annual Pacific Northwest Software Quality Conference}
{Portland, OR. 1993}
\cvpubitem{Improving software quality through computer supported collaborative review}
{P.~Johnson, D.~Tjahjono}
{Proceedings of the Third European Conference on Computer Supported Cooperative Work}
{Milan, Italy. 1993}
\cvpubitem{Supporting Exploratory CSCW with the EGRET Framework}
{P.~Johnson}
{Proceedings of the ACM 1992 Conference on Computer Supported Cooperative Work}
{Toronto, Canada. 1992}
\cvpubitem{An Ada restructuring assistant}
{P.~Johnson, D.~Hildum, A.~Kaplan, C.~Kay, and J.~Wileden}
{Proceedings of the Fourth Annual Conference on Artificial Intelligence and Ada}
{Fairfax, VA. 1988}
\cvpubitem{Achieving flexibility, efficiency, and generality in blackboard architectures}
{D.~Corkill, K.~Gallagher, and P.~Johnson}
{Proceedings of the Sixth National Conference on Artificial Intelligence}
{Seattle, WA, 1987}
\cvpubitem{Beyond exploratory programming: A methodology and environment for natural language processing}
{P.~Johnson and W.~Lehnert}
{Proceedings of the Fifth National Conference on Artificial Intelligence}
{Philadelphia, PA, 1986}
\subsection{Workshop publications}
\cvpubitemlink{Lights Off. Game On. The Kukui Cup: A dorm energy competition}
{R. Brewer, G. Lee, Y. Xu, C. Desiato, M. Katchuck, P. Johnson}
{Proceedings of the 2011 CHI Workshop on Gamification}
{May 2011}
{http://csdl.ics.hawaii.edu/techreports/2011/11-02/11-02.pdf}
\cvpubitemlink{Ultra-automation and ultra-autonomy for software engineering management of ultra-large-scale systems}
{P. Johnson}
{Proceedings of the 2007 Workshop on Ultra Large Scale Systems}
{Minneapolis, Minnesota, May 2007}
{http://csdl.ics.hawaii.edu/techreports/2007/07-03/07-03.pdf}
\cvpubitemlink{Automated recognition of low-level process: A pilot validation study of Zorro for test-driven development}
{H. Kou, P.~Johnson}
{Proceedings of the 2006 International Workshop on Software Process}
{Shanghai, China, May 2006}
{http://csdl.ics.hawaii.edu/techreports/2006/06-02/06-02.pdf}
\cvpubitemlink{Understanding HPC Development through automated process and product measurement with Hackystat}
{P.~Johnson, M.~Paulding}
{Proceedings of the Second Workshop on Productivity and Performance in High-End Computing}
{February 2005}
{http://csdl.ics.hawaii.edu/techreports/2004/04-22/04-22.pdf}
\cvpubitemlink{You can't even ask them to push a button: Toward ubiquitous, developer-centric, empirical software engineering}
{P.~Johnson}
{Proceedings of the Workshop on New Visions for Software Design and Productivity: Research and Application}
{December 2001}
{http://csdl.ics.hawaii.edu/techreports/2001/01-12/01-12.pdf}
\cvpubitem{Project {LEAP}: Lightweight, empirical, anti-measurement dysfunction, and portable software developer improvement}
{P.~M.~Johnson}
{Software Engineering Notes}
{Volume 24, Number 6, December 1999}
\cvpubitem{Egret: A framework for advanced CSCW applications}
{P.~Johnson}
{Software Engineering Notes}
{Volume 21, Number 5, September 1996}
\cvpubitem{Assessing software review meetings: An empirical study using CSRS}
{P.~Johnson}
{Proceedings of the 1996 International Software Engineering Research Network Meeting}
{Sydney, Australia, August 1996}
\cvpubitem{Investigating Strong Collaboration with the Annotated Egret Navigator}
{P.~Johnson and C. Moore}
{Proceedings of the Fourth IEEE Workshop on Enabling Technologies: Infrastructure for Collaborative Enterprises}
{April 1995}
\cvpubitem{Computer supported formal technical review with CSRS}
{P.~Johnson}
{Software Inspection and Review Organization Newsletter}
{Volume 5, Number 3, December 1994}
\cvpubitem{Collaboration-in-the-large vs. Collaboration-in-the-small}
{P.~Johnson}
{Proceedings of the 1994 CSCW Workshop on Software Architectures for Cooperative Systems}
{Chapel Hill, VA. October 1994}
\cvpubitem{From principle-centered to organization-centered design: A case study of evolution in a computer-supported formal technical review environment}
{P.~Johnson}
{Proceedings of the 15th Interdisciplinary Workshop on Informatics and Psychology}
{Scharding, Austria 1994}
\cvpubitem{Report from the 1993 ECSCW Workshop on Tools and Technologies}
{P.~Johnson}
{SIGOIS Bulletin}
{April 1994}
\cvpubitem{Methodological issues in CSCW research}
{P.~Johnson}
{Proceedings of the 1993 ECSCW Workshop on Tools and Technologies}
{Milan, Italy 1993}
\cvpubitem{An architectural perspective on EGRET}
{P.~Johnson}
{Proceedings of the 1992 ACM Conference on Computer Supported Cooperative Work, Workshop on Tools and Technologies}
{Toronto, Canada 1992}
\cvpubitem{Collaborative software review for capturing design rationale}
{P.~Johnson}
{Proceedings of the AAAI Workshop on AI and Design Rationale}
{San Jose, CA 1992}
\cvpubitem{Supporting scientific learning and research review using COREVIEW}
{P.~Johnson}
{Proceedings of the AAAI Workshop on Communicating Scientific and Technical Knowledge}
{San Jose, CA. 1992}
\cvpubitem{EGRET: Exploring open, evolutionary, and emergent collaborative systems}
{P.~Johnson}
{Proceedings of the 1991 ECSCW Tools and Technologies Workshop}
{Amsterdam, The Netherlands 1991}
\cvpubitem{Structural evolution in exploratory software development}
{P.~Johnson}
{Proceedings of the 1989 AAAI Spring Symposium on AI and Software Engineering}
{Stanford University, CA. 1989}
\cvpubitem{A knowledge-based rhythm composition tool}
{S.~Founds, P.~Johnson}
{Proceedings of the 1989 IJCAI Workshop on Artificial Intelligence and Music}
{Detroit, MI. 1989}
\cvpubitem{Integrating BB1-style control into the generic blackboard system}
{P.~Johnson}
{Proceedings of the 1987 AAAI Workshop on Blackboard Systems}
{Seattle, WA. 1987}
\cvpubitem{Combining software engineering and artificial intelligence}
{P.~Johnson}
{Proceedings of the First International Workshop on Computer-Aided Software Engineering}
{Cambridge, MA. 1987}
\cvpubitem{From prototype to product: Evolutionary development from within the blackboard paradigm}
{D.~Corkill, K.~Gallagher, and P.~Johnson}
{Proceedings of the Workshop on High-level Tools for Knowledge-based Systems}
{Columbus, OH. 1986}
\cvpubitem{Requirements definition for a PLUMber's apprentice}
{P.~Johnson}
{Proceedings of the Second Annual Workshop on Theoretical Issues in Conceptual Information Processing}
{New Haven, CT. 1985}
\newpage
\section{Grants}
\cvgrantitem {Change Hawaii: Harnessing the Data Revolution for Island Resilience} %Title
{National Science Foundation} %Agency
{2022-2027} %Year
{G.~Jacobs, Principal Investigator; T.~Giambelluca, P.~Johnson, J.~Leigh, H.~Turner, Co-Principal Investigators} %Note
{\$20,000,000} %Amount
\cvgrantitem {Using Degree Experience Plans to Improve Engagement, Retention, and Diversity of Undergraduates in Computer Science} %Title
{National Science Foundation} %Agency
{2018-2022} %Year
{P. Johnson, Principal Investigator; Peter Leong, Seungoh Paek, Carleton Moore, Co-Principal Investigators} %Note
{\$331,208} %Amount
\cvgrantitem {Providing agile power quality monitoring to support the UH net zero energy mandate and microgrid operations} %Title
{University of Hawaii President's Green Implementation Award} %Agency
{2018} %Year
{P. Johnson, Principal Investigator} %Note
{\$10,000} %Amount
\cvgrantitem {Human centered information integration for the smart grid} %Title
{National Science Foundation} %Agency
{2010--2014} %Year
{P. Johnson, Principal Investigator} %Note
{\$413,467} %Amount
\cvgrantitem{Supporting next generation energy education with the Kukui Cup}
{HEI Charitable Foundation}
{2012}
{P. Johnson, Principal Investigator}
{\$10,777}
\cvgrantitem{Sponsorship of the 2011 Kukui Cup} %Title
{Hawaii State Department of Business, Economic Development, and Tourism} %Agency
{2012} %Year
{P. Johnson, Principal Investigator} %Note
{\$5,000} %Amount
\cvgrantitem {Renewable energy and island sustainability} %Title
{University of Hawaii} %Agency
{2009--2011} %Year
{Principal Investigator: T. Kuh; Co-Principal Investigators: O. Boric-Lubecke, B. Chao, M. Coffman, D. Garmire, M. Nejhad, R. Ghorbani, P. Johnson, A. Kavcic, D. Konan, B. Liaw, E. Miller, W. Qu, M. Teng, X. Zhou.} %Note
{\$1,000,000} %Amount
\cvgrantitem {Google Summer of Code: Hackystat} %Title
{Google, Inc.} %Agency
{2008--2009} %Year
{P. Johnson, Principal Investigator} %Note
{\$45,000} %Amount
\cvgrantitem{CSDL affiliates program} %Title
{Expedia, Inc.} %Agency
{2008} %Year
{P. Johnson, Principal Investigator} %Note
{\$25,000} %Amount
\cvgrantitem{CSDL affiliates program} %Title
{Sixth Sense Analytics, Inc.} %Agency
{2006} %Year
{P. Johnson, Principal Investigator} %Note
{\$25,000} %Amount
\cvgrantitem {Student engagement grant} %Title
{University of Hawaii and Maui High Performance Computing Center} %Agency
{2005} %Year
{P. Johnson, Principal Investigator} %Note
{\$42,000} %Amount
\cvgrantitem {Eclipse innovation grant award} %Title
{IBM Corporation} %Agency
{2004} %Year
{P. Johnson, Principal Investigator} %Note
{\$15,000} %Amount
\cvgrantitem {Supporting development of highly dependable software through continuous, automated, \protect \newline in-process, and individualized software measurement validation} %Title
{Joint NSF/NASA Highly Dependable Computing Program} %Agency
{2002--2006} %Year
{P. Johnson, Principal Investigator} %Note
{\$638,000} %Amount
\cvgrantitem {Aligning the financial services, fulfillment distribution infrastructure, and small business sectors in Hawaii through B2B technology innovation} %Title
{University of Hawaii New Economy Research Grant Program} %Agency
{2000--2001} %Year
{P. Johnson, Principal Investigator} %Note
{\$30,000} %Amount
\cvgrantitem{Internet entrepreneurship: Theory and practice} %Title
{University of Hawaii Entrepreneurship Course Development Grant} %Agency
{1999--2000} %Year
{P.~Johnson and Glen Taylor, Principal Investigators} %Note
{\$10,000} %Amount
\cvgrantitem {Java-based software engineering technology for high quality development in "Internet Time" organizations} %Title
{Sun Microsystems Academic Equipment Grant Program} %Agency
{1999} %Year
{P. Johnson, Principal Investigator} %Note
{\$39,205} %Amount
\cvgrantitem {Project LEAP: Lightweight, empirical, anti-measurement dysfunction, and portable software developer improvement} %Title
{National Science Foundation} %Agency
{1998--2001} %Year
{P. Johnson, Principal Investigator} %Note
{\$265,000} %Amount
\cvgrantitem {Internet-enabled engineering tool for dynamically analyzing and planning world-wide subsea cable and array installations} %Title
{Makai Ocean Engineering, Inc.} %Agency
{1998--1999} %Year
{P. Johnson, Principal Investigator} %Note
{\$83,286} %Amount
\cvgrantitem {Kona: A distributed, collaborative technical review environment} %Title
{Digital Equipment Corporation External Research Program} %Agency
{1997} %Year
{P. Johnson, Principal Investigator} %Note
{\$101,413} %Amount
\cvgrantitem {CSDL affiliates program} %Title
{Makai Ocean Engineering, Inc.} %Agency
{1997} %Year
{P. Johnson, Principal Investigator} %Note
{\$10,000} %Amount
\cvgrantitem {CSDL affiliates program} %Title
{Tektronix, Inc.} %Agency
{1996--1998} %Year
{P. Johnson, Principal Investigator} %Note
{\$45,000} %Amount
\cvgrantitem {Improving software quality through instrumented formal technical review} %Title
{National Science Foundation} %Agency
{1995--1997} %Year
{P. Johnson, Principal Investigator} %Note
{\$161,754} %Amount
\cvgrantitem {Collaboration mechanisms for Project HI-TIME} %Title
{Pacific International Center for High Technology Research} %Agency
{1995} %Year
{P. Johnson, Principal Investigator} %Note
{\$30,280} %Amount
\cvgrantitem {Three dimensional interfaces for evolving collaborative systems} %Title
{University of Hawaii Research Council Seed Money Grant} %Agency
{1992--1993} %Year
{P. Johnson, Principal Investigator} %Note
{\$5,000} %Amount
\cvgrantitem {Support for structural evolution in exploratory software development} %Title
{National Science Foundation Research Initiation Award Program in Software Engineering} %Agency
{1991--1993} %Year
{P. Johnson, Principal Investigator} %Note
{\$54,810} %Amount
\cvgrantitem {An investigation of software structure evolution} %Title
{University of Hawaii Research Council Seed Money Grant} %Agency
{1990--1991} %Year
{P. Johnson, Principal Investigator} %Note
{\$6,000} %Amount
\section{Honors and Awards}
\cvgrantitem {Board of Regents Medal for Excellence in Teaching}
{University of Hawaii}
{2019} {} {}
\section{Professional activities}
\cvitem {Editorial Board}{}
\begin{itemize}
\item Journal of Empirical Software Engineering, 2004--2008
\item IEEE Transactions on Software Engineering, 2000--2004
\item International Journal of Computer Supported Cooperative Work, 1997--2004
\end{itemize}
\cvitem{Program Chair/Co-Chair}{}
\begin{itemize}
\item International Workshop on In-Process Software Engineering Measurement and Analysis, 2007
\item International Workshop on Software Engineering for High Performance Computing
System Applications, 2004, 2005
\item International Software Engineering Research Network Annual Meeting, 2000
\item Software Architectures for Cooperative Systems Workshop, ACM Conference on
Computer Supported Cooperative Work, 1994
\item CSCW Tools and Technologies Workshop, European Conference on Computer Supported
Cooperative Work, 1993
\item CSCW Tools and Technologies Workshop, ACM Conference on Computer Supported Cooperative Work, 1992
\end{itemize}
\cvitem{Program Committee}{}
\begin{itemize}
\item Energy 2011--present
\item International Workshop on Software Engineering for High Performance Computing
System Applications, 2007
\item PROFES 2005--2010
\item Workshop on Productivity and Performance in High-End Computing, 2005, 2006
\item XP/Agile Universe, 2004
\item International Software Metrics Symposium, 2003, 2004
\item International Symposium on Empirical Software Engineering, 2002--2004
\item European Conference on Computer Supported Cooperative Work, 1997, 1999
\end{itemize}
\cvitem {Judge}{Hawaii State Science Fair, Honolulu, Hawaii, 1998--present}
\cvitem {Founder and Chair}{Hawaii Java Users Group, Honolulu, Hawaii, 1996--2010}
\cvitem {Member}{International Software Engineering Research Network (ISERN), 1996--2010}
\cvitem {Journal article reviewing}{
IEEE Transactions on Software Engineering,
ACM Transactions on Software Engineering and Methodology,
ACM Transactions on Programming Languages and Systems,
IEEE Software,
IEEE Computer,
Journal of Collaborative Computing,
Artificial Intelligence in Engineering, Design, and Manufacturing
}
%----------------------------------------------------------------------------------------
\end{document}
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
\section{The HTCondor Job Router}\label{sec:JobRouter}
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
\index{Job Router}
\index{HTCondor daemon!condor\_job\_router@\Condor{job\_router}}
\index{daemon!condor\_job\_router@\Condor{job\_router}}
\index{condor\_job\_router daemon}
The HTCondor Job Router is an add-on to the \Condor{schedd} that transforms
jobs from one type into another according to a configurable policy.
This process of transforming the jobs is called \emph{job routing}.
One example of how the Job Router can be used is for the task of sending
excess jobs to one or more remote grid sites.
The Job Router can transform jobs, such as vanilla universe jobs, into grid universe
jobs that use any of the grid types supported by HTCondor. The rate at
which jobs are routed can be matched roughly to the rate at which the
site is able to start running them. This makes it possible to balance
a large work flow across multiple grid sites, a local HTCondor pool, and
any flocked HTCondor pools, without having to guess in advance how quickly
jobs will run and complete in each of the different sites.
Job Routing is most appropriate for high throughput work flows,
where there are many more jobs than computers,
and the goal is to keep as many of the computers busy as possible.
Job Routing is less suitable when there are a small number of jobs,
and the scheduler needs to choose the best place for each job,
in order to finish them as quickly as possible.
The Job Router does not know which site will run the jobs faster,
but it can decide whether to send more jobs to a site,
based on whether jobs already submitted to that site are
sitting idle or not,
as well as whether the site has experienced recent job failures.
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
\subsection{\label{sec:RouterMechanism}Routing Mechanism}
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
The \Condor{job\_router} daemon and configuration determine a policy
for which jobs may be transformed and sent to
grid sites.
By default, a job is transformed into a grid universe job
by making a copy of the original job ClassAd, and
modifying some attributes in this copy of the job.
The copy is called the routed copy,
and it shows up in the job queue under a new job id.
Until the routed copy finishes or is removed,
the original copy of the job passively mirrors the state of the routed job.
During this time,
the original job is not available for matchmaking,
because it is tied to the routed copy.
The original job also does not evaluate periodic expressions,
such as \Attr{PeriodicHold}.
Periodic expressions are evaluated for the routed copy.
When the routed copy completes,
the original job ClassAd is updated such that it reflects the
final status of the job.
If the routed copy is removed,
the original job returns to the normal idle state,
and is available for matchmaking or rerouting.
If, instead, the original job is removed or goes on hold,
the routed copy is removed.
Although the default mode routes vanilla universe jobs to
grid universe jobs, the routing rules may be configured
to do some other transformation of the job. It is also
possible to edit the job in place rather than creating
a new transformed version of the job.
The \Condor{job\_router} daemon utilizes a \Term{routing table},
in which a ClassAd describes each site to which jobs may be sent.
The routing table is given in the New ClassAd language,
as currently used by HTCondor internally.
A good place to learn about the syntax of New ClassAds
is the Informal Language Description in the C++ ClassAds
tutorial: \URL{http://htcondor.org/classad/c++tut.html}.
Two essential differences distinguish the New ClassAd language
from the ClassAd language used elsewhere in HTCondor.
In the New ClassAd language,
each ClassAd is surrounded by square brackets.
And, in the New ClassAd language,
each assignment statement ends with a semicolon.
When the New
ClassAd is embedded in an HTCondor configuration file,
it may appear all on a single line,
but the readability is often improved by inserting line continuation
characters
after each assignment statement.
This is done in the examples.
Unfortunately, this makes the insertion of comments into
the configuration file awkward,
because of the interaction between comments and line continuation
characters in configuration files.
An alternative is to use C-style comments (\Code{/* \Dots */}).
Another alternative is to read in the routing table entries
from a separate file,
rather than embedding them in the HTCondor configuration file.
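For illustration, a minimal routing table with a single route, written in the
single-macro style with line continuation characters and a C-style comment,
might look like the following sketch; the grid site host name and route name
are placeholders, and a complete multi-line example appears in
section~\ref{ExampleJobRouterConfiguration}.
\footnotesize
\begin{verbatim}
JOB_ROUTER_ENTRIES = \
   [ /* each bracketed New ClassAd describes one route */ \
     GridResource = "gt2 example-site.edu/jobmanager-condor"; \
     name = "Example Site"; \
     MaxIdleJobs = 10; \
   ]
\end{verbatim}
\normalsize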
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
\subsection{\label{sec:RouterJobSubmission}Job Submission with Job Routing Capability}
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
If Job Routing is set up, then the following items
ought to be considered, so that jobs have the
necessary prerequisites to be candidates for routing.
\begin{itemize}
\item Jobs appropriate for routing to the grid must not rely on access to
a shared file system, or other services that are only available on the
local pool.
The job will use HTCondor's file transfer mechanism,
rather than relying on a shared file system
to access input files and write output files.
In the submit description file, to enable file transfer, there
will be a set of commands similar to
\begin{verbatim}
should_transfer_files = YES
when_to_transfer_output = ON_EXIT
transfer_input_files = input1, input2
transfer_output_files = output1, output2
\end{verbatim}
Vanilla universe jobs and most types of grid universe jobs differ in the
set of files transferred back when the job completes.
Vanilla universe jobs transfer back all files created or modified,
while all grid universe jobs,
except for HTCondor-C,
only transfer back the \SubmitCmd{output} file,
as well as those explicitly listed
with \SubmitCmd{transfer\_output\_files}.
Therefore, when routing jobs to grid universes other than HTCondor-C, it is
important to explicitly specify all
output files that must be transferred upon job completion.
An additional difference between the vanilla universe jobs
and \SubmitCmdNI{gt2} grid universe jobs
is that \SubmitCmdNI{gt2} jobs do not return
any information about the job's exit status.
The exit status as reported in the job ClassAd and job event log are
always 0.
Therefore, jobs that may be routed to a \SubmitCmdNI{gt2} grid site
must not rely upon a non-zero job exit status.
\item One configuration for routed jobs requires the jobs to
identify themselves as candidates for Job Routing.
This may be accomplished by inventing a ClassAd attribute
that the configuration utilizes in setting the policy
for job identification,
and the job defines this attribute to identify itself.
If the invented attribute is called \Attr{WantJobRouter},
then the job identifies itself as a job that may be routed
by placing in the submit description file:
\begin{verbatim}
+WantJobRouter = True
\end{verbatim}
This implementation can be taken further,
allowing the job to first be rejected within the local pool,
before being a candidate for Job Routing:
\begin{verbatim}
+WantJobRouter = LastRejMatchTime =!= UNDEFINED
\end{verbatim}
\item As appropriate to the potential grid site,
create a grid proxy, and specify it in the submit description file:
\begin{verbatim}
x509userproxy = /tmp/x509up_u275
\end{verbatim}
This is not necessary if the \Condor{job\_router} daemon is configured
to add a grid proxy on behalf of jobs.
\end{itemize}
Job submission does not change for jobs that may be routed.
\begin{verbatim}
$ condor_submit job1.sub
\end{verbatim}
where \File{job1.sub} might contain:
\begin{verbatim}
universe = vanilla
executable = my_executable
output = job1.stdout
error = job1.stderr
log = job1.ulog
should_transfer_files = YES
when_to_transfer_output = ON_EXIT
+WantJobRouter = LastRejMatchTime =!= UNDEFINED
x509userproxy = /tmp/x509up_u275
queue
\end{verbatim}
The status of the job may be observed as with any other HTCondor job,
for example by looking in the job's log file.
Before the job completes,
\Condor{q} shows the job's status.
Should the job become routed,
a second job will enter the job queue.
This is the routed copy of the original job.
The command \Condor{router\_q} shows a more specialized view of routed jobs,
as this example shows:
\begin{verbatim}
$ condor_router_q -S
JOBS ST Route GridResource
40 I Site1 site1.edu/jobmanager-condor
10 I Site2 site2.edu/jobmanager-pbs
2 R Site3 condor submit.site3.edu condor.site3.edu
\end{verbatim}
\Condor{router\_history} summarizes the history of routed jobs,
as this example shows:
\begin{verbatim}
$ condor_router_history
Routed job history from 2007-06-27 23:38 to 2007-06-28 23:38
Site Hours Jobs Runs
Completed Aborted
-------------------------------------------------------
Site1 10 2 0
Site2 8 2 1
Site3 40 6 0
-------------------------------------------------------
TOTAL 58 10 1
\end{verbatim}
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
\subsection{\label{ExampleJobRouterConfiguration} An Example Configuration}
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
The following sample configuration sets up potential job routing
to three routes (grid sites).
Definitions of the configuration variables specific to the Job Router
are in section~\ref{sec:JobRouter-Config-File-Entries}.
One route is an HTCondor site accessed via the Globus gt2 protocol.
A second route is a PBS site, also accessed via Globus gt2.
The third site is an HTCondor site accessed by HTCondor-C.
The \Condor{job\_router} daemon
does not know which site will be best for a given job.
The policy implemented in this sample configuration
stops sending more jobs to a site,
if ten jobs that have already been sent to that site are idle.
These configuration settings belong in the local configuration file
of the machine where jobs are submitted.
Check that the machine can successfully submit grid jobs
before setting up and using the Job Router.
Typically, the single required element that needs to be
added for GSI authentication
is an X.509 trusted certification authority directory,
in a place recognized by HTCondor
(for example, \File{/etc/grid-security/certificates}).
The VDT (\URL{http://vdt.cs.wisc.edu}) project provides
a convenient way to set up and install a trusted CA,
if needed.
Note that, as of version 8.5.6, the configuration language supports
multi-line values, as shown in the example below (see
section~\ref{sec:Multi-Line-Values} for more details).
\footnotesize
\begin{verbatim}
# These settings become the default settings for all routes
JOB_ROUTER_DEFAULTS @=jrd
[
requirements=target.WantJobRouter is True;
MaxIdleJobs = 10;
MaxJobs = 200;
/* now modify routed job attributes */
/* remove routed job if it goes on hold or stays idle for over 6 hours */
set_PeriodicRemove = JobStatus == 5 ||
(JobStatus == 1 && (time() - QDate) > 3600*6);
delete_WantJobRouter = true;
set_requirements = true;
]
@jrd
# This could be made an attribute of the job, rather than being hard-coded
ROUTED_JOB_MAX_TIME = 1440
# Now we define each of the routes to send jobs on
JOB_ROUTER_ENTRIES @=jre
[ GridResource = "gt2 site1.edu/jobmanager-condor";
name = "Site 1";
]
[ GridResource = "gt2 site2.edu/jobmanager-pbs";
name = "Site 2";
set_GlobusRSL = "(maxwalltime=$(ROUTED_JOB_MAX_TIME))(jobType=single)";
]
[ GridResource = "condor submit.site3.edu condor.site3.edu";
name = "Site 3";
set_remote_jobuniverse = 5;
]
@jre
# Reminder: you must restart HTCondor for changes to DAEMON_LIST to take effect.
DAEMON_LIST = $(DAEMON_LIST) JOB_ROUTER
# For testing, set this to a small value to speed things up.
# Once you are running at large scale, set it to a higher value
# to prevent the JobRouter from using too much cpu.
JOB_ROUTER_POLLING_PERIOD = 10
#It is good to save lots of schedd queue history
#for use with the router_history command.
MAX_HISTORY_ROTATIONS = 20
\end{verbatim}
\normalsize
%Some questions you may have after reading the above policy: Can the
%routing table be dynamically generated from grid information systems?
%Do users have to have their own grid credentials or can the \Condor{job\_router} daemon
%insert service credentials for them? What's up with the syntax of the
%routing table: C-style comments, strange ClassAd expressions, escaped
%end of lines? The next section covers the specifics of the \Condor{job\_router} daemon
%configuration. Read on!
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
\subsection{\label{RoutingTableAttributes} Routing Table Entry ClassAd Attributes}
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
The conversion of a job to a routed copy may require the
job ClassAd to be modified.
The Routing Table specifies attributes of the different possible
routes, and it may specify modifications that should be made
to the job when it is sent along a particular route. In addition
to this mechanism for transforming the job, external programs may be
invoked to transform the job. For more information, see
section~\ref{sec:job-hooks-JR}.
The following attributes and instructions for modifying job attributes
may appear in a Routing Table entry.
\begin{description}
\index{Job Router Routing Table ClassAd attribute!GridResource}
\item[GridResource] Specifies the value for the \Attr{GridResource}
attribute that will be inserted into the routed copy of the job's ClassAd.
\index{Job Router Routing Table ClassAd attribute!Name}
\item[Name] An optional identifier that will be used in log
messages concerning this route. If no name is specified, the default
used will be the value of \Attr{GridResource}.
The \Condor{job\_router} distinguishes routes and advertises
statistics based on this attribute's value.
\index{Job Router Routing Table ClassAd attribute!Requirements}
\item[Requirements] A \Attr{Requirements} expression
that identifies jobs that may be matched to the route. Note
that, as with all settings, requirements specified in
the configuration variable
\MacroNI{JOB\_ROUTER\_ENTRIES} override the setting of
\MacroNI{JOB\_ROUTER\_DEFAULTS}. To specify global requirements that
are not overridden by \MacroNI{JOB\_ROUTER\_ENTRIES}, use
\MacroNI{JOB\_ROUTER\_SOURCE\_JOB\_CONSTRAINT}.
\index{Job Router Routing Table ClassAd attribute!MaxJobs}
\item[MaxJobs] An integer maximum number of jobs permitted on the route at
one time. The default is 100.
\index{Job Router Routing Table ClassAd attribute!MaxIdleJobs}
\item[MaxIdleJobs] An integer maximum number of routed jobs in the
idle state. At or above this value, no more jobs will be sent
to this site.
This is intended to prevent too many jobs from being sent to sites
which are too busy to run them.
If the value set for this attribute is too small,
the rate of job submission to the site will slow,
because the \Condor{job\_router} daemon will submit jobs up to this limit,
wait to see some of the jobs enter the running state,
and then submit more.
The disadvantage of setting this attribute's value too high
is that a lot of jobs may be sent
to a site, only to sit idle for hours or days.
The default value is 50.
\index{Job Router Routing Table ClassAd attribute!FailureRateThreshold}
\item[FailureRateThreshold] A maximum tolerated rate of job failures.
Failure is determined by the expression set for
the attribute \Attr{JobFailureTest}.
The default threshold is 0.03 jobs/second.
If the threshold is exceeded,
submission of new jobs is throttled until jobs begin succeeding,
such that the failure rate is less than the threshold.
This attribute implements \Term{black hole throttling},
such that a site at which jobs are sent only to fail (a black hole)
receives fewer jobs.
\index{Job Router Routing Table ClassAd attribute!JobFailureTest}
\item[JobFailureTest] An expression
evaluated for each job that finishes,
to determine whether it was a failure.
The default value if no expression is defined
assumes all jobs are successful.
Routed jobs that are removed are considered to be failures.
An example expression to treat all jobs running for less than 30 minutes as
failures is \Expr{target.RemoteWallClockTime < 1800}. A more flexible
expression might reference a property or expression of the job that
specifies a failure condition specific to the type of job.
\index{Job Router Routing Table ClassAd attribute!TargetUniverse}
\item[TargetUniverse] An integer value specifying the desired
universe for the routed copy of the job. The default value is 9,
which is the \SubmitCmdNI{grid} universe.
\index{Job Router Routing Table ClassAd attribute!UseSharedX509UserProxy}
\item[UseSharedX509UserProxy] A boolean expression
that when \Expr{True} causes the value of \Attr{SharedX509UserProxy}
to be the X.509 user proxy for the routed job.
Note that if the \Condor{job\_router} daemon is running as root,
the copy of this file that is given to the job
will have its ownership set to that of the user running the job.
This requires the trust of the user.
It is therefore recommended to avoid this mechanism when possible.
Instead,
require users to submit jobs with \Attr{X509UserProxy}
set in the submit description file.
If this feature is needed,
use the boolean expression to only allow specific values of \Expr{target.Owner}
to use this shared proxy file.
The shared proxy file should be owned by the \Login{condor} user.
Currently, to use a shared proxy, the route must also
turn on sandboxing for the job by setting the attribute \Attr{JobShouldBeSandboxed}.
\index{Job Router Routing Table ClassAd attribute!SharedX509UserProxy}
\item[SharedX509UserProxy]
A string giving the name of the file that contains the X.509 user proxy for the routed job.
\index{Job Router Routing Table ClassAd attribute!JobShouldBeSandboxed}
\item[JobShouldBeSandboxed] A boolean expression
that when \Expr{True} causes the created copy of the job to be sandboxed.
A copy of the input files will be placed in the
\Condor{schedd} daemon's spool area for the target job,
and when the job runs,
the output will be staged back into the spool area.
Once all of the output has been successfully staged back,
it will be copied again,
this time from the spool area of the sandboxed job back to the
original job's output locations.
By default, sandboxing is turned off.
Turn it on only if using a shared X.509
user proxy, or if direct staging of remote output files
back to the final output locations is not desired.
\index{Job Router Routing Table ClassAd attribute!OverrideRoutingEntry}
\item[OverrideRoutingEntry] A boolean value that when \Expr{True},
indicates that this entry
in the routing table replaces any previous entry in the table
with the same name.
When \Expr{False}, it indicates that if there is a
previous entry by the same name, the previous entry should be retained
and this entry should be ignored.
The default value is \Expr{True}.
\index{Job Router Routing Table ClassAd attribute!Set\_<ATTR>}
\item[Set\_<ATTR>] Sets the value of \Attr{<ATTR>} in the routed
copy's job ClassAd to the specified value. An example of
an attribute that might be set is \Attr{PeriodicRemove},
so that, for example, if the routed job goes on hold or stays idle for too long,
it is removed and the original copy of the job returns to a normal state.
A combined sketch using this and the following instructions appears after this list.
\index{Job Router Routing Table ClassAd attribute!Eval\_Set\_<ATTR>}
\item[Eval\_Set\_<ATTR>] Defines an expression.
The expression is evaluated, and the resulting value
sets the value of the routed copy's job ClassAd attribute \Attr{<ATTR>}.
Use this attribute to set a custom or local value,
especially for modifying an attribute which may have been already
specified in a default routing table.
\index{Job Router Routing Table ClassAd attribute!Copy\_<ATTR>}
\item[Copy\_<ATTR>] Defined with the name of a routed copy ClassAd
attribute. Copies the value of \Attr{<ATTR>} from the
original job ClassAd into the attribute of the routed copy with the specified name.
This is useful for saving the value of an
expression before replacing it with something else that references the
original expression.
\index{Job Router Routing Table ClassAd attribute!Delete\_<ATTR>}
\item[Delete\_<ATTR>] Deletes \Attr{<ATTR>} from the routed copy
ClassAd. A value assigned to this attribute in the routing table
entry is ignored.
\index{Job Router Routing Table ClassAd attribute!EditJobInPlace}
\item[EditJobInPlace] A boolean expression that, when \Expr{True},
causes the original job to be transformed in place rather than creating a new
transformed version (a routed copy) of the job.
In this mode, the Job Router Hook
\Macro{<Keyword>\_HOOK\_TRANSLATE\_JOB} and transformation
rules in the routing table are applied during the job
transformation. The routing table attribute \Attr{GridResource} is
ignored, and there is no default transformation of the job from a
vanilla job to a grid universe job as there is otherwise. Once
transformed, the job is still a candidate for matching routing rules,
so it is up to the routing logic to control whether the job may be
transformed multiple times or not. For example, to transform the job
only once, an attribute could be set in the job ClassAd to prevent it from
matching the same routing rule in the future. To transform the job
multiple times with limited frequency, a timestamp could be inserted
into the job ClassAd marking the time of the last transformation, and the
routing entry could require that this timestamp either be undefined
or older than some limit.
\end{description}
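As a sketch of how several of these instructions combine in a single route,
consider the following hypothetical entry; the grid site, route name, and
attribute values are illustrative only, and follow the syntax of the example
configuration in section~\ref{ExampleJobRouterConfiguration}.
It saves the original \Attr{Requirements} expression under another name,
replaces it, deletes the job's routing flag, and sets a periodic remove
policy on the routed copy.
\footnotesize
\begin{verbatim}
[ GridResource = "gt2 example-site.edu/jobmanager-pbs";
  name = "Example Route";
  MaxIdleJobs = 10;
  copy_Requirements = "original_requirements";
  set_requirements = true;
  delete_WantJobRouter = true;
  set_PeriodicRemove = JobStatus == 5 ||
                       (JobStatus == 1 && (time() - QDate) > 3600*6);
]
\end{verbatim}
\normalsize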
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
\subsection{\label{JobRouterReSSExample}Example: constructing the routing table from ReSS}
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
The Open Science Grid has a service called ReSS (Resource Selection
Service). It presents grid sites as ClassAds in an HTCondor collector.
This example builds a routing table from the site ClassAds in the ReSS
collector.
Using \Macro{JOB\_ROUTER\_ENTRIES\_CMD}, we tell the \Condor{job\_router} daemon to call a
simple script which queries the collector and outputs a routing table.
The script, called \verb|osg_ress_routing_table.sh|, is just this:
\footnotesize
\begin{verbatim}
#!/bin/sh
# you _MUST_ change this:
export condor_status=/path/to/condor_status
# if no command line arguments specify -pool, use this:
export _CONDOR_COLLECTOR_HOST=osg-ress-1.fnal.gov
$condor_status -format '[ ' BeginAd \
-format 'GridResource = "gt2 %s"; ' GlueCEInfoContactString \
-format ']\n' EndAd "$@" | uniq
\end{verbatim}
\normalsize
Save this script to a file and make sure the permissions on the file
mark it as executable. Test this script by calling it by hand before
trying to use it with the \Condor{job\_router} daemon. You may supply additional arguments
such as \Opt{-constraint} to limit the sites which are returned.
Once you are satisfied that the routing table constructed by the
script is what you want, configure the \Condor{job\_router} daemon to use it:
\footnotesize
\begin{verbatim}
# command to build the routing table
JOB_ROUTER_ENTRIES_CMD = /path/to/osg_ress_routing_table.sh <extra arguments>
# how often to rebuild the routing table:
JOB_ROUTER_ENTRIES_REFRESH = 3600
\end{verbatim}
\normalsize
Starting from the example configuration given earlier, the
above settings may replace \Macro{JOB\_ROUTER\_ENTRIES}, or
\Macro{JOB\_ROUTER\_ENTRIES} may be left in place so that the routing table
contains entries from both sources. When you restart or reconfigure
the \Condor{job\_router} daemon,
you should see messages in the Job Router's log indicating that it
is adding more routes to the table.
%%==============
%%==============
\section{Optimization study}
This section describes the changes that have to be made to the \texttt{main} and \texttt{data} files in order to run optimization studies. An additional parameter file is required in this case:
\begin{itemize}
\item opt.prm (or opt.xml): This file is used ONLY if, in the \texttt{Simulator} section of the \texttt{main} file, the parameter \texttt{Analysis type} is set to \texttt{Optimization}. The file specifies the information required to run an optimization simulation. Two optimization examples for the cathode electrode are provided in the example folder.
\end{itemize}
If the user wants to run an optimization study using the GUI, the file opt.xml should also be loaded.
%-----
\subsection{Graphical user interface}
A project for an optimization study using the OpenFCST GUI is made of three files:
\begin{itemize}
\item \texttt{main.xml} contains the main selections for OpenFCST, such as the type of application, nonlinear solver, and type of study to be performed, i.e. one analysis run or a parametric analysis run.
\item \texttt{data.xml} contains the parameters to setup the simulation for the selected application.
\item \texttt{opt.xml} is an optional file used to setup parameters for optimization.
\end{itemize}
The following steps should be followed in case the user is creating a new project or opening a new one:
\begin{enumerate}
\item Starting a new project: After the data file has been generated, an optimization file may also be generated, depending on whether OpenFCST has been compiled with Dakota.
\item Opening an Existing Project: After specifying the location of the data file, you may load the optimization parameter file.
\end{enumerate}
If the user wants to edit the GUI's \textbf{settings.ini} file, the following parameter can also be modified:
\begin{itemize}
\item \textcolor{grey}{\textbf{optFileName:}} The name of the optimization file which OpenFCST will create when generating a new Project.
\end{itemize}
%-----
\subsection{The \texttt{main} file}
In the \texttt{Simulator} section of the \texttt{main} file, the parameter \texttt{Analysis type} should be set to \texttt{Optimization} when an optimization study is to be performed. The name of the file that contains all optimization parameters is specified in the subsection \texttt{Optimization}. There are only two entries in this subsection (a sketch of the resulting entries follows the list):
\begin{itemize}
\item \texttt{optimization parameter file name}: Enter here the name of the optimization file. By default, if using the GUI, opt.xml should be used.
\item \texttt{Dakota direct}: Set to true if you would like OpenFCST to directly interact with Dakota, i.e. OpenFCST will call Dakota as needed to run the optimization simulation. If set to false, then OpenFCST will run once and output a file that Dakota can read. In this case Dakota would be the optimization driver.
\end{itemize}
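As a rough sketch, assuming the parameter and subsection names exactly as written above and the \texttt{set}/\texttt{subsection} syntax used elsewhere in this guide, the relevant part of the \texttt{main} file might read as follows; opt.xml would replace opt.prm when working through the GUI.
\begin{verbatim}
subsection Simulator
  set Analysis type = Optimization
  subsection Optimization
    set optimization parameter file name = opt.prm
    set Dakota direct = true
  end
end
\end{verbatim}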
%
%
% \subsection{Parameter/Optimization Application File}
%
%
% \item
% \texttt{opt\_app\_parametric\_default.prm}
%
% The \texttt{opt\_app\_parametric\_default.prm} is used when carrying out parametric studies.
%
%
%
% \begin{lstlisting}
% ######################################################################
% #
% # This file is used to run a multi-dimensional parametric study.
% # See end of file for list of possible design variables.
% #
% ######################################################################
%
% subsection Optimization Parameters
%
% #### NOTE THAT THIS SECTION ONLY EXISTS WHEN RUNNING IN OPTIMIZATION MODE ###
% ####----------------------------------------------------------------------###
% subsection Optimization Program Options
% set Use dakota input file = false # (default) false
% set Dakota_Input_File = dakota_input.in # not needed if -Use dakota input file = false-
%
% set Optimization method = multidim_parameter_study # multidim_parameter_study | optpp_q_newton | nl2sol | ncsu_direct
% end
%
% subsection Design Variables
% set num_design_variables = 1 # 2
% set DV_0_name = V_cell # P_cell
% set DV_1_name = T_cell # P_c | RH_a
% set DV_2_name = prc_Pt_c # RH_c | prc_Pt_c
%
% ####### Lower Bound #######
% ####### lb < -1e30 for -inf #######
% #---------------------------------#
% set DV_0_lb = -1.1 # V # Changed to -1.1, force dekota to start at -1.1
% set DV_1_lb = 303 # K #
% set DV_2_lb = 0.2 # % #
%
% ####### Upper Bound #######
% ####### ub > 1e30 for inf #######
% #-------------------------------#
% set DV_0_ub = -0.1 # V #
% set DV_1_ub = 353 # K #
% set DV_2_ub = 0.5 # % #
%
% ####### Parameter Study Partitions #######
% ### NOTE: Evaluated at n+1 points between lower and upper bound ###
% ###-------------------------------------------------------------###
% set DV_0_partition = 50
% set DV_1_partition = 8
% set DV_2_partition = 10
% end
%
% subsection Responses
% set num_objectives = 1
% set num_nl_constraints = 0 # (default) 0
% set num_eq_constraints = 0 # (default) 0
%
% set RESP_0_name = current
% end
% end
% \end{lstlisting}
%
%
%
%
% Located at the bottom of all \texttt{opt\_app} files in both parametric \& optimization is a list of design variables available for the user to carry out a parametric studies or optimization. As of \textbf{1-SEP-2013} the following table lists the current parameters that can be passed to DAKOTA for parametric studies/ optimization.
%
% If the user requires additional variables for parametric/optimization studies, modification of the \\ \texttt{dakota\_application.cc} file should be carried out.
%
% \bigskip
%
% \begin{lstlisting}
% ######### List of Possible Design Variable Names #########
% #########----------------------------------------#########
% # // Conventional_CL.cc
% # V_Pt_c | V_Pt_a // Platinum loading per unit volume [mg/cm3] (Cathode | Anode)
% # prc_Pt_c | prc_Pt_a // Platinum loading on support [%wt] (Cathode | Anode)
% # prc_N_c | prc_N_a // Electrolyte loading [%wt] (Cathode | Anode)
% # Av_c | Av_a // Active area [cm^2/cm^3] (Cathode | Anode)
%
% # // Agglomerate_CL.cc
% # r_agg_c | r_agg_a // Radius of the agglomerate [nm] (Cathode | Anode)
% # r_agg // Radius of the agglomerate [nm] **possibly redundant**
% # epsilon_agg_c | epsilon_agg_a // Agglomerate porosity (Cathode | Anode)
% # epsilon_agg // Agglomerate porosity **possibly redundant**
%
% # // Operating_Conditions.cc
% # V_cell // Cell Voltage
% # T_cell // Cell Temperature
% # dV_a // Voltage drop in the Anode
% # P_c | P_a // Pressure (Cathode | Anode)
% #
% # RH_c | RH_a // Relative Humidity (Cathode | Anode)
% # OCV // Open Circuit Voltage
%
% # // Geometries.cc
% # L_CCL | L_ACL // CL thickness (Cathode | Anode)
% # L_CGDL | L_AGDL // GDL thickness (Cathode | Anode)
% # L_CMPL | L_AMPL // MPL thickness (Cathode | Anode)
% # Ch_width // Channel Width (Cathode | Anode)
%
% \end{lstlisting}
%
%
%
% \paragraph{Optimization Program Options:}
%
% The \texttt{Optimization Program Options} of the \texttt{opt\_app} parametric file is responsible for telling OpenFCST whether it is required to formulate its own \texttt{dakota\_input.in} file or if you are supplying DAKOTA with a predefined input file (\textit{line 13 \& 14}).
%
%
% \paragraph{``Use dakota input file'' \& ``Dakota\_Input\_File'':}
%
% If \texttt{Use dakota input file} is set to \texttt{false} then OpenFCST will pass on the information specified in the \texttt{opt\_app\_parametric\_default.prm} file and DAKOTA will print out a new \texttt{dakota\_input.in} at run time. If however it is set to \texttt{true} we are telling OpenFCST that we have already specified an input file and that DAKOTA should use this directly rather than reading the information from the rest of the \texttt{opt\_app} file.
%
% \paragraph{Note:}
%
% For completeness \texttt{``Use dakota input file'' \& ``Dakota\_Input\_File''} have been included in the default parametric file, however, when the user is not using their own \texttt{dakota\_input.in} file both line 13 \& 14 can be deleted.
%
% Given that in most cases the user specifies all the parametric \& optimization information in the \texttt{opt\_app} file. The following descriptions will be relevant for cases when \texttt{Use dakota input file = false}.
%
% \paragraph{Optimization Method:}
%
% The \texttt{Optimization method} command is used to specify the type of study that is being carried out (optimization, parametric study, least squares fit, ...) for additional information on \texttt{Optimization methods} see section \ref{Optimization_using_FCST}. In our case we are looking to carry out a parametric study so the \texttt{multidim\_parameter\_study} should be specified.
%
% \paragraph{Design Variables:}
% In the design variables section (\textit{line 19-45}) we specify the number of design variables that we want to change (\textit{line 20}), the upper and lower bounds for that variable (\textit{line 25-37}), and the number of points that we want to evaluate between the upper and lower bounds (\textit{line 39-45}).
%
%
% \paragraph{num\_design\_variables:}
% In the example above we have specified one design variable \texttt{V\_cell} for a single parametric study. The corresponding upper, lower bounds, and partitions can be found at line 28, 35, and 42.
%
% If the user wants to conduct a multi-dimensional parametric study we would simply change \texttt{num\_design\_variables} value from one to whatever number of variables required. In the example about we have the capabilities of increasing the number of variables to three. If the user requires more variables than this the user can simply add additional \texttt{DV\_\#\_name} and the corresponding upper, lower bound and partitions.
%
% \paragraph{Note:} The upper and lower bound of the voltage have been set to negative. This is because DAKOTA will vary its parameters from the lowest value to the highest value (In the non-negative case this is from 0.1 - 1.1 [V]).
%
% During the solving process OpenFCST uses the last mesh data and node values as the initial starting point for the next point evaluation. As the function evaluations become more difficult as we enter the mass transport region (\texttt{V\_cell of 0.3 - 0.1}) the time taken to evaluate these points is much longer. If we change the voltage values to their negative the parametric study will go from 1.1 to 0.1 [V], this in turn decreases the solving time and allows the solver to use the previous values as appose to starting at the 0.1 [V] (the most difficult case).
%
% Additional advantages as well as reduced time is that in some cases if the solver begins at lower voltages (e.g. 0.1 [V]) the solver is unable to to converge due to the low oxygen values however if the solver starts at the 'easier case' (high voltages 1.1 - 0.8 [V]) it will carry on the previous solutions and be able to converge at the lower voltages.
%
%
%
% \paragraph{Responses:}
% The response section of the \texttt{opt\_app} parametric file, specifies the number of outputs desired in the \texttt{dakota\_tabular.dat} data file, in our case there is \textsc{only one} objective value (\textit{Current Density [$A/cm^2$]}) line 52.
%
% It also is responsibly for specifying the type and number of constraints. There are two types of constraints; Equality (\textit{line 49}) and Inequality (\textit{line 50}), in general we do not typically use constraints in parametric studies so this section will be covered in more detail in the \textbf{optimization section}.
%
%
%
% % Closing file numbering (3 main files)
% %-----------------------
% \end{enumerate}
%
%
%-----------------------------------------------------------------------------------------------------
%--------------------- Running OpenFCST
%--------------------- Optimization
%-----------------------------------------------------------------------------------------------------
%
%
% \section{Optimization using OpenFCST} \label{Optimization_using_FCST}
% When running an Optimization study the user requires three files, as seen above with parametric studies. The only difference however is that we change out(\textit{alter}) the third file to an optimization file/format.
%
% \texttt{opt\_app\_optimization\_default.prm}
%
% \begin{lstlisting}
% ######################################################################
% #
% # This file is used to run the optimization interface.
% # See end of file for a list of optimization variables.
% #
% ######################################################################
%
% subsection Optimization Parameters
%
% #### NOTE THAT THIS SECTION ONLY EXISTS WHEN RUNNING IN OPTIMIZATION MODE ###
% ####----------------------------------------------------------------------###
% subsection Optimization Program Options
% set Use dakota input file = false # (default) false
% set Dakota_Input_File = dakota_input.in
%
% set Optimization strategy = single_method # single_method | multi_start | pareto_set | hybrid
% set Optimization method = optpp_q_newton # (default) optpp_q_newton | nl2sol | ncsu_direct
%
%
% ######### Method Independent Parameters #########
% #########-------------------------------#########
% set Maximum iterations = 200 # (default) 100
% set Maximum function evaluations = 2000 # (default) 1000
% set Constraint tolerance = 1.0e-4 # (default) 1.0e-4
% set Convergence tolerance = 1.0e-4 # (default) 1.0e-4
%
% ######### Numerical Gradient Parameter #########
% #########------------------------------#########
% set Numerical gradients = true # (default) false | true
% set Numerical gradient type = central # (default) forward | central
%
%
% ######### Method Specific Parameters #########
% ######### OPT++ #########
% #########----------------------------#########
% subsection OPT++
% set Gradient tolerance = 1.0e-4 # (default) 1.0e-4
% set Steplength to boundary = 0.2 # (default) 0.9
% set Centering parameter = 0.8 # (default) 0.2
% set Merit function = argaez_tapia # (default) argaez_tapia
% end
% end
%
% subsection Design Variables
% set num_design_variables = 1
% set DV_0_name = L_CCL
% set DV_1_name = prc_N_c
%
% ####### Initial Point #######
% #######---------------#######
% set DV_0_ip = 1.65e-4
% set DV_1_ip = 0.30
%
% ####### Lower Bound #######
% ####### lb < -1e30 for -inf #######
% #----------------------------------#
% set DV_0_lb = 0.8e-4
% set DV_1_lb = 0.20
%
% ####### Upper Bound #######
% ####### ub > 1e30 for inf #######
% #-------------------------------#
% set DV_0_ub = 10e-4
% set DV_1_ub = 0.50
%
% ####### Scales #######
% #######--------#######
% set DV_0_scale_method = value # none | auto | value | log
% set DV_1_scale_method = value # none | auto | value | log
%
% set DV_0_scale = 1e-4
% set DV_1_scale = 0.1
%
% ####### Step size #######
% #######-----------#######
% set DV_0_step = 1e-5
% set DV_1_step = 1e-4
%
% end
%
% subsection Responses
% set num_objectives = 1
% set num_nl_constraints = 3
% set num_eq_constraints = 0
%
% set RESP_0_name = current
% set RESP_1_name = m_Pt_c
% set RESP_2_name = epsilon_V_cat_c
% set RESP_3_name = epsilon_N_cat_c
% set RESP_4_name = epsilon_S_cat_c
% set RESP_5_name = L_CCL
%
% ####### Response Numbers must match #######
% ####### Constraint Lower Bound #######
% ####### lb < -1e30 for -inf #######
% #######-----------------------------#######
% set RESP_2_lb = 0.118
% set RESP_3_lb = 0.118
% set RESP_4_lb = 0.118
% set RESP_5_lb = 0.8e-4 # (ESDLab, Ultra-thin CCM, = 2 microns) 2e-4
%
% ####### Constraint Upper Bound #######
% ####### ub > 1e30 for inf #######
% #######------------------------#######
% set RESP_2_ub = 1.0
% set RESP_3_ub = 1.0
% set RESP_4_ub = 1.0
% set RESP_5_ub = 2e-4 # (ESDLab, Ultra-thin CCM, = 2 microns) 2e-4
%
% ####### Equality Constraint #######
% #######---------------------#######
% set RESP_1_eq = 350
% end
% end
% \end{lstlisting}
%
% \bigskip
%
%
% In the above example of a \texttt{opt\_app\_optimization} file we will note that many of the variables have been seen earlier in the \texttt{opt\_app\_parametric} file. These next sections will look at describing the additional changes and variables applicable to optimization in OpenFCST.
%
% \paragraph{Optimization Method:}
%
% The \texttt{Optimization method} command is used to specify the type of study that is being carried out. There area
%
%
% \begin{enumerate}
% \item
% \texttt{single\_method}
%
% The \texttt{single\_method} is selected when the user is running parametric studies or optimization where they require only one optimization method.
%
% \item
% \texttt{multi\_start}
%
% The \texttt{multi\_start} method will restart the optimization multiple times specified by the user.
%
% \item
% \texttt{pareto\_set}
%
% The \texttt{pareto\_set} method is only utilize during multi-objective optimization (\ref{sec:multi_objective_optimization}).
%
% \item
% \texttt{hybrid}
%
% The \texttt{hybrid} method uses additional optimization methods. An example of this would be to use a global method to locate an area in the entire feasible region. Then once a sufficient criteria has been met the optimization method will be changed to a local method in order to take advantages of the high convergence rate.
%
% \end{enumerate}
%
%
%
% \paragraph{Optimization Program Options:}
%
% The \texttt{Optimization Program Options} consist of the same variables as seen in \texttt{opt\_app\_parametric} file however we also notice three additional Classifications:
%
%
% \begin{enumerate}
% \item
% Method Independent Parameters
% \item
% Numerical Gradient Parameters
% \item
% Method Specific Parameters
% \end{enumerate}
%
% \paragraph{Method Independent Parameters:}
%
% Consists of parameters that have no dependencies on the type of optimization method being used. This section tells OpenFCST the maximum number of iterations \& function evaluates (\textit{line 22 \& 23})that can be carried out during optimization.
%
% It also sets how strictly the method sticks to the constraints and the tolerance needed for convergence (\textit{line 24 \& 25}).
%
% \paragraph{Note:}
%
% Depending on the optimization problem, sometimes convergence issues can arise. One way to alleviate this issue is to relax the \texttt{Convergence tolerance} from the default $1.0e^{-4}$ to maybe $1.0e^{-3}$.
%
% The same idea can be applied to the \texttt{Constraint tolerance} depending on how heavily constrained the problem is.
%
%
% \paragraph{Numerical Gradient Parameters:}
%
% Here is where we specify the type of gradient method we want to employ.
%
% \begin{enumerate}
% \item
% Numerical Gradients, as seen in the example (\textit{line 30})
% \item
% Analytical Gradients
% \end{enumerate}
%
% When using numerical gradients we also have an additional specification on whether we want to use \textit{Forward} or \textit{Central} differentiation (\textit{line 31}).
%
%
%
% As we can see from figure \ref{forward_vs_central} using central differentiation is a much more accurate form of predicting the slop of a function. Having said this we must also take note of equations \ref{eq:Forward} \& \ref{eq:Central}. In equation \ref{eq:Central} we can see that we have doubled the function evaluations which in turn doubles the amount of time required to carry out the analysis.
%
% In some cases when carrying out function evaluations they will be highly expensive or in some cases convergence can be an issue. In these cases although not ideal it is preferable to use \textit{Forward} differentiation
%
%
% \begin{enumerate}
% \item
% \textbf{Forward}
%
% \begin{equation} \label{eq:Forward}
% \frac{\bigtriangleup f}{\bigtriangleup x} = \frac{ f(x + \bigtriangleup x) - f(x)}{\bigtriangleup x} \end{equation}
%
% \item
% \textbf{Central}
%
%
% \begin{equation} \label{eq:Central}
% \frac{\bigtriangleup f}{\bigtriangleup x} = \frac{ f(x + \frac{\bigtriangleup x}{2}) - f(x - \frac{\bigtriangleup x}{2})}{\bigtriangleup x}
% \end{equation}
% \end{enumerate}
%
% \FloatBarrier
% \begin{figure}[htbp]
% \begin{center}
% \includegraphics[width=0.45\textwidth]{figures/forward_vs_central_differentiation.png}
% \caption{Comparison of Forward, Backward, \& Central Differentiation}
% \label{forward_vs_central}
% \end{center}
% \end{figure}
% \FloatBarrier
%
%
%
% \paragraph{Method Specific Parameters:}
%
% This section is specific to the method being used. In the above example it is specific to the $OPT++$ library. An additional example has been given below however if curious the reader is advised to see the default optimization methods located in:
%
% \bigskip
%
% \begin{lstlisting}
% $./data/cathode/optimization/optimization_methods_cathode/
% \end{lstlisting}
% or
% \begin{lstlisting}
% $./data/mea/optimisation/optimization_methods_mea/
% \end{lstlisting}
%
% \bigskip
%
% In the following short example we are using a method from the SCOLIB library, the \texttt{coliny\_pattern\_search} algorithm. In this case we would change the \texttt{Optimization method = coliny\_pattern\_search} as appose to \texttt{optpp\_q\_newton} (\textit{line 17}).
%
% We then would then replace (\textit{line 33 - 41}) in the above \texttt{opt\_app\_optimization} file with the new method specific section.
%
% \begin{lstlisting}
% ######### Method Specific Parameters #########
% ######### SCOLIB (COLINY) #########
% #########----------------------------#########
% subsection coliny_pattern_search
% set Initial Delta = 2 # (default) 1
% set Threshold Delta = 0.0001 # (default) 0.0001
% end
% \end{lstlisting}
%
%
%
% \paragraph{Design Variables Section:}
%
% The \texttt{Design Variables} Section is similar to the \texttt{opt\_app\_parametric} file except for two additional subsections.
%
% \begin{enumerate}
% \item
% Scales
%
% \item
% Step size
% \end{enumerate}
%
%
% \paragraph{Scales:}
%
% The scales section has two specifications
%
% \begin{enumerate}
% \item
% \texttt{scale\_method}
%
% The \texttt{scale\_method} specifies whether you are going to specify no scale (\texttt{none}), \texttt{auto} scaling, \texttt{log}, or a \texttt{value}. In general it is good practice to specify a scale \texttt{value} as it allows the user to have a definite reference point, when using \texttt{auto} if there is a change in magnitude it will go unnoticed by the user in the final output solution.
%
% \item
% \texttt{scale} value
%
% The scale value is the magnitude of the variable. For example if the variable is temperature we know that the scale is 100 as temperature is given in Kelvin (353 - 368 [K]). If its Nafion loading the scale is 0.1 as Nafion loading is a percentage (20 - 50 \%).
% \end{enumerate}
%
%
%
% \paragraph{Step Size:}
%
% The step size refers to the $\bigtriangleup x $ in equations \ref{eq:Forward} \& \ref{eq:Central}. Greater the step size the less computations that will be required, however this also means the greatest error as the error is proportional to $(\bigtriangleup x)^2$. Therefore there is a fine trade off between computational time and error.
%
%
%
% \paragraph{Responses Section:}
%
% The Responses section has changes slightly compared to the \texttt{opt\_app\_parametric} file as we are now considering constrained optimization. If the above example was unconstrained optimization there would be no difference between the \texttt{opt\_app\_parametric} and \texttt{opt\_app\_optimization} responses section.
%
% There are two types of constraints:
%
% \begin{enumerate}
% \item
% Linear (Equality) Constraints (\textit{line 83})
%
% \item
% Non-Linear Constraints (\textit{line 84})
% \end{enumerate}
%
% In the above case we have three non-linear constrains and one linear constraint.
%
%
%
% \paragraph{Non-Linear Constraints:}
%
% Like the \texttt{Design Variable} section each nonlinear constraint requires a upper and lower bound (\textit{line 97 - 108}). If no finite upper or lower bound is to be specified $1e^{30}$ or $1e^{-30}$ can be specified.
%
%
% \paragraph{Linear (Equality) Constraints:} Unlike Non-Linear Constraints, Equality constraints only require the response variable to equal a value (\textit{line 112}).
%
%
%
%
%
% %-----------------------------------------------------------------------------------------------------
% %--------------------- Running FCST
% %--------------------- Multi-Objective Optimization
% %-----------------------------------------------------------------------------------------------------
% \section{Multi-Objective Optimization using OpenFCST} \label{sec:multi_objective_optimization}
%
% To achieve multi-objective optimization we must first change three parameters.
%
% \begin{enumerate}
% \item
% \texttt{Optimization strategy} (\textit{line 16})
%
% \item
% \texttt{num\_design\_variables} (\textit{line 84})
%
% \item
% \texttt{num\_objectives} (\textit{line 82})
%
% \end{enumerate}
%
%
%
% \paragraph{Optimization strategy}
%
% When carrying out multi-objective optimization we can no longer optimize for just one objective function this is especially the case when an improvement of one objective comes at the expense of another (\textit{Performance \& Cost}). In order account for the additional objective function we incorporate weighting factors which specifies the importance of one objective over the other. The weights are referred to a \textbf{Pareto Weights} or \textbf{Pareto Set}.
%
% In OpenFCST the default Pareto set is for two design variables. Figure \ref{pareto_set} below shows the multi-objective weights for two design variables.
%
%
%
% \FloatBarrier
% \begin{figure}[htbp]
% \begin{center}
% \includegraphics[width=0.4\textwidth]{figures/pareto_set.png}
% \caption{Pareto Set for 2 Design Variables}
% \label{pareto_set}
% \end{center}
% \end{figure}
% \FloatBarrier
%
%
% % \begin{center}
% %
% %
% % \begin{tabular}{lll}
% % Pareto Set \# &Obj\_Function\_1 & Obj\_Function\_2 \\
% % 0 & 1.0 & 0.0 \\
% % 1 & 0.9 & 0.1 \\
% % 2 & 0.8 & 0.2 \\
% % 3 & 0.7 & 0.3 \\
% % 4 & 0.6 & 0.4 \\
% % 5 & 0.5 & 0.5 \\
% % 6 & 0.4 & 0.6 \\
% % 7 & 0.3 & 0.7 \\
% % 8 & 0.2 & 0.8 \\
% % 9 & 0.1 & 0.9 \\
% % 10 & 0.0 & 1.0 \\
% % \end{tabular}
% %
% % \end{center}
%
%
%
% \paragraph{num\_design\_variables \& num\_objectives:}
%
% Once the \texttt{Optimization strategy} has been set to \texttt{pareto\_set} we then change both the num\_design\_variables \& num\_objectives to 2 or whatever number of design variables are specified.
% At present OpenFCST like most multi-objective engineering problems, considers only two design variables however this can be easily modified by changing the default Pareto set found in \texttt{dakota\_application.cc}.
%
% \section{DAKOTA Methods} \label{dakota_methods}
%
% The following list is all of the current DAKOTA Methods available as of \textbf{1-MAY-2013}. The methods are known to work with OpenFCST and can be utilized. For detailed discriptions on the individual methods see DAKOTA manuals.
%
% \begin{multicols}{3}
% \begin{enumerate}
% \item \textbf{asynch\_pattern\_search}
% \item bayes\_calibration
% \item centered\_parameter\_study
% \item \textbf{coliny\_cobyla}
% \item \textbf{coliny\_direct}
% \item \textbf{coliny\_ea}
% \item \textbf{coliny\_pattern\_search}
% \item \textbf{coliny\_solis\_wets}
% \item \textbf{conmin\_frcg}
% \item \textbf{conmin\_mfd}
% \item dace
% \item dl\_solver
% \item dot
% \item dot\_bfgs
% \item dot\_frcg
% \item dot\_mmfd
% \item dot\_slp
% \item dot\_sqp
% \item \textbf{efficient\_global}
% \item fsu\_cvt
% \item fsu\_quasi\_mc
% \item global\_evidence
% \item global\_interval\_est
% \item global\_reliability
% \item importance\_sampling
% \item list\_parameter\_study
% \item local\_evidence
% \item local\_interval\_est
% \item local\_reliability
% \item \textbf{moga}
% \item \textbf{multidim\_parameter\_study}
% \item \textbf{ncsu\_direct}
% \item \textbf{nl2sol}
% \item nlpql\_sqp
% \item nlssol\_sqp
% \item nonlinear\_cg
% \item npsol\_sqp
% \item optpp\_cg
% \item \textbf{optpp\_fd\_newton}
% \item \textbf{optpp\_g\_newton}
% \item optpp\_newton % Needs the Hessian Matrix
% \item \textbf{optpp\_pds}
% \item \textbf{optpp\_q\_newton}
% \item polynomial\_chaos
% \item \textbf{psuade\_moat}
% \item richardson\_extrap
% \item sampling
% \item \textbf{soga}
% \item stanford
% \item stoch\_collocation
% \item surrogate\_based\_global
% \item surrogate\_based\_local
% \item vector\_parameter\_study
% \end{enumerate}
% \end{multicols}
%
%
%
%
%
%
%
% %-----------------------------------------------------------------------------------------------------
% %--------------------- Running OpenFCST
% %--------------------- Optimization Path-line
% %-----------------------------------------------------------------------------------------------------
% \section{Fuel Cell Design \& Optimization Using OpenFCST}
% As we have seen above, OpenFCST also has the capability to perform optimization studies. Any application that inherits from \texttt{OptimizationBlockMatrixApplication} has the appropriate interface to be used for optimization studies. Information on how to run an optimization can be found in sections \ref{Optimization_using_FCST} \& \ref{sec:multi_objective_optimization}.
%
% To perform optimization studies, OpenFCST interfaces with the open-source DAKOTA library developed by Sandia National Laboratories. For more information about the DAKOTA library please \href{http://dakota.sandia.gov/software.html}{click here}. The OpenFCST developers have written an interface so that DAKOTA and OpenFCST can interact seamlessly.
%
% \subsection{OpenFCST classes that interact with DAKOTA (Developers Only)}
% Interaction between OpenFCST and DAKOTA is achieved by using \texttt{simulator\_builder}, which will call the \texttt{run\_optimization()} function.
%
% When OpenFCST is run as seen in figure \ref{analysis_schematic}, the OpenFCST code is called once in order to run a specific data point. However, in parametric or optimization studies we require multiple points to be evaluated. This requires the use of the DAKOTA libraries in order to change the variables after each iteration. The two main files used to interface with DAKOTA are:
%
%
% \begin{enumerate}
% \item
% \texttt{dakota\_direct\_interface}
%
% \item
% \texttt{dakota\_application}
%
% \end{enumerate}
%
% Once the initial stages of the code have been carried out by \texttt{simulator\_builder}, \texttt{simulator\_selector}, and \texttt{dakota\_application}, declaring and initializing all the variables from the \texttt{main\_app\_}, \texttt{data\_app\_}, \& \texttt{opt\_app\_} files, the \texttt{main.cc} file proceeds to the \texttt{run()} function in \texttt{simulator\_builder.cc} (see below) in order to run the simulation.
%
% In the \texttt{run()} function we can see in line 10 where the code checks whether it is running an analysis or a parametric/optimization study, as explained in \ref{main_application_file}. During parametric/optimization studies the code will enter line 12 and proceed to the \texttt{run\_optimization()} function in \texttt{simulator\_builder.cc}.
%
%
% \begin{lstlisting}
% template<int dim>
% void SimulatorBuilder<dim>::run()
% {
% timer.restart();
%
% if (run_tests) run_test();
% else
% {
% if (dakota_use || dakota_direct)
% {
% run_optimization();
% }
% else
% {
% //-- Select the application you want to run:
% app_lin = sim_selector->select_application();
% //-- Select the solver you want to run:
% newton = sim_selector->select_solver(app_lin.get());
% //-- Select the solving method you want to run, e.g. adaptive refinement:
% solver = sim_selector->select_solver_method(app_lin.get(), newton.get());
% // Here we have collected all information:
% deallog << "Run program using input file: " << simulator_parameter_file_name << std::endl;
% deallog.pop();
% solver->solve(simulator_parameter_file_name, param);
% timer.stop();
% }
% }
%
% timer.stop();
% deallog.push("MAIN");
% deallog << "The program was executed in: " << timer.wall_time() << " seconds " << std::endl;
% deallog << "=============== END ====================" << std::endl;
% deallog.pop();
%
% }
% \end{lstlisting}
%
% \bigskip
%
% The main points to note once we enter the \texttt{run\_optimization()} function are:
%
% \begin{enumerate}
% \item
% Is DAKOTA running in parallel or in series? (\textit{line 8})
%
% As of \textbf{1-MAY-2013}, running in series is the only option available. This may change in the future.
%
% \item
% Are we running a Non-Linear Least Squares (NLS) method or standard parametric/optimization routine? (\textit{line 16-25})
%
% \end{enumerate}
%
% \paragraph{Note:}
%
% These are questions that are answered in the \texttt{opt\_app\_} file explained earlier in section \ref{Optimization_using_FCST}.
%
%
% \bigskip
%
% Once these have been specified the code will execute the \texttt{run()} function (\textit{line 28}), which begins the iterative loop until the parametric study has been completed or, in optimization, the stopping criteria have been met. An illustration of this can be seen in figure \ref{dakota_optimization_interface}, taken from Peter Dobson's 2012 paper.
%
%
% \begin{lstlisting}
% void SimulatorBuilder<dim>::run_optimization()
% {
% deallog.pop();
% if (dakota_direct)
% {
% // NOTE: Must declare these in order for parameter handler to not complain when reading the parameter file specified.
% // Not exclusively required for dakota application to run.
% Dakota::ParallelLibrary parallel_lib;
% shared_ptr<Dakota::ProblemDescDB> problem_db(new Dakota::ProblemDescDB (parallel_lib));
% SIM::DakotaApplication optimization(problem_db, optimization_parameter_file_name);
% optimization.declare_parameters(param);
% optimization.manage_inputs(param);
%
% Dakota::DirectApplicInterface* optimization_interface;
%
% if (optimization.use_NLS())
% {
% deallog<<"Entering DakotaLeastSquaresInterface"<<std::endl;
% optimization_interface = new SIM::DakotaLeastSquaresInterface<dim> (optimization, problem_db, param, sim_selector, simulator_parameter_file_name);
% }
% else
% {
% deallog<<"Entering DakotaDirectInterface"<<std::endl;
% optimization_interface = new SIM::DakotaDirectInterface<dim > (optimization, problem_db, param, sim_selector, simulator_parameter_file_name);
% }
%
% optimization.assign_interface(optimization_interface);
% optimization.run();
%
% deallog << "Optimization completed" << std::endl;
% .
% .
% .
% \end{lstlisting}
%
%
% If running a standard parametric/optimization routine, the following \texttt{dakota\_direct\_interface} function will be used.
%
% \begin{lstlisting}
% template <int dim>
% int DakotaDirectInterface<dim>::derived_map_ac(const Dakota::String& ac_name)
% \end{lstlisting}
%
% If running a Non-Linear Least Squares (NLS) method, the following \texttt{dakota\_direct\_interface} function will be used.
%
% \begin{lstlisting}
% template <int dim>
% int DakotaLeastSquaresInterface<dim>::derived_map_ac(const Dakota::String& ac_name)
% \end{lstlisting}
%
%
% \FloatBarrier
% \begin{figure}[htbp]
% \begin{center}
% \includegraphics[width=1\textwidth]{figures/fcst_dakota_interface_Dobson.png}
% \caption{Schematic of Fuel Cell Analysis Code and DAKOTA Optimization Interface (Dobson, 2012)}
% \label{dakota_optimization_interface}
% \end{center}
% \end{figure}
% \FloatBarrier
%%===============================================================
%%=============================================================== | {
"alphanum_fraction": 0.6000098848,
"avg_line_length": 48.1738095238,
"ext": "tex",
"hexsha": "3d5de1dfcfeb2836ff66adb846422d073d403cea",
"lang": "TeX",
"max_forks_count": 9,
"max_forks_repo_forks_event_max_datetime": "2020-11-21T13:51:05.000Z",
"max_forks_repo_forks_event_min_datetime": "2016-12-11T22:15:03.000Z",
"max_forks_repo_head_hexsha": "7e4404609bacd2ec796f6ca3ea118e8e34ab4a22",
"max_forks_repo_licenses": [
"MIT"
],
"max_forks_repo_name": "jeremyjiezhou/Learn-PyTorch",
"max_forks_repo_path": "src/doc/RefGuide/LaTeX_files/optimization.tex",
"max_issues_count": null,
"max_issues_repo_head_hexsha": "7e4404609bacd2ec796f6ca3ea118e8e34ab4a22",
"max_issues_repo_issues_event_max_datetime": null,
"max_issues_repo_issues_event_min_datetime": null,
"max_issues_repo_licenses": [
"MIT"
],
"max_issues_repo_name": "jeremyjiezhou/Learn-PyTorch",
"max_issues_repo_path": "src/doc/RefGuide/LaTeX_files/optimization.tex",
"max_line_length": 569,
"max_stars_count": 24,
"max_stars_repo_head_hexsha": "7e4404609bacd2ec796f6ca3ea118e8e34ab4a22",
"max_stars_repo_licenses": [
"MIT"
],
"max_stars_repo_name": "jeremyjiezhou/Learn-PyTorch",
"max_stars_repo_path": "src/doc/RefGuide/LaTeX_files/optimization.tex",
"max_stars_repo_stars_event_max_datetime": "2022-03-12T19:07:10.000Z",
"max_stars_repo_stars_event_min_datetime": "2016-10-04T20:49:55.000Z",
"num_tokens": 10106,
"size": 40466
} |
% !TEX root = paper.tex
\section{Results}
\begin{figure*}[t!]
\centering
\includegraphics[width=0.32\linewidth]{./figures/benchmark/voi-Kasthuri-Luigi-Test.pdf}
\includegraphics[width=0.32\linewidth]{./figures/benchmark/voi-FlyEM-Vol--1.pdf}
\includegraphics[width=0.32\linewidth]{./figures/benchmark/voi-FlyEM-Vol--2.pdf}
\caption{Segmentation benchmark results on three volumes. We compare our method (red) to the baseline segmentation (green) and an oracle (blue) that optimally partitions our constructed graph from our method. Lower VI scores are better. Our method improves the segmentation accuracy over the baseline in all cases.
Note that our model is only trained on the Kasthuri training volume and it generalizes well to the FlyEM dataset.}
\label{fig:variation-of-information}
\end{figure*}
In Fig.~\ref{fig:variation-of-information}, we show the VI results of the pixel-based reconstructions of the Kasthuri and FlyEM data (Sec.~\ref{sec:dataset}) for varying thresholds of agglomeration (green).
We use one of these segmentations (green circle) as our input dataset with an agglomeration threshold of 0.3 for all datasets.
The results from our method are shown in red for varying the $\beta$ parameter.
We show comparisons to an oracle (blue) that correctly partitions the graph from our method based on ground truth.
Scores closer to the origin are better for this metric, and in every instance our results are below the green curve.
We see improvements on each dataset, with a reduction in total VI score of $10.4\%$ on the Kasthuri data and $8.9\%$ and $5.4\%$ on the FlyEM datasets.
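For reference, the total VI score reported here is the sum of the two conditional entropies between the ground truth labeling and the proposed segmentation; the following minimal sketch (shown only for illustration, not the exact evaluation code used in these experiments) computes it from two integer label volumes.
\begin{verbatim}
import numpy as np

def variation_of_information(seg, gt):
    """VI(S, T) = H(S | T) + H(T | S), from the joint label histogram."""
    seg, gt = np.asarray(seg).ravel(), np.asarray(gt).ravel()
    n = float(seg.size)
    # Joint distribution over (segment label, ground-truth label) pairs.
    pairs, joint_counts = np.unique(np.stack([seg, gt], axis=1),
                                    axis=0, return_counts=True)
    p_joint = joint_counts / n
    # Marginal probabilities, looked up for every occupied joint cell.
    seg_ids, seg_counts = np.unique(seg, return_counts=True)
    gt_ids, gt_counts = np.unique(gt, return_counts=True)
    p_seg = dict(zip(seg_ids, seg_counts / n))
    p_gt = dict(zip(gt_ids, gt_counts / n))
    p_s = np.array([p_seg[a] for a, b in pairs])
    p_t = np.array([p_gt[b] for a, b in pairs])
    # H(S|T) penalizes split errors, H(T|S) penalizes merge errors.
    h_s_given_t = -np.sum(p_joint * np.log(p_joint / p_t))
    h_t_given_s = -np.sum(p_joint * np.log(p_joint / p_s))
    return h_s_given_t + h_t_given_s
\end{verbatim}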
Fig.~\ref{fig:qualitative-results} (left) shows successful merges on the Kasthuri dataset.
Several of these examples combine multiple consecutive segments that span the volume.
In the third example on the left we correct the over-segmentation of a dendrite and attached spine-necks.
Fig.~\ref{fig:qualitative-results} (right) shows typical failure cases of our method (red circles).
In two of these examples the algorithm correctly predicts several merges before a single error renders the segment as wrong.
In the third example (blue circle) a merge error in the initial segmentation propagates to our output.
We now analyze how each major component of our method contributes to this final result.
\begin{figure}[t]
\begin{minipage}{0.45\linewidth}
\centering
\includegraphics[width=0.85\linewidth]{./figures/VI-results/multicut-correct1.png}
\includegraphics[width=0.85\linewidth]{./figures/VI-results/multicut-correct2.png}
\includegraphics[width=0.85\linewidth]{./figures/VI-results/multicut-correct3.png}
\includegraphics[width=0.85\linewidth]{./figures/VI-results/multicut-correct4.png}
\includegraphics[width=0.85\linewidth]{./figures/VI-results/multicut-correct5.png}
\end{minipage}
\begin{minipage}{0.45\linewidth}
\centering
\includegraphics[width=0.85\linewidth]{./figures/VI-results/multicut-incorrect1.png}
\includegraphics[width=0.85\linewidth]{./figures/VI-results/multicut-incorrect2.png}
\includegraphics[width=0.85\linewidth]{./figures/VI-results/multicut-incorrect3.png}
\includegraphics[width=0.85\linewidth]{./figures/VI-results/multicut-incorrect4.png}
\end{minipage}
\caption{(left) Segments of neurons before they were correctly merged by our method. (right) Circles indicate areas of wrong merges by our method (red) or by the initial pixel-based segmentation (blue).}
\label{fig:qualitative-results}
\end{figure}
\subsection{Empirical Ablation Studies}
\noindent\textbf{Graph Generation}
Table \ref{table:skeletonization} shows the results of pruning the skeleton graph using the algorithm discussed in Sec.~\ref{sec:skeletonization}.
This edge pruning is essential for the graph partitioning algorithm, which has a non-linear computational complexity dependence on the number of edges.
The baseline algorithm considers all adjacent regions for merging.
Our method removes a significant portion of these candidates while maintaining a large number of the true merge locations (e.g., 764 compared to 974).
Our pruning heuristic removes at least $3.5\times$ the number of edges on all datasets, achieving a maximum removal rate of $4.15\times$.
However, there are some adjacent over-segmented labels which are not considered.
\begin{table}
\caption{The results of our graph pruning approach compared to the baseline graph with all adjacent regions. We show the number of true merge locations (e.g., 974) compared to total number of edges in the graph (e.g., 25,798) for each case. The number of missed splits corresponds to the number of split errors that our method misses compared to an adjacency matrix.}
\resizebox{\linewidth}{!}{
\begin{tabular}{c c c c c} \hline
\textbf{Dataset} & \textbf{Segment Adjacency} & \textbf{Skeleton Pruning} & \textbf{Missed Splits} & \textbf{Gained Edges} \\ \hline
Kasthuri & 974 / 25,798 & 764 / 6,218 & 307 & 97 \\
FlyEM Vol. 1 & 304 / 15,949 & 212 / 4,578 & 105 & 13 \\
FlyEM Vol. 2 & 298 / 17,614 & 197 / 4,366 & 120 & 19 \\ \hline
\end{tabular}
}
\centering
\label{table:skeletonization}
\end{table}
We generate edges in our graph by using information from the skeletons.
In particular, we do not enforce the constraint that edges in our graph correspond to adjacent segments.
Although neurons are continuous, the EM images often have noisy spots which cause an interruption in the input segmentation.
We still want to reconstruct these neurons despite the fact that the initial segmentation is non-continuous.
The second and fourth examples in Fig.~\ref{fig:qualitative-results} show correctly reconstructed neurons where two of the segments are non-adjacent.
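The following minimal sketch illustrates this edge-generation step: two segments become a candidate edge whenever their closest skeleton endpoints lie within a distance threshold, regardless of whether the segments are adjacent. The data structures, the threshold value, and the names used here are illustrative assumptions rather than our exact implementation.
\begin{verbatim}
import itertools
import numpy as np

def candidate_edges(endpoints, max_distance=500.0):
    """Propose merge candidates from skeleton endpoints.

    endpoints: dict mapping segment id -> (k, 3) array of endpoint
    coordinates.  The threshold max_distance is an illustrative value.
    """
    edges = []
    for (id_a, pts_a), (id_b, pts_b) in itertools.combinations(
            endpoints.items(), 2):
        # Closest distance between any endpoint of one segment and
        # any endpoint of the other segment.
        diffs = pts_a[:, None, :] - pts_b[None, :, :]
        closest = np.sqrt((diffs ** 2).sum(axis=-1)).min()
        if closest <= max_distance:
            edges.append((id_a, id_b, closest))
    return edges
\end{verbatim}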
\\~\\
\noindent\textbf{Failure Cases}
There are some pairs of segments which we do not consider for merging because of our reliance on the skeletons.
Fig.~\ref{fig:skeleton-results} shows two such cases with the closest endpoints circled.
In the right example the small segment is carved from the larger segment in a location where there are no skeleton endpoints.
There are on average 177 such examples in our datasets.
\\~\\
\noindent\textbf{Edge Weight Learning}
%\subsubsection{Inference Augmentation}
%\noindent\textbf{Inference Augmentation.}
Data augmentation at test time can improve accuracy results~\cite{lee2017superhuman,zeng2017deepem3d}.
When computing the probability to merge two segments, we randomly rotate and flip the examples three times (in the same fashion as training augmentation).
The supplemental material contains experiments showing the trade-offs between increased accuracy and runtime when using these augmentation strategies.
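A minimal sketch of this test-time augmentation is given below; \texttt{predict\_fn} is a stand-in for the trained network (an assumed interface rather than our actual code), and the three extra rotated/flipped copies mirror the training-time augmentation.
\begin{verbatim}
import numpy as np

def augmented_merge_probability(predict_fn, example,
                                num_augmentations=3, seed=0):
    """Average the merge probability over rotated/flipped copies.

    predict_fn maps a 3D example (z, y, x) to a merge probability; it
    stands in for the trained network and is an assumed interface.
    """
    rng = np.random.RandomState(seed)
    probabilities = [predict_fn(example)]
    for _ in range(num_augmentations):
        # Random rotation in the (y, x) plane plus an optional flip.
        augmented = np.rot90(example, k=rng.randint(4), axes=(1, 2))
        if rng.rand() < 0.5:
            augmented = np.flip(augmented, axis=2)
        probabilities.append(predict_fn(augmented))
    return float(np.mean(probabilities))
\end{verbatim}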
\begin{figure}[t!]
\centering
\begin{minipage}{0.45\linewidth}
\includegraphics[width=\linewidth]{./figures/merge_candidate1.png}
\end{minipage}
\hfill
\begin{minipage}{0.45\linewidth}
\includegraphics[width=\linewidth]{./figures/merge_candidate2.png}
\end{minipage}
\begin{minipage}{0.45\linewidth}
\includegraphics[width=\linewidth]{./figures/merge_candidate3.png}
\end{minipage}
\begin{minipage}{0.45\linewidth}
\includegraphics[width=\linewidth]{./figures/merge_candidate4.png}
\end{minipage}
\caption{The top two examples correspond to segment pairs that we incorrectly prune from the graph. The distance between the circled endpoints is too great. The bottom two examples show pairs of segments that belong to the same neuron but are not adjacent in the input segmentation. However, we correctly merge these pairs.}
\label{fig:skeleton-results}
\end{figure}
Fig.~\ref{fig:receiver-operating-characteristic} shows the receiver operating characteristic (ROC) curve of our CNN classifier for all test datasets.
As shown by the ROC curve, the test results on the FlyEM data are better than the results for Kasthuri.
In part this comes from the disparity in the number of positive to negative merge candidates in the two graphs.
The network easily classifies most of the negative examples leaving only a few difficult examples to predict.
Since there are more negative examples relative to positive examples in the FlyEM data, the corresponding ROC curves appear better.
\begin{figure}
\centering
\includegraphics[width=0.45\linewidth]{./figures/receiver-operating-characteristic.png}
\caption{The receiver operating characteristic (ROC) curves of our classifier on three connectomics datasets. The classifier works best on previously unseen data of the Kasthuri volume.}
\label{fig:receiver-operating-characteristic}
\end{figure}
%\subsubsection{Generalization to Other Datasets}
%\noindent\textbf{Generalization to Other Datasets.}
Our neural network does not take as input the images or affinities so we can transfer network weights from one dataset to the next.
The classification results are comparable on the FlyEM datasets despite the fact that the data varies in resolution and even animal type from the training set.
\\~\\
%\subsection{Graph Optimization Results}
\noindent\textbf{Graph Partition}
The graph optimization strategy using multicut increases accuracy over using just the CNN.
Table \ref{table:multicut} shows the changes in precision, recall, and accuracy for all four datasets compared to the CNN with both the multicut and lifted multicut formulations.
These results correspond to $\beta = 0.95$ (Sec.~\ref{sec:edge-weights}).
The precision increases on each dataset, although the recall decreases on each dataset.
Since it is more difficult to correct merge errors than split errors, it is often desirable to sacrifice recall for precision.
Over the three testing datasets, applying a graph-based partitioning strategy increased the precision by $31.9\%$, $40.9\%$, and $27.8\%$ respectively.
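For illustration, one common way to turn merge probabilities into signed edge weights with a bias $\beta$ is the log-likelihood-ratio transform sketched below; this is a standard multicut convention, shown only to convey how $\beta$ trades recall for precision, and it is not necessarily the exact weighting used in Sec.~\ref{sec:edge-weights}.
\begin{verbatim}
import math

def edge_weight(p_merge, beta=0.95, eps=1e-6):
    """Signed edge weight from a merge probability with bias beta.

    Positive weights favor merging the two segments and negative
    weights favor keeping them split; a larger beta penalizes merges
    more strongly.  This is one common convention, shown only as an
    illustration.
    """
    p = min(max(p_merge, eps), 1.0 - eps)
    return math.log(p / (1.0 - p)) + math.log((1.0 - beta) / beta)
\end{verbatim}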
\begin{table}[h]
\caption{Precision, recall, and accuracy changes between CNN only and CNN paired with graph-optimized reconstructions for the training and three test datasets. The combined method results in better precision and accuracy. The lifted multicut extension provides very slight improvements in recall and accuracy over these three datasets.}
\centering
\resizebox{\linewidth}{!}{
\begin{tabular}{c c c c | c c c} \hline
& \multicolumn{3}{c}{\textbf{Multicut}} & \multicolumn{3}{c}{\textbf{Lifted Multicut}} \\ \hline
\textbf{Dataset} & $\Delta$ \textbf{Precision} & $\Delta$ \textbf{Recall} & $\Delta$ \textbf{Accuracy} & $\Delta$ \textbf{Precision} & $\Delta$ \textbf{Recall} & $\Delta$ \textbf{Accuracy} \\ \hline
Kasthuri & 31.94\% & -36.24\% & 0.71\% & -1.01\% & 0.60\% & 0.02\% \\
FlyEM Vol. 1 & 40.87\% & -42.37\% & 1.26\% & 0.35\% & 0.85\% & 0.04\% \\
FlyEM Vol. 2 & 27.80\% & -44.95\% & 0.33\% & 0.54\% & 0.92\% & 0.04\% \\ \hline
\end{tabular}
}
\label{table:multicut}
\end{table}
\subsection{Computational Performance}
%\subsubsection{System}
\noindent\textbf{System}
All performance experiments ran on an Intel Core i7-6800K CPU 3.40 GHz with a Titan X Pascal GPU. All code is written in Python and is freely available (link omitted for review). We use the Keras deep learning library for our neural networks with Theano backend and cuDNN 7 acceleration for CUDA 8.0.
Every step in our framework is fast enough for large connectomics datasets.
On our training data, after downsampling, each segment was skeletonized in 0.56 seconds on average.
There are 4451 such segments resulting in a total running time of 2,492 seconds on an 800 megavoxel volume.
From here, it took just over 31 seconds to generate the edges for our graph.
On our hardware, inference on the neural network has a throughput of 30 examples per second.
Lastly, the multicut algorithm ran for 25.28 seconds while the lifted multicut variant took 36.5 seconds on the graph from this dataset.
| {
"alphanum_fraction": 0.7768475011,
"avg_line_length": 70.5120481928,
"ext": "tex",
"hexsha": "e65ad56cdeb851e7ef8b1d94505b6b771c74aee7",
"lang": "TeX",
"max_forks_count": null,
"max_forks_repo_forks_event_max_datetime": null,
"max_forks_repo_forks_event_min_datetime": null,
"max_forks_repo_head_hexsha": "898134a96e299d8106d9deb7b217671c39bfeca2",
"max_forks_repo_licenses": [
"MIT"
],
"max_forks_repo_name": "romil797/ibex",
"max_forks_repo_path": "papers/eccv2018/4_results.tex",
"max_issues_count": null,
"max_issues_repo_head_hexsha": "898134a96e299d8106d9deb7b217671c39bfeca2",
"max_issues_repo_issues_event_max_datetime": null,
"max_issues_repo_issues_event_min_datetime": null,
"max_issues_repo_licenses": [
"MIT"
],
"max_issues_repo_name": "romil797/ibex",
"max_issues_repo_path": "papers/eccv2018/4_results.tex",
"max_line_length": 368,
"max_stars_count": null,
"max_stars_repo_head_hexsha": "898134a96e299d8106d9deb7b217671c39bfeca2",
"max_stars_repo_licenses": [
"MIT"
],
"max_stars_repo_name": "romil797/ibex",
"max_stars_repo_path": "papers/eccv2018/4_results.tex",
"max_stars_repo_stars_event_max_datetime": null,
"max_stars_repo_stars_event_min_datetime": null,
"num_tokens": 3071,
"size": 11705
} |
\newpage
\section*{Utility libraries}
\ocwsection \label{lib}
This chapter describes the various utility libraries used in the code
of \Coq.
| {
"alphanum_fraction": 0.7832167832,
"avg_line_length": 17.875,
"ext": "tex",
"hexsha": "35bd15fa1735f1bcaae132c56229f34f5f52396a",
"lang": "TeX",
"max_forks_count": 2,
"max_forks_repo_forks_event_max_datetime": "2022-03-28T04:39:41.000Z",
"max_forks_repo_forks_event_min_datetime": "2021-01-19T17:56:28.000Z",
"max_forks_repo_head_hexsha": "c344e76263de04311af8a0030c07aec95d87f71c",
"max_forks_repo_licenses": [
"MIT"
],
"max_forks_repo_name": "reichel3/TacTok",
"max_forks_repo_path": "coq/lib/doc.tex",
"max_issues_count": 4,
"max_issues_repo_head_hexsha": "c344e76263de04311af8a0030c07aec95d87f71c",
"max_issues_repo_issues_event_max_datetime": "2021-11-13T00:07:38.000Z",
"max_issues_repo_issues_event_min_datetime": "2021-02-23T03:03:51.000Z",
"max_issues_repo_licenses": [
"MIT"
],
"max_issues_repo_name": "reichel3/TacTok",
"max_issues_repo_path": "coq/lib/doc.tex",
"max_line_length": 69,
"max_stars_count": 7,
"max_stars_repo_head_hexsha": "c344e76263de04311af8a0030c07aec95d87f71c",
"max_stars_repo_licenses": [
"MIT"
],
"max_stars_repo_name": "reichel3/TacTok",
"max_stars_repo_path": "coq/lib/doc.tex",
"max_stars_repo_stars_event_max_datetime": "2022-03-18T03:03:33.000Z",
"max_stars_repo_stars_event_min_datetime": "2020-11-23T02:45:36.000Z",
"num_tokens": 38,
"size": 143
} |
What follows are instructions for interacting with the Galant graphical user
interface, primarily for creating, editing, saving and loading graphs and
animations and for executing animations.
\subsection{Overview}
Galant provides three major components across two windows:
\begin{enumerate}
\item
a text window that can serve two distinct purposes --
\begin{enumerate}
\item as an editor of algorithms
\item as an editor of GraphML representations of graphs
\end{enumerate}
\item
a graph window that displays the current graph (independent of whether
the text window shows an algorithm or the GraphML representation of the graph)
\end{enumerate}
It is usually more convenient to edit algorithms
offline using a program editor such as Emacs or Notepad++, setting
preferences so that files with the .alg extension are treated as if they were
Java source files.
The primary use of the text editor is to correct minor errors and
to see the syntax highlighting related to Galant API functions and macros.
The graph window is the primary mechanism for editing graphs.
One exception is when precise adjustments of node positions are desired.
Weights and labels are sometimes also easier to edit in the text window.
\input{Y-user_interface}
These components operate in two modes: edit mode and animation mode.
Edit mode allows the user to modify graphs -- see Sec.~\ref{sec:graph_editing},
or algorithms -- see Sec.~\ref{sec:algorithm_editing}. Animation mode disables all forms of modification, allowing the user to progress through
an animation by stepping forward or backward, as described in
Sec.~\ref{sec:animating_algorithms}.
\subsection{Workspace}
Opened graph and algorithm files are displayed in the text window, which has
tabs that allow the user to switch among different algorithms/graphs. New
algorithms are created using the icon that looks like a page of text at the
top left of the window; new graphs are created using the graph/tree icon to
the left of that. More commonly, algorithm and graph files are loaded via
the \Code{File$\rightarrow$Open} browser dialog. The \Code{File} drop-down
menu also allows saving of files and editing of preferences. Algorithm files
have the extension \Code{.alg} and graph files the extension \Code{.graphml}.
Fig.~\ref{fig:user_interface} shows both the graph window (top) and the text
window (bottom). Annotations on the graph window describe the components of
the window that can be used to edit a graph visually.
\subsection{Graph editing}
\label{sec:graph_editing}
Graphs can be edited in their GraphML representation using the text window
or visually using the graph window.
These editors are linked:
any change in the visual representation is immediately reflected in the text
representation (and will overwrite what was originally there);
a change in the GraphML representation will take effect in the visual representation
only when the file is saved.
An improperly formatted GraphML file loaded from an external source will
result in an error.
Galant reports errors of all kinds (during reading of files, compilation of
animation programs or execution of animations)
by displaying a pop up window that allows the user to choose whether to
continue (and usually return to a stable state) or quit the program.
Error information, including a stack trace, is also displayed on the console.
The graph window, as illustrated at the top of Fig.~\ref{fig:user_interface},
has a toolbar with four sections:
\begin{enumerate}
\item
\textbf{Graph edit mode -- }
this includes the \emph{select}, \emph{create node}, \emph{create edge}, and \emph{delete} buttons.
Only one button is active at any time; it determines the effect
of a user's interaction (mouse clicking, dragging, etc.) with the window.
If there are conflicts in selection of objects, nodes with higher id numbers have precedence (are above those with lower id numbers) and nodes
have precedence over edges (are above edges -- so that the lines
corresponding to edges are hidden below the filled circles corresponding to nodes).
\begin{itemize}
\item \emph{Select.} A mouse click selects the graph component with highest
precedence. If the component is a node, it is shaded light blue; if it's
an edge, it becomes dashed. The in-line editor at the bottom of the graph
window allows editing of the component's label, weight, and color. This is
the default mode. The square should be outlined in gray unless the user
wants to alter the graph in some way. If any other mode is active, e.g.,
delete, the consequences can be unpleasant (there is currently no ``undo''
mechanism). A future release will have ``modeless'' editing.
\item \emph{Create node.}
A node is created at the location of a mouse click if there is not already a node there.
If another node is present it is simply selected.
\item \emph{Create edge.}
Two clicks are required to create an edge. The first falls on the desired
source node and the second on the target node.
The line representing the edge is shown after the first click.
If the first click does not land on a node, no edge is created.
If the second click does not land on a node, creation of the edge is canceled.
\item \emph{Delete.}
A mouse click deletes the highest-precedence component at the mouse location.
If a node is deleted, all of its incident edges are deleted as well.
\end{itemize}
\item
\textbf{Directedness toggles --}
These change both the interpretation and the
display of the graph between directed and undirected.
Pressing the undirected (line without arrows) button causes
all edges to be interpreted as undirected: this means that, when the code
calls for all incoming/outgoing edges, all incident edges are used.
Undirected edges are displayed as simple lines.
Pressing the directed (line with arrow) button causes the macros
\Code{for\_incoming}, \Code{for\_outgoing}, and \Code{for\_adjacent}
to have three distinct meanings (they are all the same for undirected graphs):
Incoming edges have the given node as target, outgoing as source, and adjacent applies to all incident edges.
\item
\textbf{Display toggles --}
The four display toggles turn on/off the display of node/edge labels and node/edge weights.
A shaded toggle indicates that the corresponding display is \emph{on}.
When Galant is executed for the first time, all of these are \emph{on},
but their setting persists from session to session.
Labels and weights are also all displayed at the beginning of execution of
an animation.
The animation program can choose to hide labels and/or weights with simple
directives.
Hiding is often unnecessary -- the graphs that are subjects of the animations
typically have only the desired attributes set.
\item
\textbf{Force directed drawing button -- }
Applies Hu's force directed algorithm~\cite{2006-Mathematica-Hu} to the graph.
Pushing the button a second time causes the drawing to revert to its previous state.
Fig.~\ref{fig:force_directed} illustrates the use of force directed drawing
to massage a randomly generated graph for convenient use in animations.
The graph was generated randomly as a list of edges with weights
and converted to graphml using a simple script.
Galant, when reading the graph initially, assigned random positions to the nodes.
\end{enumerate}
\begin{table}[t]
\centering
\begin{tabular}{| l | p{0.8\textwidth} |}
\hline
\textsf{Ctrl-n} & create a new node in a random position \\ \hline
\textsf{Ctrl-e} & create a new edge; user is prompted for id's of the nodes to
be connected \\ \hline
\textsf{Ctrl-i} & do a smart repositioning (force-directed)
of nodes of the graph, useful when positions were chosen randomly
\\ \hline
\textsf{Del-n} & (hold delete key when typing \Code{n})
delete a node; user is prompted for id \\ \hline
\textsf{Del-e} & delete an edge; user is prompted for id's of the endpoints \\ \hline
\textsf{Ctrl-$\ell$} & toggle display of node labels \\ \hline
\textsf{Ctrl-L} & toggle display of edge labels \\ \hline
\textsf{Ctrl-w} & toggle display of node weights \\ \hline
\textsf{Ctrl-W} & toggle display of edge weights \\ \hline
\textsf{Ctrl-d} & toggle graph directedness
\\ \hline
\end{tabular}
\caption{Keyboard shortcuts for editing operations.}
\label{tab:keyboard_shortcuts}
\end{table}
\input{Y-force_directed}
Table~\ref{tab:keyboard_shortcuts} lists keyboard shortcuts for various
editing operations.
\subsection{Algorithm editing}
\label{sec:algorithm_editing}
Algorithms can be edited in the text window. The
editor uses Java keyword highlighting (default blue) and
highlighting of Galant API fields and methods (default green).
Since the current algorithm editor is fairly primitive (no search and replace, for example),
it is more efficient to edit animation code offline using a program editor --
for example \Code{emacs} with Java mode turned on.
The Galant editor is, however, useful for locating and correcting minor errors.
For more details on how to compose animation code, see the programmer guide
(Section~\ref{sec:programmer_guide}).
\subsection{Animating algorithms}
\label{sec:animating_algorithms}
\input{Y-dijkstra_running}
To animate an algorithm the code for it must be compiled and then run via the
algorithm controls
-- the bottom tabs on the text window shown in Fig.~\ref{fig:dijkstra_running}.
The algorithm runs on the \emph{active graph}, the one currently displayed
on the graph window, also shown in Fig.~\ref{fig:dijkstra_running}.
While the algorithm is running, its text is grayed out to indicate that it
cannot be edited.
If there are errors in compilation these will show up on the console (terminal
from which Galant was run) and in a dialog box that allows the user to ask for more details
and decide whether to exit Galant or not.
The console also displays what the code looks like after macro replacement
in case obscure errors were the result of unexpected macro expansion.
Line numbers in the macro-expanded code match those of the original so that
all errors reported by the Java compiler will refer to the correct line number
in the original Galant code.
Runtime errors also open the above mentioned
dialog box.
When the user initiates execution of an animation by pushing the \textsf{Run}
button
the animation program
steps forward until it
displays the next animation event or, if a \Code{beginStep()}
call has marked the start of a sequence of events, until
it reaches the next \Code{endStep()} call.
It then pauses execution and waits for the user to decide whether to
step forward, step backward, or exit.
A step forward resumes execution while a step backward returns the display to a previous
state.
The algorithm resumes execution only when the \emph{display state}
indicated by the user's sequence of forward and backward steps
($f-b$, where $f$ is the number of forward and $b$ the number of backward steps)
exceeds the \emph{algorithm state}, the number of animation steps the algorithm
has executed.
The user controls forward and backward steps using either the buttons at the
bottom of the graph window (shown in Fig.~\ref{fig:dijkstra_running})
or the right/left arrow keys.
During the execution of the animation, all graph editing functions are disabled.
These are re-enabled when the user exits the animation by pressing the red \textbf{X} button or the \textsf{Esc} (escape) key on the terminal.
\subsection{Preferences}
\label{sec:preferences}
Galant preferences can be accessed via the \Code{File$\rightarrow$Preferences}
menu item or by using the keyboard shortcut \textsf{Ctrl-P}
(\textsf{Cmd-P} for Mac).
Preferences that can be edited are:
\begin{itemize}
\item
Default directories for opening and saving files (\Code{Open/Save}).
\item
Directory where compiled animation code is stored (\Code{Compilation}).
\item
Font size and tab size for text window editing (\Code{Editors}).
\item
Colors for keyword and Galant API highlighting (\Code{Algorithm~Editor}).
\item
Color for GraphML highlighting (\Code{Textual~Graph~Editor}).
\item
Node radius (\Code{Graph~Display});
when the radius is below a threshold (9 pixels), node id's are not displayed;
this is useful when running animations on large graphs.
\item
Edge width and node boundary width, both normal and highlighted/colored
(\Code{Graph~Display}).
\end{itemize}
\input{Y-preference_panels}
Fig.~\ref{fig:preference_panels} shows the four preference panels of most
interest to the user.
% [Last modified: 2017 01 19 at 21:41:40 GMT]
| {
"alphanum_fraction": 0.7830188679,
"avg_line_length": 46.8464419476,
"ext": "tex",
"hexsha": "15bd07f8fdc30c70aa9518eb6abe8ad3a58029fd",
"lang": "TeX",
"max_forks_count": 6,
"max_forks_repo_forks_event_max_datetime": "2021-03-18T13:01:54.000Z",
"max_forks_repo_forks_event_min_datetime": "2018-03-13T13:36:47.000Z",
"max_forks_repo_head_hexsha": "dca42697fb3f12b9b8b624818badcadf83bf503b",
"max_forks_repo_licenses": [
"Naumen",
"Condor-1.1",
"MS-PL"
],
"max_forks_repo_name": "mfms-ncsu/galant",
"max_forks_repo_path": "Technical-Documentation/user_documentation.tex",
"max_issues_count": null,
"max_issues_repo_head_hexsha": "dca42697fb3f12b9b8b624818badcadf83bf503b",
"max_issues_repo_issues_event_max_datetime": null,
"max_issues_repo_issues_event_min_datetime": null,
"max_issues_repo_licenses": [
"Naumen",
"Condor-1.1",
"MS-PL"
],
"max_issues_repo_name": "mfms-ncsu/galant",
"max_issues_repo_path": "Technical-Documentation/user_documentation.tex",
"max_line_length": 143,
"max_stars_count": 24,
"max_stars_repo_head_hexsha": "dca42697fb3f12b9b8b624818badcadf83bf503b",
"max_stars_repo_licenses": [
"Naumen",
"Condor-1.1",
"MS-PL"
],
"max_stars_repo_name": "mfms-ncsu/galant",
"max_stars_repo_path": "Technical-Documentation/user_documentation.tex",
"max_stars_repo_stars_event_max_datetime": "2021-04-13T02:35:15.000Z",
"max_stars_repo_stars_event_min_datetime": "2017-01-19T13:26:49.000Z",
"num_tokens": 2908,
"size": 12508
} |
%Contents of the plugin features section of D5.5
In order to guide the user on how best to exploit certain features of the MPI Parameters Plugin we will focus on the Fish School Simulator application (FSSIM) \cite{Mostaccio05}.
\subsubsection{Multiple MPI flavors}\label{para:MPI-mult}
The MPI Parameters plugin offers some degree of support for three different implementations of MPI: IBM MPI, Intel MPI and OpenMPI. In these cases, the keywords {\tt ibm}, {\tt intel}, or {\tt openmpi} should appear at the beginning of the configuration file. Then, the plugin interprets that the user is specifying command line options and modifies the application execution command according to the syntax established by the specific implementation.
Assuming that the user has included the parameter for tuning the eager limit in the configuration file, the MPI Parameters plugin will, for example, generate the following command line in each case:
\begin {itemize}
\item IBM MPI: mpiexec -n 64 executable -eager\_limit 16384
\item Intel MPI: mpiexec -genv I\_MPI\_EAGER\_THRESHOLD 16384 -n 64 executable
\item OpenMPI: mpiexec -mca osc\_pt2pt\_eager\_limit 16384 -n 64 executable
\end{itemize}
In any other case, the plugin interprets that the parameters indicated by the user are environment variables and produces the corresponding export commands before re-executing the application. In addition, three example configuration files are provided along with the MPI Parameters plugin, one for each supported implementation (Intel MPI, IBM MPI and OpenMPI). They can be used as a quite complete starting point for tuning a rich set of MPI parameters. However, the user can also modify them (eliminating, adding or changing parameters) with the objective of fitting them to a particular application.
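A hypothetical sketch of how such flavor-specific command lines (and the environment-variable fallback) could be assembled is shown below; it is meant only to illustrate the syntactic differences described above and is not the plugin's actual code.
\begin{verbatim}
def build_command(flavor, params, executable, nprocs=64):
    """Assemble an MPI launch command for one scenario (illustrative only).

    flavor: 'ibm', 'intel' or 'openmpi'; params: dict parameter -> value.
    """
    if flavor == "intel":
        # Intel MPI: parameters passed as environment variables via -genv.
        opts = " ".join("-genv %s %s" % (k, v) for k, v in params.items())
        return "mpiexec %s -n %d %s" % (opts, nprocs, executable)
    if flavor == "openmpi":
        # Open MPI: parameters are MCA options passed with -mca.
        opts = " ".join("-mca %s %s" % (k, v) for k, v in params.items())
        return "mpiexec %s -n %d %s" % (opts, nprocs, executable)
    if flavor == "ibm":
        # IBM MPI: command line options appended after the executable.
        opts = " ".join("-%s %s" % (k, v) for k, v in params.items())
        return "mpiexec -n %d %s %s" % (nprocs, executable, opts)
    # Any other case: parameters exported as environment variables
    # before re-executing the application.
    exports = "; ".join("export %s=%s" % (k, v) for k, v in params.items())
    return "%s; mpiexec -n %d %s" % (exports, nprocs, executable)

# build_command("ibm", {"eager_limit": 16384}, "executable")
# -> 'mpiexec -n 64 executable -eager_limit 16384'
\end{verbatim}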
The three configuration files included with the PTF release are the following:
\begin {enumerate}
\item IBM MPI
\begin{verbatim}
MPIPO_BEGIN ibm
eager_limit=4096:2048:65560;
buffer_mem=8388608:2097152:134217728;
use_bulk_xfer=yes,no;
bulk_min_msg_size=4096:4096:1048576;
pe_affinity=yes,no;
cc_scratch_buf=yes,no;
wait_mode=nopoll,poll;
css_interrupt=yes,no;
polling_interval=100000:10000:1000000;
SEARCH=gde3;
MPIPO_END
\end{verbatim}
\item Intel MPI
\begin{verbatim}
MPIPO_BEGIN intel
I_MPI_EAGER_THRESHOLD=4096:2048:65560;
I_MPI_INTRANODE_EAGER_THRESHOLD=4096:2048:65560;
I_MPI_SHM_LMT=shm,direct,no;
I_MPI_SPIN_COUNT=1:2:500;
I_MPI_SCALABLE_OPTIMIZATION=yes,no;
I_MPI_WAIT_MODE=yes,no;
I_MPI_USE_DYNAMIC_CONNECTIONS=yes,no;
I_MPI_SHM_FBOX=yes,no;
I_MPI_SHM_FBOX_SIZE=2048:512:65472;
I_MPI_SHM_CELL_NUM=64:4:256;
I_MPI_SHM_CELL_SIZE=2048:1024:65472;
SEARCH=gde3;
MPIPO_END
\end{verbatim}
\item OpenMPI
\begin{verbatim}
MPIPO_BEGIN openmpi
mpi_paffinity_alone=0,1;
btl_openib_eager_limit=1024:2048:65560;
btl_openib_free_list_num=2:4:128;
btl_openib_use_eager_rdma=0,1;
btl_openib_eager_rdma_num=1:2:32;
btl_sm_eager_limit=1024:2048:65560;
btl_sm_num_fifos=1:1:10;
btl_sm_fifo_size=2048:512:65472;
btl_sm_free_list_num=2:4:128;
MPIPO_END
\end{verbatim}
\end {enumerate}
\subsubsection{Genetic search}\label{para:MPI-gen}
PTF has been enriched with the implementation of a search strategy based on the \textit{Generalized Differential Evolution 3 (GDE3)} genetic algorithm. In this strategy a population of ten initial scenarios is randomly generated and executed; then an iterative process follows, generating new populations by selecting the best five scenarios (those with the smallest execution time) for the next generation (elitism), generating five new scenarios by crossing over the previous population (crossover), and introducing mutations with a fixed probability (mutation). The number of iterations can be configured but, generally, a close-to-optimal solution can be found in less than 30 iterations (generations).
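The following simplified sketch illustrates the elitism/crossover/mutation cycle described above; it is a schematic of the strategy only, with scenario generation, crossover and mutation left as user-supplied callables, and it is not the GDE3 implementation used inside PTF.
\begin{verbatim}
import random

def genetic_search(evaluate, random_scenario, crossover, mutate,
                   generations=30, population_size=10,
                   mutation_probability=0.1):
    """Schematic GDE3-style loop: elitism, crossover and mutation.

    evaluate(scenario) returns the measured execution time of one
    scenario; the remaining callables create and recombine scenarios.
    """
    population = [random_scenario() for _ in range(population_size)]
    for _ in range(generations):
        # Keep the scenarios with the smallest execution time (elitism).
        ranked = sorted(population, key=evaluate)
        elite = ranked[:population_size // 2]
        # Fill the population by crossing over elite scenarios.
        offspring = [crossover(random.choice(elite), random.choice(elite))
                     for _ in range(population_size - len(elite))]
        # Introduce mutations with a fixed probability.
        offspring = [mutate(s) if random.random() < mutation_probability
                     else s for s in offspring]
        population = elite + offspring
    return min(population, key=evaluate)
\end{verbatim}
In practice the evaluation results would be cached, since each scenario corresponds to an expensive application run.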
For our use cases we have used a medium size population of fishes (64K), to avoid long executions, and have run the MPI application on 64 compute cores. In addition, given that it is not possible to test a large set of parameters combinations exhaustively in a reasonable time, we have used the exhaustive search strategy for a very limited configuration (see Table \ref{table:MPIConfigFileExhaustive}) of the parameters, and the heuristic search strategy for a more complete configuration (see Table \ref{table:MPIConfigFileGDE3}).
\begin{table}[tb]
\centering
\begin{footnotesize}
\begin{tabular}{| l | }
\hline
\\
MPIPO\_BEGIN ibm \\
eager\_limit=4096:15366:65560; \\
use\_bulk\_xfer=yes,no; \\
bulk\_min\_msg\_size=4096:4096:1048576; \\
task\_affinity=CORE,MCM; \\
pe\_affinity=yes,no; \\
cc\_scratch\_buf=yes,no; \\
MPIPO\_END \\
\\
\hline
\end{tabular}
\caption{Configuration file for the exhaustive search.}
\label{table:MPIConfigFileExhaustive}
\end{footnotesize}
\end{table}
\begin{table}[tb]
\centering
\begin{footnotesize}
\begin{tabular}{| l | }
\hline
\\
MPIPO\_BEGIN ibm\\
eager\_limit=4096:2048:65560;\\
buffer\_mem=8388608:2097152:134217728;\\
use\_bulk\_xfer=yes,no;\\
bulk\_min\_msg\_size=4096:4096:1048576;\\
cc\_scratch\_buf=yes,no;\\
wait\_mode=nopoll,poll;\\
css\_interrupt=yes,no;\\
polling\_interval=100000:10000:1000000;\\
task\_affinity=CORE,MCM; \\
pe\_affinity=yes,no; \\
SEARCH=gde3; \\
MPIPO\_END \\
\\
\hline
\end{tabular}
\caption{Configuration file for the GDE3 search.}
\label{table:MPIConfigFileGDE3}
\end{footnotesize}
\end{table}
The exhaustive search strategy generates all possible combinations of the selected parameters and values, 320 in our example. Each scenario is executed and the one with the smallest wall time is chosen as the best one. The best combination corresponds to scenario 217: {\tt use\_bulk\_xfer} yes, {\tt bulk\_min\_msg\_size} 796672, {\tt cc\_scratch\_buf} no, {\tt task\_affinity} CORE, {\tt eager\_limit} 50194, {\tt pe\_affinity} yes. The execution time for this scenario was 2.8 sec, which is 1.6 times better than the time for the execution using the parameters default values (4.4 sec). The drawback is that for finding the optimal scenario in the set of tested ones, PTF needed 21505.2 sec (almost 6 hours) for the use case here.
The genetic search strategy (GDE3) can also be used for the same tuning space, but can even handle much larger tuning spaces consisting of significantly more parameters and possible values. For the use case we provide here, it executes 20 different scenarios in 1138.65 sec (approximately 20 min), which reduces the analysis time by a factor of 19 with respect to the exhaustive search. The best scenario in this case was found to be number 14: {\tt use\_bulk\_xfer} no, {\tt bulk\_min\_msg\_size} 988789, {\tt css\_interrupt} no, {\tt wait\_mode} poll, {\tt polling\_interval} 636027, {\tt cc\_scratch\_buf} no, {\tt task\_affinity} CORE, {\tt buffer\_mem} 102205313, {\tt eager\_limit} 59434, {\tt pe\_affinity} yes. The execution time for this scenario was 2.8 sec, again significantly better than the one obtained with the default values and now the search time has also been significantly reduced. These results demonstrate that using the genetic search strategy can lead to results that are almost as good as the ones produced by the exhaustive search in only a fraction of the time needed for the latter.
\subsubsection{Eager-limit parameter strategy}\label{para:MPI-eager}
The MPI Parameters plugin includes an automatic strategy for determining if it is worthwhile to include the eager limit and memory buffer parameters in the plugin search space. Moreover, if it determines that it is worthwhile to include these parameters in the search, it will automatically generate the range of values that should be searched, trying to shrink this range in order to reduce the size of the search space. To use this strategy, the user only has to specify in the configuration file the option {\tt AUTO\_EAGER\_LIMIT=<eager-limit parameter>,<buffer-mem parameter>}. In our case, using IBM MPI, the configuration option is {\tt AUTO\_EAGER\_LIMIT=eager\_limit, buffer\_mem;}.
Here we test the plugin using this configuration option, where the FSSIM application was executed with a medium size population of 64K fishes on 64 cores. The plugin determines that it is worthwhile to include the {\tt eager\_limit} and {\tt buffer\_mem} parameters in the search space because the total number of bytes communicated in point-to-point messages smaller than 64Kb was found to be greater than 30\% of the total number of bytes communicated in point-to-point messages. In addition, the plugin determines that the search range for the eager limit parameter should be between 1Kb and 32Kb because more than 80\% of the messages sent eagerly were in this range. Finally, the plugin uses the values determined for the eager limit and expression \ref{eqn:mem_buff} to compute the range for the buffer mem parameter (128Kb to 4Mb).
\begin{equation}
mem\_buff = 2\,n \cdot \max(eager\_limit, 64)
\label{eqn:mem_buff}
\end{equation}
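Assuming that $n$ in expression \ref{eqn:mem_buff} denotes the number of MPI tasks (64 in this use case), the reported buffer range follows directly from the selected eager limit range: $2 \cdot 64 \cdot \max(1024, 64) = 131072$ bytes (128Kb) for the lower bound and $2 \cdot 64 \cdot \max(32768, 64) = 4194304$ bytes (4Mb) for the upper bound.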
\begin{figure}[bth]
\center
\includegraphics[width=0.65\paperwidth]{../BPG/images/MPIFssimEager.png}
\caption{FSSIM execution time for different values of the eager limit and memory buffer parameters (IBM MPI).}
\label{fig:fssimEager}
\end{figure}
\begin{figure}[bth]
\center
\includegraphics[width=0.65\paperwidth]{../BPG/images/MPImanual64cpu64kf.png}
\caption{FSSIM execution time for different values of the eager limit and memory buffers parameters (Intel MPI).}
\label{fig:fssimEagerIntel}
\end{figure}
Figures \ref{fig:fssimEager} and \ref{fig:fssimEagerIntel} clearly show that increasing the eager limit for this application up to approximately 30Kb produces significant performance improvements, and that beyond this size no extra gains are obtained. This demonstrates that the eager limit strategy is successfully identifying the range of values that should be explored by the user for finding the best value for this parameter.
| {
"alphanum_fraction": 0.7857498754,
"avg_line_length": 61.9444444444,
"ext": "tex",
"hexsha": "f82e1516fa5ae95875fed77bfefce3bd3c638e26",
"lang": "TeX",
"max_forks_count": 1,
"max_forks_repo_forks_event_max_datetime": "2020-11-20T03:04:32.000Z",
"max_forks_repo_forks_event_min_datetime": "2020-11-20T03:04:32.000Z",
"max_forks_repo_head_hexsha": "5514b0545721ef27de0426a7fa0116d2e0bb5eef",
"max_forks_repo_licenses": [
"BSD-3-Clause-No-Nuclear-License-2014",
"BSD-3-Clause"
],
"max_forks_repo_name": "robert-mijakovic/readex-ptf",
"max_forks_repo_path": "docs/BPG/chapter4_5_mpi_param_guidelines.tex",
"max_issues_count": 3,
"max_issues_repo_head_hexsha": "5514b0545721ef27de0426a7fa0116d2e0bb5eef",
"max_issues_repo_issues_event_max_datetime": "2020-10-14T08:05:41.000Z",
"max_issues_repo_issues_event_min_datetime": "2020-04-21T07:57:32.000Z",
"max_issues_repo_licenses": [
"BSD-3-Clause-No-Nuclear-License-2014",
"BSD-3-Clause"
],
"max_issues_repo_name": "robert-mijakovic/readex-ptf",
"max_issues_repo_path": "docs/BPG/chapter4_5_mpi_param_guidelines.tex",
"max_line_length": 1112,
"max_stars_count": 1,
"max_stars_repo_head_hexsha": "5514b0545721ef27de0426a7fa0116d2e0bb5eef",
"max_stars_repo_licenses": [
"BSD-3-Clause-No-Nuclear-License-2014",
"BSD-3-Clause"
],
"max_stars_repo_name": "robert-mijakovic/readex-ptf",
"max_stars_repo_path": "docs/BPG/chapter4_5_mpi_param_guidelines.tex",
"max_stars_repo_stars_event_max_datetime": "2022-03-10T09:59:37.000Z",
"max_stars_repo_stars_event_min_datetime": "2022-03-10T09:59:37.000Z",
"num_tokens": 2682,
"size": 10035
} |
%!TEX root = ../thesis.tex
%*******************************************************************************
%****************************** Second Chapter *********************************
%*******************************************************************************
\chapter{Trends in Deforestation and Environmental Policy in Maranhão, Brazil}
\ifpdf
\graphicspath{{Chapter2/}{Chapter2/}}
\else
\graphicspath{{Chapter2/}{Chapter2/}}
\fi
\begin{abstract}
This study investigates deforestation trends in the Brazilian \textit{Cerrado} region in Maranhão, Brazil, which provides a unique natural experiment in that spatially heterogeneous environmental policies to combat deforestation were in place. The analysis applies the non-linear estimation approach of Generalized Additive Models (GAMs) to satellite-derived measures of deforestation, where validation is conducted using features of neural networks. The GAMs confirmed that deforestation is related to climatic factors in that it increased during periods of high precipitation and low solar incidence. More importantly, the results revealed that there are substantial differences in trends between seasons across regions according to their policy distinction. This was further substantiated by showing that deforestation happened during both seasons for settlements which were not targets of the environmental policy, but only during the rainy season for the protected areas, likely due to the lower rate of remotely sensed detection during cloud cover.\\
\noindent{\bf Keywords:} Deforestation trends, Generalized Additive Models, Remote Sensing Analysis.
\end{abstract}
\section{Introduction}
%trends are likely to influence health care delivery services in the future for many reasons
%Understanding the dynamics of land-use and land-cover (LUCC)\footnote{A long the lines of the Land-Use and Land-Cover Change (LUCC) project of the International Geosphere-Biosphere Program (IGBP) and International Human Dimensions Program (IHDP) on Global Environmental Change \citep{GEIST}. In the past, land-use and land-cover change was a hybrid category. Land use denoted the human employment of the land and was studied largely by social scientists and land cover denoted the physical and biotic character of the land surface and was largely studied by natural scientists \citep{MEYER}} has increasingly been recognized as one of the key research imperatives in global environmental change research. The growing recognition of the contribution of forests to climate change has created new momentum in the fight against tropical deforestation and forest degradation \citep{CAROLE2013}.
Tropical deforestation is a relatively modern phenomenon that only gained momentum in the second half of the $20^{th}$ century. As a matter of fact, the considerable deforestation observed globally during 1990-2010 was almost entirely confined to the tropical regions \citep{CULAS12}. In this regard, the Brazilian Cerrado is arguably the biome which has been most affected by human occupation over the last three decades, mainly due to the increasing pressure to open up new areas for the production of meat, grains and ethanol, mostly at the expense of forested areas \citep{mma_2018, bayma_sano_2015}. Importantly, the Brazilian Cerrado has also been subject to spatially and temporally heterogeneous environmental policies discouraging such deforestation. Understanding what role such interventionist policies may have played in observed trends in deforestation could potentially provide a platform with which to assess possible future scenarios of deforestation in the Cerrado biome and the Amazon forest in general \citep{boyd_2013}. In this paper we use remote sensing data to model the trends of deforestation and its underlying drivers in the \textit{Cerrado} region of the Brazilian state of Maranhão using the non-linear modelling approach of Generalized Additive Models (GAMs).
Arguably, the state of Maranhão provides a particularly interesting context within which to study trends in deforestation and the possible role of environmental policy. More specifically, Maranhão is divided by an artificial line that separates it into two parts: the Legal Amazon Maranhão and the Cerrado Maranhão. This division, occurring approximately at the 44$^{\circ}$W meridian, was established in 1953 due to the need to plan economic development in the region. This scenario provides a unique natural experiment of deforestation in the Legal Amazon Maranhão (LM) and Cerrado Maranhão (MA) since the former has been subject to fundamentally different environmental policies compared to the latter.\footnote{The Legal Amazon is an area that corresponds to 59$\%$ of the Brazilian territory and encompasses eight entire states (Acre, Amapá, Amazonas, Mato Grosso, Pará, Rondônia, Roraima and Tocantins) and part of the State of Maranhão (west of the 44ºW meridian), totalling more than 5 million km$^{2}$ \citep{IPEA2}.} More specifically, the tropical forest in the Legal Amazon Maranhão is under a surveillance environmental policy, called DETER, which detects deforestation or fire incidence in the region using satellite data and reports the occurrences to the environmental police (IBAMA in Portuguese) so that they can fine or arrest the responsible persons \citep{IBAMAwebsite}. In contrast, DETER is not applicable to the other biomes in the Maranhão state.\footnote{DETER is a rapid survey of alerts of changes in forest cover in the Legal Amazon made by the National Institute for Space Research (INPE in Portuguese) since May 2004, with data from Terra's MODIS sensor at a spatial resolution of 250 m. DETER was developed as an alert system to support the environmental police in curbing illegal deforestation. With this system, it is only possible to detect changes in the forest cover with an area larger than 25 ha. Due to cloud cover, not all changes are identified by DETER. The lower resolution of the sensors used by DETER is compensated by the daily observation capacity, which makes the system an ideal tool to quickly inform the inspection bodies about new changes. DETER operates daily and delivers deforestation alert maps to the environmental police within five days of the date of the MODIS image \citep{inpe-deter_2018}.} We use this spatial division to determine how deforestation trends may have differed between the Legal Amazon Maranhão and Cerrado Maranhão.
Our use of non-linear modelling for the task at hand derives from the recognition by recent previous research that most ecological and climatic data represent complex relationships and thus that non-linear models, such as GAMs, may be particularly suited to capture confounding effects in trends; see \citep{alkemad_1998,BELL_2015,JOYE_2015,LUSK_2016,SADAT_2016,HALPERIN_2016, SANTOS_2017,TAPIA_2017, LIU_2018,MORENO_2018}. However, a review of the literature shows that such models have only been used sparsely to study deforestation. For example, \citet{COHEN_2008} modelled how deforestation affected incidence rates of a disease in Cuba. Also, \citet{GREEN_2013} used a binomial GAM model to account for forest and habitat losses in protected areas on the Eastern Arc Mountains of Tanzania. More recently, \citet{BEBBER_2017} studied the impact of protected areas on global carbon emissions in America, Africa and Asia. For Brazil, \citet{MENDES_2012} observed the relationship between deforestation, corruption, and economic growth in the region of Legal Amazon, in Brazil. Here we apply a GAM with a negative binomial distribution and logarithmic link function. To capture deforestation we construct monthly time series from remote sensing sources (MODIS), given that high temporal resolution satellite products are particularly suitable to obtain detailed knowledge about the seasonal cycles of vegetation in biomes with strong seasonal contrast, such as the Cerrado biome and Ecotone forest \citep{bayma_sano_2015}. Our climatic covariates are derived from data from meteorological stations.
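As an illustration, one model specification consistent with this description is
\[
y_{t} \sim \mathrm{NegBin}(\mu_{t}, \theta), \qquad
\log(\mu_{t}) = \beta_{0} + f_{1}(t) + f_{2}(\mbox{precipitation}_{t}) + f_{3}(\mbox{solar incidence}_{t}),
\]
where $y_{t}$ is the monthly deforestation measure, $\theta$ is the negative binomial dispersion parameter and the $f_{j}$ are smooth functions estimated by penalized regression splines. The particular smooth terms shown here are illustrative assumptions based on the covariates discussed above (a time trend, precipitation and solar incidence).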
%For handling spatial datasets, ArcMap 10.4.1, ArcPy 10.4.1, and the extensions Geostatistical Analyst, Spatial Analyst and Spatial Statistics from ArcToolbox \citep{esri_2016,arcpy_2016}, and MATLAB R2017a with Statistics and Machine Learning and Image Processing Toolbox \citep{matlab_2017} was extensively used. For statistical analysis and modelling, R \citep{R_2018} and several packages specially 'MASS'\citep{MASS_2002}, 'mgcv' \citep{Wood_2003, Wood_2004, Wood_2011, Wood_2017} and 'gratia' \citep{Gavin_2018} were considered.
%In the results, the GAMs confirmed that deforestation is related to year and climatic variables, but also revealed that there is substantially difference of trends between seasons and sub regions (Legal Maranhão and Cerrado Maranhão). These results show that it does seem that deforestation in the Amazon region (LM) was transferred during the dry season to the Cerrado region (MA). It is not possible to check if this shift also happened during the raining season because both regions presented positive increment for deforestation. There appears to be no spillover effect from the environmental enforcement executed in the LM region to the MA region. One plausible proof is that deforestation remained during both seasons with distinct cycles.
%With these results, three possible outcomes emerges: i) since the region is a transitional zone, the two areas don't differ in biota aspects. In a sense, anthropic actions were responsible for apparent changes in the deforestation trends and, these changes ii) happened during high levels of precipitation and low levels of solar incidence which in turn shows that, in general, cloud cover might be a benefit for clear-cutting practices, keeping in mind that iii) the artificial line divides the two regions but many of the political boundaries of municipalities remain in both sides of the region (MA and LM), which could interfere in the deforestation path between the seasons.
The results from our GAMs revealed that in the Legal Maranhão region most of the deforestation happened during the rainy season, while in the unprotected Cerrado Maranhão deforestation occurred in the dry season as well. The fact that precipitation and solar incidence also played an important role in deforestation during the rainy season in the Legal Maranhão region suggests that cloud cover may have acted as an impediment to infringement detection via satellites, as is done in the DETER program. We further substantiated this claim by showing that, for settlements that were not the target of environmental policy, deforestation took place during both seasons as well.
%Several studies focused on deforestation trends in Brazil Amazon and Cerrado taking into consideration climatic and socioeconomic factors \citep{shukla_1990, skole_1993, morton_defries_2006, costa_pires_2009, nepstad_2008, nepstad2_2014}\footnote{For a detailed bibliometric analysis of deforestation trends, see \citet{Benavent_2018}.} However, most ecological and climatic datasets present a
%Amazon Maranhão and Cerrado Maranhão show a decreasing trend which imposes the main questions of this study. Is the decreasing deforestation trend in Amazon Maranhão during 2000 to 2017 displaced to the Cerrado Maranhão? And, is there a spillover effect from the environmental policy in the decreasing trend in deforestation on the Cerrado Maranhão?
%Monthly data
%New technique applied to deforestation
\section{Study Context} %mudar o titutlo
\subsection{Study Location} %mudar o titutlo
%Paragrafo introdutorio com dados demograficos, sociais economicos. Explicar a diferente estrutura da regiao /MA
Maranhão, or \textit{'flowing river'} in the indigenous language \citep{girardi_2015}, is one of the ten largest states of Brazil, covering more than 330 thousand square kilometers in the northeast of the country. However, while it is considered one of the regions richest in biodiversity in the country \citep{BATISTELLA_2014}, it has historically ranked among the states with the worst social and economic indicators \citep{CELENTANO_2017}. The State's political boundaries encompass natural hydrological barriers; within these borders 64.1\% of the territory lies in the Cerrado/Savanna biome, 34.8\% in the Amazon biome, and only 1.1\% in the \textit{Caatinga} biome \citep{STELLA_2011}. The Cerrado represents the largest ecosystem in Maranhão, extending from the northeast to the southern region of the State, covering about 60\% of its surface and occurring in approximately 55 of the total 217 municipalities. Of these, 23 are almost exclusively covered by this type of vegetation \citep{BATISTELLA_2013}.
In terms of vegetation, in the centre of Maranhão there is a contact area of around 21,228 km$^{2}$ between the Amazonian and Cerrado biomes, where it is possible to observe a mosaic of savanna vegetation physiognomies concomitant with ombrophilous forest formations (open and dense forest). This area is known as the Ecological Tension Zone (ETZ) of Maranhão and is home to transitional vegetation which usually presents intermediate characteristics of the two formations, with species both common and exclusive to each, providing great biodiversity \citep{rossatto_2013}. In Cerrado Maranhão it is also possible to distinguish ecotonic areas and the presence of secondary vegetation in the central region of the state. An ecotone is defined as the transition area between two or more distinct habitats or ecosystems, which may have characteristics of both or of its own. Secondary vegetation includes the various stages of natural succession in areas where there was human intervention for land use, whether for mining, agricultural or livestock purposes, following the removal of the primary vegetation \citep{SANTOS-FILHO_2013}.
There is a strong debate over the definition of the ecotone forest and secondary vegetation in the state of Maranhão. Within the Cerrado/savanna territory, almost 10\% corresponds to transitional and secondary forest. \citet{REIS_2010} state that the Cocais Forest (\textit{"Mata dos Cocais"} in Portuguese) is considered a characteristic landscape of the State, although it develops in the transition between several biomes. The authors point out that the Cocais Forest borders the open fields towards the north, the Cerrado vegetation to the south and east, and gradually merges with the forest towards the west.
%In terms of population, Maranhão is the 10th more populated state with a resident population of ${\sim}$ 6mi in 2015, which represents 3.4\% of the inhabitants living in Brazil. In addition, Maranhão has the highest percentage of people residing in rural areas comparing to other states however the economic activities are centred in Services (70\%), Industry (19\%) and Agriculture (11\%) \citep{imesc_2015}.
%falar da regiao tropical do maranhao
%The Amazon tropical forest is the largest biome in Brazil, occupying almost half of the national territory. Dominated by hot and humid climate and well distributed rains during the year, this biome has the characteristic vegetation of tall trees and forests periodically and permanently flooded. In Maranhão, the rainforest is formed by dense forest (Ombrophylous Forest), open forest (Ombrophylous Forest) and seasonal forest (Semidecidual Forest) corresponding to more than 60 thousand square kilometers \citep{BATISTELLA_2013}.
%The characteristic of the dense forest is associated to tropical climatic factors of high temperatures (averages of 25$^{\circ}$C) and high precipitation, well distributed during the year which determines a practically no dry period. In Maranhão, this region occurs in 6.45 \% of the state, located mainly in the most western portion of the state. The open forest presents different floristic forms that alter the ecological aspects of the dense forest, giving hence the name adopted, besides climatic gradients with more than 60 dry days per year, configuring a dry season. This forest region encompasses 0.18\% of Maranhão and also situates at the Maranhão's western portion. The ecological concept of the seasonal forest type is established as a function of the occurrence of seasonal climate, which determines semideciduous forest foliage. In the tropical zone, it is associated with the region marked by severe winter droughts and intense summer rains. In Maranhão, this type of forest covers 12.30 \% of the state, mainly towards the centre portion of the state \citep{BATISTELLA_2013, SPINELLI_2016}.
%falar da caatinga
%Although having a small percentage in the State (1.1\%), the \textit{Caatinga} biome is considered the most bio-diverse semiarid region in the world, characterised by shrubs with twisted branches and deep roots and cacti. In Maranhão, the \textit{Caatinga} domain presents a strong climatic irregularity, and its meteorological values are the most extreme in the state: the strongest sunshine, the lowest cloudiness, the highest thermal averages between 25$^{\circ}$ and 30$^{\circ}$ C, the highest evaporation rates and, especially, the lowest pluviometric indices, around 500 to 700 mm annually, with great spatial and temporal variability \citep{LOIOLA_2012}.
%falar do cerrado
%The Cerrado Biome in several regions of Brazil is bordered by other plant formations, being characterised by being the second largest Brazilian biome in territorial extension, being surpassed only by the Amazon \citep{BATISTELLA_2013}.
%[PARAGRAFO PARA FAZER LIGACAO COM O SUBSECTION]
%\subsubsection{Ecotone Forest and Secondary Vegetation in Maranhão}
%Authors discuss the difficulty of delimiting forest areas in transitional and / or ecological tension regions, mainly Cerrado - Amazon Forest due to the innumerable indentations and interpenetration of savanna formations in the territory of the Legal Amazon. Areas with these characteristics can be found in the states of Amazonas, Mato Grosso, Pará, Tocantins and, specially, in Maranhão.
%Differently, \citet{azevedo_2002} assigns the Cocais Forest as a secondary type of vegetation present in the two types of forests in Maranhão: the humid one, and the deciduous (periodic exchange of foliage). The author explains his assumption based on historic reports demonstrating that the ecosystem used to be different in that region. Officially, the Cocais forest is considered an open forest transition with palm trees \citep{ibge_1992}. \citet{RIBEIRO_2008} have also designated Cocais Forest as one of the features that occur in the Cerrado environment.
More recently, \citet{GARCIA201716} studied part of the Maranhão region and defined forest as a combination of riparian forest, transitional forest, and Cerrado woodland (the latter having higher tree density), and Cerrado as representing physiognomies with a woody layer but lower tree density. Their results showed intense conversion and fragmentation of native vegetation and, consequently, an impoverishment of the quality of the native cover, supporting the possibility of conducting studies that treat ecotone forest, such as the \textit{Mata dos Cocais}, as a transitional forest rather than secondary vegetation.
As noted in the introduction, in addition to the natural barriers, Maranhão is also divided into two parts: the Legal Amazon Maranhão and the Cerrado Maranhão, with 209 municipalities located in the latter and 138 municipalities in the former. This division runs approximately along the 44$^{\circ}$ W meridian and was established in 1953 in order to plan the economic development of the region comprising the tropical forest areas of Maranhão state. We depict this delineation in Figure \ref{fig:delimitacao2}.
\begin{figure}[H]
\centering
\includegraphics[width=1\textwidth]{MaranhaoChapter2_Fig1.png}
\caption[Maranhão state and the Legal Amazon delimitation]{Maranhão State and the Legal Amazon delimitation. The map includes municipality centres, rivers and basins, protected areas and indigenous land. Source: \citep{MMMAwebsite,nugeo_2018,embrapa_2018}.}
\label{fig:delimitacao2}
\end{figure}
%COLOCAR QUE EXISTE UM EMBATE PARA O TIPO DE VEGETACAO SECUNDARIA NO MARANHAO - SE EH ALGO ANTROPICO OU NATURAL POR PARTE DA REGIAO. VER O ARTIGO DE SOARES-FILHO 2013.
%Com base nos relatos de antigos viajantes de que regiões constituintes da Zona dos Cocais, como é atualmente denominado o ecótono presente em parte do território dos estados do Piauí e do Maranhão, continham várias populações de palmeiras, mas, especialmente em território maranhense, prevalecia uma floresta com características da pré-Amazônia e considerando os diferentes trabalhos e ensaios com várias espécies de palmeiras e suas respectivas propriedades no processo de recrutamento e sucessão ecológica, pode-se supor que a maciça concentração de grandes populações encontradas na região atualmente, seja reflexo de um intensivo processo de degradação das florestas originais com diferentes finalidades, partindo-se desde a exploração de territórios para pasto e agricultura, quanto ao extrativismo de plantas típicas das florestas presentes na região. O resultado desta degradação deixa evidente que, dentre estas espécies de palmeiras, o babaçu é uma das plantas mais expressivas e eficientes da comunidade pioneira.
\subsection{Deforestation in Maranhão}
%Studies of deforestation seem to vary from place to place. In Brazil, and specially Maranhão, deforestation happens depending on the status of development and welfare of the citizens in determining the extent of the forest loss. The requirement for income and economic growth results in growing demand for agricultural and forest derived products, like soy, timber and beef. The main causes of deforestation are assumed to be acting aggregated and the government finds these are the easiest and the most accessible ways of responding to their increasing economic pressures \citep{GEIST,CULAS11,CELENTANO_2017}.
%EXPLICAR A RAZAO PELA QUAL TEM DESMATAMENTO NO MARANHAO
Large-scale deforestation in the Maranhão Amazon forest began in the 1960s, when the military government promoted the occupation of this territory through the construction of highways and provided incentives for large farming projects on public lands and for logging centres. In the 1980s, with the implementation of the iron mining project in the neighbouring state of Pará (the Carajás Project), a railroad linking the mine to the port in Maranhão was built. Moreover, many pig iron facilities were installed in the Maranhão Amazon region, demanding large quantities of charcoal, which increased the pressure on forest resources \citep{CELENTANO_2017}.
%Colocar graficos
%COMO EH ESSE DESMATAMENTO COMPARADO COM A AMAZONIA LEGAL E COM O RESTANTE DO CERRADO
While many existing studies have focused on the Amazon tropical forests \citep{PFAFF,PFAFF2,HAMMIG,GEIST, GEIST2, LAMBIN2,PFAFF3,ZAMBRANO,KUIK,COE,SOLER,NEPSTAD,ARIMA,PATZ,RICHARDS,RICHARDS2,CELENTANO_2017}, owing to the ample environmental information made available through specific environmental policies such as DETER, the study of the Cerrado biome and, consequently, of transitional forests remains precarious. The first obstacle in monitoring the Cerrado biome is the high heterogeneity of its forests (open and dense forest, for example), which are substantially influenced by climatic seasonality \citep{bayma_sano_2015}. The second challenge is that there is no environmental policy in place to prevent rampant deforestation. Nevertheless, in the context of the Amazon region, it is arguably crucial to understand the dynamics of the Cerrado and its potential to influence adjacent Amazonian forests, since it provides a valuable endpoint from which climate- and anthropogenic-related aspects of the Amazon forest may be better understood (see Figure \ref{fig:defAmazonMA}).
%In this sense, the Cerrado is the Brazilian biome most affected by human occupation in the last three decades, mainly due to the increasing pressure for the opening of new areas for the production of meat, grains and ethanol, which puts the survival of many species and the integrity of their habitat in risk \citep{mma_2018, bayma_sano_2015}.
%Considered a transitioning state between semiarid and tropical environment, Maranhão's Cerrado figures the third position when comparing the amount of forest cleared in absolute values demonstrating that the end-point of Amazon forest is at risk (Figure \ref{fig:defAmazonMA}).
\begin{figure}[H]
\centering
\includegraphics[width=1\textwidth]{Chapter2/ChartMA_deforestation_chapter2.png}
\caption[Estimated Deforestation in Maranhão 2000-2017]{Estimated Deforestation in Maranhão 2000-2017 (sq Km). Source: \citep{MMMAwebsite}.}
\label{fig:defAmazonMA}
\end{figure}
%SERIA BOM COLOCAR O TREND DE DESMATAMENTO DE HANSEN PARA A PARTE QUE NAO TEM DADOS DO PRODES.
In the past, several factors arguably contributed significantly to deforestation in the Cerrado Maranhão, such as the agricultural occupation of the Cerrado biome and the unconstrained use of mechanisation, facilitated by the predominantly flat relief of the region and the existence of deep soils and a good water supply, making it possible to practice rainfed agriculture \citep{bayma_sano_2015}. In contrast, more recent forest losses have been due to settlement projects, illegal logging, pasture, subsistence agriculture and commodities \citep{CELENTANO_2017, costa_2018}, and these drivers are deeply connected to the process of the state's development. More precisely, during the 1940s almost 85\% of the population lived in rural areas, with a low population density (3.81 inhabitants per km$^{2}$). At that time, Maranhão had more than 200,000 km$^{2}$ of uninhabited land, which included transitional forest, Cerrado, and pre-Amazon forest. This ``territorial gap'' (\textit{fundos territoriais} in Portuguese) favoured the creation of several settlement projects along with the construction of federal roads and the Carajás railway project \citep{ferreira_2008}. Moreover, the increasing global demand for commodities affected the regional economy significantly. For instance, fiscal incentives increased the production of soy, whose planted area grew from 42.6 km$^{2}$ in 1983/84 to 3,940 km$^{2}$ in 2004/05. This ecological tension zone coincides with the Brazilian agricultural frontier, known as the deforestation arc, and is an area of intense exploitation.\footnote{The expansion of soybean cultivation in Brazil has shifted the agricultural frontier to an area known as MAPITOBA. This area includes the states of MAranhão, PIauí, TOcantins, and BAhia, and has maintained its expansion across the Cerrado. This has led to deforestation and degradation, conservation conflicts and conflicts over land, increased burning, and displacement of traditional populations \citep{mustin__2017}.}
%Despite the rich diversity in the three explained biomes, Maranhão is the state of the Legal Amazon that presents the lowest degree of occupation of the area with protected areas. The number of endangered, rare and endemic species in the most varied groups of animals and plants attest to the biological importance of the region, not only for the State of Maranhão, but for the country as a whole \citep{MARTINS_2011}. Only during the 2000s Maranhão invested an effort on environmental policies through the creation of conservational units, indigenous land titling and environmental policies in which helped to decelerate the deforestation path in the Amazon Maranhão.
%\subsubsection{The trend of deforestation in Maranhão}
As can be seen in Figure \ref{fig:defAmazonMA}, deforestation in the Amazon biome of Maranhão has decreased over the years. A great part of this reduction is likely due to protectionist policy enforcement in the region. In this regard, the national environmental policy established in 2004 involved the creation of the Action Plan for the Prevention and Control of Deforestation in the Legal Amazon (PPCDAm in Portuguese). In order to control land use and prevent further deforestation, the PPCDAm also included the satellite-based monitoring programme called DETER, which alerts the environmental police in near real time to illegal logging and deforestation. Importantly, until July 2018 there was no systematic satellite monitoring programme for the other parts of Maranhão, such as the transitional forests of the Cerrado and the \textit{Caatinga}. In August of the same year, however, the National Institute for Space Research (INPE in Portuguese), together with several other institutions, published an annual dataset covering 18 years of Cerrado biome deforestation, and with these data it was possible to show the trends of deforestation in the Cerrado biome of Maranhão. As shown in Figure \ref{fig:defAmazonMA}, deforestation in the greater Cerrado, which includes the transitional forest, has been twice as high as in the Amazon region of Maranhão.
%Both areas, notwithstanding, show a decreasing trend according to official sources which imposes two questions questions to this study: 1- Is the decreasing deforestation trend in Amazon Maranhão during 2000 to 2017 displaced to the Cerrado Maranhão? Following the first question, 2- Is there a spillover effect from the environmental policy in the decreasing trend in deforestation on the Cerrado Maranhão? By applying an advanced technique and an alternative dataset and method, this study aims to be able to answer these questions.
%Monthly data
%New technique applied to deforestation
\section{Material and Methods}
\subsection{Remote Sensing} %mudar o titutlo
%Anthropic activities combined with natural events are significantly altering the Earth's land cover causing global climatic changes. When considering vegetation mapping for forest cover loss and forest regrowth path, traditional methods such as field surveys are time consuming, date lagged and generally too expensive. Over the past four decades, a feasible alternative considered by researchers and specialists is to apply remote sense technology and subsequent image analysis.
The use of satellite time series along with statistical analysis can be helpful in understanding the characteristics of vegetation dynamics. More precisely, since vegetation has a distinctive spectral signature (e.g., reflectance), it is possible to identify its characteristics with an optical remote sensor on board a satellite. In such vegetation mapping, incorporating the spectral radiances in the red and near-infrared regions into spectral vegetation indices (VIs) makes it possible, for example, to estimate the forage quantity and quality of grass prairie \citep{xie_sha_yu_2008}.
In earlier studies, coarse spatial resolution data from the Advanced Very High Resolution Radiometer (AVHRR) were used mainly to monitor land cover changes at regional and global scales; however, since 2000, the availability of Moderate Resolution Imaging Spectroradiometer (MODIS) data, with superior features relative to AVHRR, has provided an improved basis for regional and global mapping \citep{huang_2014}.
%For instance, in 1999, a group of researchers around the world created the Global Land Cover 2000 (GLC2000) project extracting data from 1-km SPOT4-VEGETATION imagery.\footnote{For the dataset, see http://www-gvm.jrc.it/glc2000/} After two years, US-NASA released the database of global MODIS land cover based on monthly composites from MODIS sensor for the period between January and December 2001 \citep{xie_sha_yu_2008}.\footnote{For the dataset, see http://duckwater.bu.edu/lc/mod12q1.htlm}
%In general, the selection of images acquired by adequate sensors is largely determined by the mapping objective and accuracy, the cost of images, the climate conditions, such as a cloud-free image, and the technical issues that arise from interpretation and suitability.
%Images with low resolutions may be adopted only when the high level of vegetation classes are to be identified, while the images with relatively higher resolutions are used for fine-detailed classifications of vegetation. Also, from the mapping scale point of view, vegetation mapping at a small scale usually requires high-resolution images, while low-resolution images are used for a large-scale mapping. In the field of vegetation mapping, the most commonly applied sensors in decreasing spatial resolution order include NOAA–AVHRR, MODIS, Landsat (mainly TM and ETM+), SPOT, IKONOS and QuickBird \citep{xie_sha_yu_2008}.\footnote{A detailed summary of satellites, sensors and databases relevant to vegetation can be found in \citet{horning_2010}.}
\subsubsection{MODIS}
%Falar do satelite MODIS e dos seus produtos
The MODIS sensor is flown on two spacecraft. The Terra satellite is on a morning (AM) overpass, whereas the Aqua platform provides complementary observations in the afternoon. The instrument on board NASA's Terra satellite is a scanning radiometer system with 36 spectral bands, extending from the visible to the thermal infrared wavelengths, and has a viewing swath of 2,330 km by 10 km. The Terra orbital configuration and MODIS viewing geometry produce full global coverage every one to two days, except for the equatorial zone where the repeat frequency is approximately 1.2 days \citep{zhan_2002, setiawan_2014}. The high temporal resolution of MODIS is a determining factor in phenological studies and spectral discrimination, and can be used to obtain detailed knowledge about the seasonal cycles of vegetation in biomes with strong seasonal contrast, such as the Cerrado biome and the ecotone forest.
%The first seven bands are designed primarily for remote sensing of the land surface with spatial resolutions of 250m (Bands 1-2), 500m (Bands 3-7), and 1000m (Bands 8-36). Note that while the bands are commonly referred to as 250m and 500m, the actual resolution of the grids is 236 and 472m at the equator.
%In addition, MODIS data is in a ready-to-use, atmospheric corrected, cloudless, and geo-referenced format.
Of the many data products derived from MODIS observations, we make extensive use of two: MCD12Q1 and MOD13Q1. The MODIS Land Cover Type product (MCD12Q1) provides 13 science data sets (SDSs) that map global land cover at 500 m spatial resolution at annual time steps for six different land cover legends from 2001 to 2016. The MCD12Q1 product is created using supervised classification of MODIS reflectance data and includes five legacy classification schemes, such as the University of Maryland classification (UMD), which recognises 17 classes covering natural vegetation (11 classes), mosaic lands (2 classes), and non-vegetated lands (4 classes). A complete list of the classes and their definitions is given in Table \ref{UMD2} \citep{setiawan_2014, friedl_2018}.
%COLOCAR A TABLE 4 DO USER GUIDE MCD12Q1
The MODIS Vegetation Indices (VI) product (MOD13Q1) consists of time series of global vegetation conditions that can be used to monitor terrestrial vegetation change. The two vegetation indices that we derive from it are the Normalized Difference Vegetation Index (NDVI) and the Enhanced Vegetation Index (EVI). The NDVI is a normalized transformation of the NIR (near-infrared) to red reflectance ratio, standardized to range from -1 to 1. \citet{ratana_huete_ferreira_2005} note that this index is sufficiently stable to permit meaningful comparisons of seasonal, inter-annual, and long-term variations in vegetation structure, phenology, and biophysical parameters:
%Gridded vegetation index maps depicting spatial and temporal variations in vegetation activity are derived at 16-day and monthly intervals in support of accurate seasonal and inter-annual monitoring of the Earth’s terrestrial vegetation \citep{didan_munoz_2015}.
%COLOCAR AQUI A EQUACAO DO NDVI
\begin{center}
\begin{equation}
NDVI = \frac{\rho_{NIR} - \rho_{red}}{\rho_{NIR} + \rho_{red}} \label{eq:1.2}
\end{equation}
\end{center}
where $\rho_{red}$ and $\rho_{NIR}$ are the surface bidirectional reflectance factors for MODIS bands 1 (620-670nm) and 2 (841-876nm).
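As a minimal illustration of Equation \ref{eq:1.2}, the following R sketch computes the NDVI from red and NIR surface reflectance values; the reflectances shown are hypothetical examples rather than MODIS measurements.
\begin{lstlisting}[language=R]
# Minimal sketch of the NDVI formula; the reflectance values are
# hypothetical examples, not MODIS measurements.
ndvi <- function(nir, red) {
  (nir - red) / (nir + red)
}

ndvi(nir = c(0.31, 0.29, 0.18), red = c(0.045, 0.062, 0.110))
# values close to 1 indicate dense green vegetation, values near 0 bare soil
\end{lstlisting}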
The EVI was developed to optimise the vegetation signal and minimise atmospheric effects and soil background noise, and has been reported to be more responsive to canopy structural variations, including canopy type. The EVI formula is written as:
%COLOCAR AQUI A EQUACAO DO EVI
\begin{center}
\begin{equation}
EVI = G \, \frac{\rho_{NIR} - \rho_{red}}{\rho_{NIR} + C_{1}\,\rho_{red} - C_{2}\,\rho_{blue} + L} \label{eq:2.2}
\end{equation}
\end{center}
where $\rho_{red}$, $\rho_{NIR}$ and $\rho_{blue}$ are the reflectances in MODIS bands 1, 2 and 3 (459-479nm), and C$_{1}$ and C$_{2}$ are the atmospheric resistance coefficients. L and G are the canopy background adjustment and the gain factor, respectively. The coefficients adopted for the MODIS EVI algorithm are L=1, C$_{1}$=6, C$_{2}$=7.5 and G=2.5. The EVI differs from the NDVI by attempting to correct for atmospheric and background effects. In addition, the EVI is superior to the NDVI in discriminating subtle differences in areas of high vegetation density, because the latter tends to saturate \citep{didan_munoz_2015, ratana_huete_ferreira_2005}.
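The corresponding EVI calculation, using the MODIS coefficients listed above, can be sketched in R as follows (again with hypothetical reflectance values):
\begin{lstlisting}[language=R]
# Minimal sketch of the EVI formula with the MODIS coefficients given in
# the text (G = 2.5, C1 = 6, C2 = 7.5, L = 1); reflectances are hypothetical.
evi <- function(nir, red, blue, G = 2.5, C1 = 6, C2 = 7.5, L = 1) {
  G * (nir - red) / (nir + C1 * red - C2 * blue + L)
}

evi(nir = 0.31, red = 0.045, blue = 0.03)
\end{lstlisting}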
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%Dar exemplos de pesquisas na area de desmatamento com MODIS VI products, explicar que sao bons indices para analisar o cerrado, dando embasamento. Ver RATANA (Ferreira e Huete, 2004). e BAYMAN VER NO CADERNO VERMELHO NOTAS BIBLIOGRAFICAS!!!! FALA DE DIVERSOS PAPERS COM USO DE MODIS!!!!
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
\subsection{Study area characterization} \label{studycarac} %mudar o titutlo
%Caracterizacao da area de estudo
We compare deforestation trends within the vicinity of both sides of the artificial border of the Legal Amazon. To this end we experiment with three bandwidths of 25 km, 50 km and 100 km in both the east and west directions from the line, giving a total of six sampled areas. The buffer zone is characterised by an intense presence of ecotone forest and covers the east region (MA, hereafter) and the west-centre region (LM, hereafter) of Maranhão state. As can be seen in Figure \ref{fig:buffer}, the study area, corresponding to the 100 km buffer to the east and west of the line, comprises more than one third of the State.
\begin{figure}[H]
\centering
\includegraphics[width=1\textwidth]{MaranhaoChapter2_Fig2.png}
\caption[Maranhão state and 100km buffer departing from the Legal Amazon line to the east and west portion of the territory]{Maranhão state and 100km buffer departing from the Legal Amazon line to the east and west portion of the territory. Source: \citep{MMMAwebsite,nugeo_2018,embrapa_2018}.}
\label{fig:buffer}
\end{figure}
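Although the buffer strips in this study were generated in ArcMap, an equivalent construction can be sketched with the R package 'sf'; the line coordinates and the projected coordinate system used below are illustrative assumptions only.
\begin{lstlisting}[language=R]
# Sketch of the 25/50/100 km buffer strips around the Legal Amazon line.
# The line coordinates and the projected CRS (EPSG:31983, SIRGAS 2000 /
# UTM 23S) are illustrative assumptions; the study used ArcMap for this step.
library(sf)

line <- st_sfc(st_linestring(rbind(c(-44.0, -1.0), c(-44.0, -10.0))),
               crs = 4326)
line <- st_transform(line, 31983)   # project so that distances are in metres

buffers <- lapply(c(25000, 50000, 100000),
                  function(d) st_buffer(line, dist = d))  # two-sided strips
names(buffers) <- c("25km", "50km", "100km")
\end{lstlisting}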
%In climatic terms, the region presents large oscillation from north to south, predominating the tropical climate of the equatorial zone. In the LM region, the hot and humid tropical climate (As) predominates, typical of the Amazon region. The eastern region is marked by warm and tropical wet-dry or savanna climate (Aw). Temperatures are high, with annual averages higher than 25ºC, and towards the southeast of the studied region, it reaches 28ºC.
%CITAR GRAHBER_2015.
%outras variaveis ambientais: chuva, temp,
%The dry period, which occurs from July to November, with a lower incidence of rain around the month of August, registers averages of the order of 17.1mm. The average annual precipitation varies from 1,500 mm in the western portion (LM), reaching 1,800 mm near the coast, while in the MA area precipitation is lower with 1,700 mm peaks in the plateaus. There are even smaller records in the MA region, which can reach 1,000 mm.
% relevo
%The relief of Maranhão is characterised by low planing surfaces in the midst of extensive fluvial plains, much of it is enclosed by Paleozoic and Mesozoic rocks of the Parnaíba Sedimentary Basin.
The study area is characterised by a rainfall regime with two well-defined seasons. The rainy season is concentrated from December to June, reaching its highest rainfall peaks in March. The sample region presents itself as a large sloping platform in the south-north direction, with a low dip towards the Atlantic Ocean. The relief is classified into two large units: plains, which are subdivided into smaller units, and plateaus. Plains are considered to be surfaces with elevations of less than 200 meters, whereas plateaus are areas with elevations above 200 meters, restricted to the south-central areas of the studied region \citep{feitosa_2006, bolfe_2013}. Information on the geology of the ecotonic region was extracted from the official map published in 2011 by the Brazilian Institute of Geography and Statistics (IBGE) at the 1:1,400,000 scale.
%Geologia
%The Oxisols are predominant in Maranhão. They are consist of stable, highly weathered, tropical mineral soils with highly oxidized subsurface horizons. In spite of the low natural fertility, these soils present physical conditions favourable to the agricultural crop, mainly the mechanized, because they are deep soils, well drained and they normally occur in flat relief or gently mountainous.
% MATERIAL SOBRE EXPOSICAO SOLAR DO MARANHAO
Solar radiation at the terrestrial surface has direct implications for local meteorology, especially in studies of climate variability, and interferes with satellite image analysis \citep{cohen_2002, dasilva_2004, pereira_2017}. The study region is privileged in its energy potential, since it is located entirely in the region bounded by the Tropics of Cancer and Capricorn, and close to the Equator, a condition that favours high rates of solar irradiation. The State of Maranhão presents an average annual global irradiation of approximately 5.0 kWh/m$^{2}$ \citep{pereira_2017}. In the ecological tension zone, i.e. the studied area, the municipalities of Caxias (5.4 kWh/m$^{2}$) (MA), Chapadinha (5.3 kWh/m$^{2}$) (MA), Bacabal (4.9 kWh/m$^{2}$) (LM) and São Luís (4.9 kWh/m$^{2}$) (LM) stand out for having the highest solar irradiation rates.
%COLOCAR MAPA IGUAL AO DE GRAHBER_2015 LOGO APOS OS FATORES CLIMATICOS
%Aspectos vegetacionais: quais classificacoes vegetacionais?
%Also in this region, the transitioning vegetation is characterised by the contact of Savana / ombrophilous forest (SO), Savana / Seasonal Forest (SN), and Savana / Seasonal Savana besides the presence of secondary vegetation within the domain \citep{bolfe_2013}.
%outras variaveis ambientais: hidrologia e hidrovia
\subsection{Data preparation} %mudar o titutlo
%Paragrafo com softwares usados, packages e livros guiados
Handling and preparing spatial data requires specific software. We used ArcMap 10.4.1, ArcPy 10.4.1, and the Geostatistical Analyst, Spatial Analyst and Spatial Statistics extensions from ArcToolbox \citep{esri_2016,arcpy_2016}, as well as MATLAB R2017a with its Statistics and Machine Learning and Image Processing Toolboxes \citep{matlab_2017}. For statistical analysis and modelling we worked with R \citep{R_2018} and several packages, especially 'MASS' \citep{MASS_2002}, 'mgcv' \citep{Wood_2003, Wood_2004, Wood_2011, Wood_2017} and 'gratia' \citep{Gavin_2018}.
\subsubsection{Vegetation Indices} %PRECISO COLOCAR MAPA COM A LOCALIZACAO DA REGIAO DE ESTUDO TIPO AQUELE TILE DO MUNDO!!!!!!
Two remotely sensed datasets were used: the Vegetation Indices 16-Day L3 Global 250m product (MOD13Q1) and the Land Cover Type Yearly L3 Global 500m product (MCD12Q1). These products were retrieved from the online Application for Extracting and Exploring Analysis Ready Samples (AppEEARS) tool, courtesy of the NASA EOSDIS Land Processes Distributed Active Archive Center (LP DAAC), USGS/Earth Resources Observation and Science (EROS) Center, Sioux Falls, South Dakota \citep{didan_2015,didan_munoz_2015,sulla_2015,sulla2_2018}.
In the AppEEARS tool it is possible to define the region of interest by uploading a polygon file in shapefile format; the output file format is Georeferenced Tagged Image File Format (GeoTIFF). When selecting GeoTIFF, one GeoTIFF is created for each feature in the input polygon file, for each layer, by observation. After the area of interest is defined, the tool uploads the input polygon and reprojects the input file to the source projection of each data product using the Geospatial Data Abstraction Library (GDAL) (the ‘gdalwarp’ function and the PROJ.4 definition) for each data collection \citep{usgs_2018}. In this manner the MODIS images from the MOD13Q1 and MCD12Q1 products were acquired in GeoTIFF format, and the projection chosen was the geographic datum WGS84 (EPSG:4326). Two shapefiles with the same coordinate system were used to extract the LM and MA locations. LM (Legal Maranhão) refers to the area under the surveillance programme to the west of the Legal Amazon line, and MA (Cerrado Maranhão) refers to the area comprising the east portion of the buffer zone (see Figure \ref{fig:buffer}).
Next, a bounding box for each feature in the MODIS file was determined using the minimum and maximum latitude and longitude values, with a one-pixel buffer applied to each corner. For each feature, the tool determines which spatial tiles intersect the bounding box; the tiles are then extracted from OPeNDAP \citep{cornillon_2003} and mosaicked into a single image. The process is repeated and the MODIS images are ultimately configured into a time series image stack for each feature in the file. Reprojection is performed using the nearest neighbour resampling technique, and the latitude and longitude of the sample region are maintained in the conversion. Nearest neighbour resampling was selected to ensure that categorical data sets, including quality data layers, can be transformed \citep{usgs_2018}.
A total of 776 images of the Vegetation Indices product MOD13Q1 were downloaded, covering February 2000 to December 2016, along with 16 images of the Land Cover product MCD12Q1 for the years 2001 to 2016. From the MOD13Q1 product bands we also obtained the composite 'day of the year' band, which provides the acquisition date (day of the year: 1 to 366) of each pixel composing the image; the pixel reliability summary quality assurance (QA) band, which provides a summary of pixel quality; and the detailed VI Quality (QA) band, which provides detailed pixel quality information. For the MCD12Q1 product, the Land Cover Type quality check (QC) band was downloaded along with five different types of land cover data sets.
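Although the image processing in this study was carried out in MATLAB, the downloaded GeoTIFF time series could equally be assembled into a raster stack in R, for example with the 'terra' package; the folder name in the comment and the in-memory stand-in below are hypothetical.
\begin{lstlisting}[language=R]
# Sketch only: in practice the downloaded files would be listed with
#   ndvi_files <- list.files("MOD13Q1", pattern = "\\.tif$", full.names = TRUE)
#   ndvi_stack <- rast(ndvi_files)
# Here a small in-memory stack stands in for the AppEEARS GeoTIFFs.
library(terra)

ndvi_stack <- rast(nrows = 10, ncols = 10, nlyrs = 3,
                   xmin = -46, xmax = -43, ymin = -6, ymax = -3,
                   crs = "EPSG:4326")
values(ndvi_stack) <- matrix(sample(-2000:10000, 300, replace = TRUE), ncol = 3)
ndvi_stack <- ndvi_stack * 0.0001   # MOD13Q1 scale factor -> NDVI in [-0.2, 1]
crs(ndvi_stack)                     # WGS84 (EPSG:4326)
\end{lstlisting}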
\subsubsection{Climatic variables} %PRECISO COLOCAR UMA TABELA COM A LOCALIZACAO LAT LONG DA ESTACAO , O NUMERO DA ESTACAO, ALTITUDE E SITUACAO
%Paragrafo com explicacao da aquisicao dos dados INPE colocando de onde peguei e citando as fontes de cada dado pego
The climatic data were obtained from the Meteorological Database for Teaching and Research of the National Meteorological Institute (BDMEP – INMET in Portuguese), which stores historical series from several conventional meteorological stations of the INMET station network. Access requires registration but is freely available for academic purposes \citep{bdmep_2018}. Each conventional weather station (see Table \ref{estacoesconvecionais} for the 9 stations in the sample area) is composed of several isolated sensors that continuously record the meteorological parameters (e.g., temperature, precipitation, humidity, and solar radiation), which are annotated by an observer who sends the measurements to a collection centre. In the historical series the maximum temperature is taken at 00 Coordinated Universal Time (UTC) and the minimum temperature is collected at 12 UTC. Precipitation is the accumulation over the previous 24 hours collected at 12 UTC, and solar radiation equals the number of hours the sun shines directly onto the surface, as long as it is not blocked by clouds or any other obstacle. The relative humidity is obtained from readings of the wet bulb and dry bulb temperatures at 12:00, 18:00, and 24:00 UTC \citep{vianello_2011}. We use the monthly average maximum temperature (MAMxT), monthly average compensated temperature (MACT), monthly average minimum temperature (MAMT), monthly average precipitation (MAP), monthly average relative humidity (MARH) and the number of hours of sunlight in a month as total solar radiation (TS), from February 2000 to December 2016.
\begin{table}[H]
\footnotesize
\caption{INMET Metereological Stations}
\begin{tabularx}{\linewidth}{H H H H H H}
\hline
\hline
Station Number (ID) & Latitude & Longitude & Altitude & Name & \centering\arraybackslash Area\\
\hline
82571 & -5.5 & -45.23 & 153 & BARRA DO CORDA & LM \\
82970 & -9.5 & -46.2 & 285 & ALTO PARNAIBA & LM \\
82460 & -4.21 & -44.76 & 25 & BACABAL & LM \\
82765 & -7.33 & -47.46 & 193 & CAROLINA & LM \\
82376 & -3.26 & -45.65 & 45 & ZE DOCA & LM \\
82476 & -4.86 & -43.35 & 104 & CAXIAS & MA \\
82382 & -3.73 & -43.35 & 104 & CHAPADINHA & MA \\
82676 & -6.03 & -44.25 & 180 & COLINAS & MA \\
82280 & -2.53 & -44.21 & 51 & SAO LUIS & MA \\
\hline
\hline
\multicolumn{6}{l}{\footnotesize Note: Source: \cite{bdmep_2018,inmet_2018}.}
\end{tabularx}
\label{estacoesconvecionais}
\end{table}
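As a hedged illustration of how such monthly series can be built from daily station records, the following base-R sketch aggregates one simulated station to monthly means; the data frame and column names are hypothetical and do not reproduce the actual BDMEP file layout.
\begin{lstlisting}[language=R]
# Sketch of the monthly aggregation of daily station records.
# The simulated data frame and column names are hypothetical, not the
# actual BDMEP file layout.
set.seed(5)
station <- data.frame(
  Date   = seq(as.Date("2000-02-01"), as.Date("2000-12-31"), by = "day"),
  Tmax   = rnorm(335, mean = 33, sd = 2),        # daily maximum temperature (C)
  Precip = rgamma(335, shape = 0.6, scale = 12)  # daily precipitation (mm)
)
station$month <- format(station$Date, "%Y-%m")

# Monthly average maximum temperature (MAMxT) and precipitation (MAP)
monthly <- aggregate(cbind(Tmax, Precip) ~ month, data = station, FUN = mean)
head(monthly)
\end{lstlisting}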
\subsubsection{Cross Validation}
Cross validation is a necessary approach when dealing with remote sensing data. The MODIS Vegetative Cover Conversion product (MOD44A) was acquired from the Global Land Cover Facility of the University of Maryland \citep{glcf_2018} to conduct a validation of the response variables (NDVI and EVI). The VCC product is used as an indicator of change and not as a means to measure change. It is available for vegetation burning and anthropogenic deforestation types of land cover conversion. In this sense, using this product as an indicator of accuracy is useful and reliable, as argued by \citet{defries_2002}.
As part of the validation process, we used a finer-resolution data set to check the accuracy of the algorithm applied to the MOD13Q1 product. \citet{Hansen_2013} provided results from time-series analysis of Landsat images characterising global forest extent and change from 2000 through 2017. The scenes utilised for the analysis contained forest losses during the period 2000–2016, defined as a stand-replacement disturbance or a change from a forest to a non-forest state, encoded as either 0 (no loss) or a value in the range 1–16, representing loss detected primarily in the years 2001–2016 \citep{gfc_2017}.\footnote{\begin{flushleft}Data Source: Hansen/UMD/Google/USGS/NASA. Data available on-line from: http://earthenginepartners.appspot.com/science-2013-global-forest. \end{flushleft}} Moreover, in 2018 Brazil's National Institute for Space Research (INPE), together with Brazil's Environment Ministry, published a data set covering forest loss in the Cerrado biome. These data consist of bi-annual images from 2000 to 2012 and yearly images from 2012 to 2016. Several sensors were used to create the composite data set, such as TM/Landsat5, ETM+/Landsat7, OLI/Landsat8, and LISS-III/IRS2. Being of finer resolution, this product allows a justified comparison between national and international land change products, proving to be an acceptable validation procedure \citep{brito_2018}.
\subsection{Data Exploration and Interpretation}
After the selection of the variables there is a refinement process, which constitutes the most important part of the research in order to obtain data suitable for statistical modelling. In this section we describe the steps taken to conduct the statistical analysis and the statistical method applied in the study.
\subsubsection{Response Variable} %mudar o titutlo
% Paragrafo explicando como se deu a aquisicao dos pixels e tecnicas utilizadas interpretando por meio de figuras (colocar 2).
In order to perform the analysis, the NDVI and EVI images were imported into MATLAB and scaled to the valid range of -0.2 to 1. Two images per month for each year were uploaded, excluding October and November during leap years because they had only one image within the month. The detailed VI Quality (QA) band for each scene was converted to unsigned 16-bit integers according to the VI User Guide \citep{didan_munoz_2015} and was used to create a Goodness mask excluding pixels affected by clouds or not produced for reasons other than clouds.
Before filtering the NDVI and EVI scenes with the VI mask, it was necessary to restrict the NDVI and EVI values to a specific threshold so as to exclude values not related to forest. The threshold followed the parameters of \citet{geerken_2009} and \citet{bayma_sano_2015} for characterising forest in a transitional biome. Next, the VI images were filtered, retaining only values of good quality according to the Goodness mask, thereby eliminating cloud cover.
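A schematic version of this masking step is sketched below in R (the original processing was carried out in MATLAB); the toy matrices and the threshold value are placeholders rather than the exact parameters of \citet{geerken_2009} and \citet{bayma_sano_2015}.
\begin{lstlisting}[language=R]
# Schematic sketch of the masking step (the study performed this in MATLAB).
# The toy matrices and the 0.4 threshold are placeholders, not the exact
# parameters of Geerken (2009) or Bayma and Sano (2015).
set.seed(1)
ndvi       <- matrix(runif(16, -0.2, 1), nrow = 4)   # one toy NDVI composite
quality_ok <- matrix(sample(c(TRUE, FALSE), 16, replace = TRUE), nrow = 4)

forest_threshold <- 0.4                    # placeholder forest threshold
ndvi[!quality_ok] <- NA                    # Goodness mask: clouds, bad pixels
ndvi[ndvi < forest_threshold] <- NA        # keep only forest-like values
ndvi
\end{lstlisting}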
With the Land Cover product MCD12Q1 it was necessary to resize and interpolate each image to the corresponding sizes of the NDVI and EVI images. The interpolation followed a deterministic method, the Nearest Neighbour (NN) or Thiessen method. The nearest neighbour method was chosen because it does not extrapolate the data, which would not have been suitable for categorical data, and because it proved to be the fastest computational method with modest memory requirements \citep{SLUITER_2009, matlab_2017}. After the interpolation, a land cover mask was produced to select only pixels classified as forest. Following \citet{sulla2_2018}, the University of Maryland classification, which corresponds to Land Cover Type 2 in the MCD12Q1 product, was selected. The mask included the different types of forest with at least 40\% tree cover and a canopy higher than 2 m (see Table \ref{UMD2} for each class definition). Forests presenting less than 40\% tree cover were excluded because such cover does not characterise a transitional forest, being predominantly assigned to the Cerrado biome only \citep{bayma_sano_2015}.
\begin{table}[H]
\footnotesize
\caption{University of Maryland (UMD) legend and class definitions}
\begin{tabularx}{\textwidth}{l c X}
\hline
\hline
Name & Class & \centering\arraybackslash Description\\
\hline
Water & 0 & At least 60\% of area is covered by permanent water bodies \\
Evergreen Needleleaf forest & 1 & Dominated by evergreen conifer trees (canopy >2m). Tree cover >60\%. \\
Evergreen Broadleaf forest & 2 & Dominated by evergreen broadleaf and palmate trees (canopy >2m). Tree cover >60\%. \\
Deciduous Needleleaf forest & 3 & Dominated by deciduous needleleaf (larch) trees (canopy >2m). Tree cover >60\%. \\
Deciduous Broadleaf forest & 4 & Dominated by deciduous broadleaf trees (canopy >2m). Tree cover >60\%. \\
Mixed forest & 5 & Dominated by neither deciduous nor evergreen (40-60\% of each) tree type (canopy >2m). Tree cover >60\%. \\
Closed shrublands & 6 & Dominated by woody perennials (1-2m height) >60\% cover. \\
Open shrublands & 7 & Dominated by woody perennials (1-2m height) 10-60\% cover. \\
Woody savannas & 8 & Tree cover 30-60\% (canopy >2m). \\
Savannas & 9 & Tree cover 10-30\% (canopy >2m). \\
Grasslands & 10 & Dominated by herbaceous annuals (<2m). \\
Permanent Wetlands & 11 & Permanently inundated lands with 30-60\% water cover and >10\% vegetated cover. \\
Croplands & 12 & At least 60\% of area is cultivated cropland. \\
Urban and built-up & 13 & At least 30\% impervious surface area including building materials, asphalt, and vehicles. \\
Cropland/Natural Vegetation Mosaics & 14 & Mosaics of small-scale cultivation 40-60\% with natural tree, shrub, or herbaceous vegetation \\
Non-Vegetated Land & 15 & At least 60\% of area is non-vegetated barren (sand, rock, soil) or permanent snow and ice with less than 10\% vegetation. \\
Unclassified & 255 & Has not received a map label because of missing inputs \\
\hline
\hline
\multicolumn{3}{l}{\footnotesize Note: Source: \cite{sulla2_2018}.}
\end{tabularx}
\label{UMD2}
\end{table}
Finally, to capture variation within each month, the NDVI and EVI values of the first period (the first 15 days of the month) were compared to those of the second period (the remainder of the month). The assumption adopted is explained in Table \ref{assumption2}. In this sense, the final scene/image contains only pixels of the highest quality and free of cloud cover. At the end of the process, pixels were selected within the bandwidths of 100 km, 50 km, and 25 km, measured from the artificial Legal Amazon line towards the west and east portions of the State. When a pixel showed a variation greater than 0.1 within a month, it was considered a disturbance.
\begin{table}[H]
\footnotesize
\caption{Algorithm Assumption for NDVI and EVI values}
\begin{tabularx}{\linewidth}{X X X}
\hline
\hline
$NDVI_{1} > NDVI_{2}$ & $\rightarrow$ $NDVI_{1} - NDVI_{2}$ & Subscripts (1) and (2) refer to the first and second period of the month \\
$NDVI_{1} \leq NDVI_{2}$ & $\rightarrow$ $NDVI_{1} = NDVI_{2}$ & The value is assumed not to have changed within the month and the last observation is retained \\
\hline
$EVI_{1} > EVI_{2}$ & $\rightarrow$ $EVI_{1} - EVI_{2}$ & Subscripts (1) and (2) refer to the first and second period of the month \\
$EVI_{1} \leq EVI_{2}$ & $\rightarrow$ $EVI_{1} = EVI_{2}$ & The value is assumed not to have changed within the month and the last observation is retained \\
\hline
\hline
\end{tabularx}
\label{assumption2}
\end{table}
The approach above was undertaken for all the images corresponding to NDVI and EVI values for each month of each year, giving 406 final image results. For leap years, the process stopped at the land cover mask filtering step. To compose a panel for each monthly period over 17 years, we took the sum of pixels signalled as disturbed in each of the 406 images.
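The within-month rule of Table \ref{assumption2} and the subsequent counting step can be summarised by the following R sketch, in which the toy composites and object names are hypothetical.
\begin{lstlisting}[language=R]
# Sketch of the within-month disturbance rule and the counting step.
# 'ndvi_p1' and 'ndvi_p2' are toy first- and second-period composites.
set.seed(2)
ndvi_p1 <- matrix(runif(16, 0.3, 0.9), nrow = 4)
ndvi_p2 <- matrix(runif(16, 0.3, 0.9), nrow = 4)

monthly_drop <- pmax(ndvi_p1 - ndvi_p2, 0)  # NDVI1 <= NDVI2: no change recorded
disturbed    <- monthly_drop > 0.1          # disturbance flag per pixel
sum(disturbed, na.rm = TRUE)                # one entry of the monthly panel
\end{lstlisting}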
\subsubsection{Covariate Variables} \label{covariate variables} %mudar o titutlo
The climatic variables required more complex processing since the data from the weather stations were sparse. First, all the data were in tabular format, i.e. all variables in one table; a point layer with geographic locations in the form of latitude and longitude coordinates and z-coordinates (elevation values) was created for each weather station using ArcMap. After localising the x, y, z coordinates, a shapefile for each station was created. The shapefiles were then selected and extracted by year using the ArcPy environment.
Next, an interpolation method was used to deal with areas for which no data were available. The method chosen was ordinary (point) kriging, which has been argued to be the best interpolation technique available for sparse data \citep{SLUITER_2009}. Ordinary kriging belongs to the probabilistic methods, in which the concept of randomness is incorporated into the analysis. It is the basic form of kriging, where the prediction relies on a linear combination of the measured values, with the spatial correlation between the data determining the weights. As the mean is unknown, we assume that intrinsic stationarity exists in the data. This assumption may fail for our data set since this type of data is usually not stationary. To overcome this issue we used different sizes and shapes of neighbourhood to adjust the ordinary kriging model \citep{SLUITER_2009}.
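For readers unfamiliar with the technique, ordinary kriging can be sketched in R with the 'gstat' package as below; the package's 'meuse' demonstration data merely stand in for the sparse station records, since the interpolation in this study was performed with ArcMap's Geostatistical Analyst.
\begin{lstlisting}[language=R]
# Ordinary kriging sketch with 'gstat'; the 'meuse' demo data stand in
# for the sparse station points (the study itself used ArcMap's
# Geostatistical Analyst for this step).
library(sp)
library(gstat)

data(meuse)
coordinates(meuse) <- ~x + y            # measurement locations
data(meuse.grid)
coordinates(meuse.grid) <- ~x + y
gridded(meuse.grid) <- TRUE             # prediction grid

v     <- variogram(log(zinc) ~ 1, meuse)          # empirical semivariogram
v_fit <- fit.variogram(v, vgm(1, "Sph", 900, 1))  # fit a spherical model
ok    <- krige(log(zinc) ~ 1, meuse, meuse.grid, model = v_fit)
spplot(ok["var1.pred"])                 # interpolated surface
\end{lstlisting}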
After the interpolation, the images were converted to raster, resampled to the size of the response variable, and exported to the MATLAB environment. Generally, for temperature, precipitation, and other climate data, the best way to interpret and study these phenomena is to use anomaly measurements, which correspond to the difference between a measurement and the mean. In this sense, the average value of each variable was computed for each image, for each month and each year, giving a total of 408 images analysed for both regions (MA and LM) and, across all climatic variables, a total of 2,448 images. Following this procedure, the number of pixels with values higher and lower than the average value of the variable was extracted to a table. In the end, the table contained counts of values above and below the mean for each variable, translating into 12 variables. A summary of the response and explanatory variables of this study is presented in Table \ref{tab:summarychapter2}.
\begin{table}[H]
\footnotesize
\caption{Summary Statistics - Response and Covariate Variables}
\begin{tabularx}{\linewidth}{X H H H H}
\hline
\hline
Variable & Mean & St. Dev. & Min. & \centering\arraybackslash Max.\\
\hline
NDVI 25Km & 626 & 731 & 0 & 4803 \\
NDVI 50Km & 1279 & 1415 & 0 & 9185 \\
NDVI 100Km & 2087 & 2175 & 0 & 12660 \\
EVI 25Km & 8277 & 11353 & 0 & 73734 \\
EVI 50Km & 2554 & 3861 & 0 & 25250 \\
EVI 100Km & 8277 & 11353 & 0 & 73734 \\
Below Min Temp 25km & 83711 & 57037 & 0 & 218320 \\
Below Min Temp 50km & 159092 & 112926 & 0 & 436290 \\
Below Min Temp 100km & 250820 & 171007 & 0 & 633370 \\
Above Max Temp 25km & 105094 & 69290 & 0 & 218320 \\
Above Max Temp 50km & 201308 & 134299 & 0 & 436300 \\
Above Max Temp 100km & 287792 & 190863 & 0 & 633370 \\
Below Sunlight 25km & 79678 & 47638 & 0 & 169150 \\
Below Sunlight 50km & 167845 & 98578 & 0 & 434760 \\
Below Sunlight 100km & 261061 & 149782 & 0 & 633370 \\
Below Humidity 25km & 98957 & 50230 & 0 & 213400 \\
Below Humidity 50km & 192560 & 97230 & 0 & 324620 \\
Below Humidity 100km & 289185 & 140379 & 0 & 633370 \\
Above Precipitation 25km & 90697 & 37384 & 0 & 191170 \\
Above Precipitation 50km & 186745 & 77623 & 0 & 411370 \\
Above Precipitation 100km & 292985 & 118665 & 0 & 633370 \\
\hline
\hline
\multicolumn{5}{l}{\footnotesize Note: Statistics refer to N=204 observations for 17 years (2000 - 2016). The below and above nomenclature}\\
\multicolumn{5}{l}{\footnotesize refers to the mean of each variable.}
\end{tabularx}
\label{tab:summarychapter2}
\end{table}
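The above/below counting underlying Table \ref{tab:summarychapter2} can be illustrated with a short R sketch; the toy precipitation surface below is purely hypothetical.
\begin{lstlisting}[language=R]
# Sketch of the above/below-the-mean counting for one interpolated
# climate surface; the toy precipitation matrix is purely illustrative.
set.seed(3)
precip <- matrix(rgamma(16, shape = 2, scale = 50), nrow = 4)  # toy mm values

scene_mean <- mean(precip, na.rm = TRUE)
c(above = sum(precip > scene_mean, na.rm = TRUE),  # e.g. "Above Precipitation"
  below = sum(precip < scene_mean, na.rm = TRUE))
\end{lstlisting}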
\subsubsection{Modelling deforestation trends} %mudar o titutlo
%Descrever modelos passados de desmatamento
Many recent studies of land cover change focus explicitly on accounting for the trends and changes in the rates of environmental transformation in terms of their driving forces. More precisely, these studies try to identify the major causes of land-cover change within different geographical and historical contexts \citep{GEIST}. To this end, models of the proximate and underlying causes of deforestation recognise that some causes are direct, in the sense that their occurrence or variation generates more or less deforestation through simple channels, while other causes are indirect, in that they act on the sources of deforestation through more complex channels \citep{MOTEL}. In this regard, the physical environment strongly influences where agents deforest: many studies provide evidence that forests in drier, flatter, higher-fertility areas with adequate drainage, and thus more suitable for agriculture, are more likely to be cleared \citep{ANGELSEN4}. In contrast, poor soil quality is also reported to lead to relatively high deforestation, since scant soil endowment fuels accelerated clearing for other activities, such as pasture \citep{GEIST,COSTA}.
Environmental factors and biophysical drivers are also increasingly recognised not only as playing a role but as being fundamental to deforestation \citep{GEIST}. For example, \citet{BARNI} showed that, independently of the rate and magnitude of deforested areas, the areas affected by forest fires depended on forest type and climatic factors. Zones with ecotone influence tended to be deforested more than zones without ecotone influence, i.e., the denser a forest is, the less it is deforested. In addition, the largest occurrence of forest fires was observed in the zones with ecotone influence in years with \textit{El Niño} events, as in Maranhão state. The analysis also indicated that the areas most affected by forest fires during the studied period were associated with strong climatic events, and that the occurrence of these fires was amplified in the zones with ecotone influence \citep{BARNI}. These facts strongly suggest that it is pertinent to control for climatic aspects in ecotone zones when studying trends in deforestation.\footnote{In this study, ecological tension zone, ecotone zone and transitional forest have the same meaning.}
%According to \citet*{MOTEL}, there are at least four models to study the impact of deforestation and its trends.
%The first model included variables related to governance and institutional improvements as flatters for reduce the level of deforestation. The Environmental Kuznets Curve (EKC) model generally view better governance as an efficient means to achieve lower deforestation rates or environmental degradation while pursuing economic development.
%Considering studies that describe the \textit{ex-ante} and \textit{ex-post} deforestation occurrence, the second model takes into account reforestation activities, which corresponds to the phenomenon occurring in many developed countries. Rather than considering overall deforestation, \citet*{MOTEL} explained that a model of Compensated Successful Efforts (CSE) would be appropriate to developing countries in order to fulfil the requirements established by the Reduced Emissions from Deforestation and Degradation mechanism.
%In this context, many studies \citep{FENGER,DONG,LAMBIN1,FARGIONE,BARONA,ALDRICH,RICHARDS,KEENEY} concentrates in understanding causes of deforestation and land use changes to curb deforestation. In these studies, most commonly, a combination of proximate and underlying causes have been identified as the main drivers of deforestation.
\textbf{Statistical Modelling}
%The most common approach used to model deforestation would be a linear model with a single or multiple terms. However, things are rarely this simple, and the model might interact in a complex way. The four approaches presented earlier consider the linear model as the first premise of the analysis but they are challenging when dealing with non-linear relationship found between variables \citep{griffin_2012}. In environmental analysis the data are seldom modelled adequately by linear regression models.
%Introduzir o conceito de GAM
For most ecological and climatic data sets at least some of the assumptions underlying a linear regression model are unlikely to be valid \citep{zuur_2011}. To address this issue we employ a generalized additive model (GAM). A literature review shows that GAMs have not been extensively applied to deforestation models. \citet{COHEN_2008} showed that social factors appear to play a critical role that may ultimately determine disease risk when evaluated together with environmental and climatic factors; they modelled incidence rates of a disease in Cuba as the response variable, with deforestation as one of the explanatory variables. \citet{MENDES_2012} examined the relationship between deforestation, corruption and economic growth in the Legal Amazon region of Brazil and found no statistical evidence for the existence of a Kuznets curve. \citet{GREEN_2013} used a binomial GAM to account for forest and habitat losses in protected areas in the Eastern Arc Mountains of Tanzania. More recently, \citet{BEBBER_2017} studied the impact of protected areas on global carbon emissions in America, Africa and Asia; using spline regressions in GAMs, they suggested that tropical PAs overall reduced deforestation carbon emissions by 4.88 Pg, or around 29\%, between 2000 and 2012.
A GAM is a generalized linear model in which the linear predictor involves a sum of smooth functions of covariates. Mathematically, a GAM is an additive modelling technique where the impact of the predictor variables is captured through smooth functions \citep{larsen_2015}. In general, the model has a structure defined as:
%\begin{center}
\begin{equation} \label{eq:3.2}
g(\mu_{i}) = A_{i} \theta + f_{1}(x_{1i}) + f_{2}(x_{2i}) + f_{3}(x_{3i},x_{4i}) + \dots
\end{equation}
%\end{center}
where $\mu_{i} \equiv \displaystyle \E(Y_{i})$ and $Y_{i} \sim EF(\mu_{i},\phi)$. $Y_{i}$ is a response variable, $EF(\mu_{i},\phi)$ denotes an exponential family distribution with mean $\mu_{i}$ and scale parameter $\phi$, $A_{i}$ is a row of the model matrix for any strictly parametric model components, $\theta$ is the corresponding parameter vector, the $f_{j}$ are smooth functions of the covariates, $x_{k}$, and $i$ refers to the unit of analysis \citep{Wood_2017}. This model allows for flexible specification of the dependence of the response on the covariates because the smooth functions are nonparametric. Each smooth function $f_{j}$ is represented by a basis expansion, with an associated penalty controlling its smoothness. According to \citet{Wood_2004, Wood_2017}, estimation can be carried out by penalised regression methods, and the appropriate degree of smoothness for each $f_{j}$ can be estimated from the data using cross validation or marginal likelihood maximisation.
The smooth function $f$ is composed of a sum of basis functions $b_{j}$ and their corresponding regression coefficients $\beta_{j}$, written in the form:
%\begin{center}
\begin{equation} \label{eq:4.2}
f(x) = \sum\limits_{j=1}^{k} b_{j}(x) \beta_{j},
\end{equation}
%\end{center}
where $k$ is the basis dimension \citep[p.~162]{Wood_2017}.
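To make this representation concrete, the following minimal sketch shows how a single smooth term with basis dimension $k$ and a chosen spline basis can be specified in R's \texttt{mgcv} package, which implements exactly this penalised basis approach; the data frame and variable names are hypothetical and serve only as an illustration.
\begin{verbatim}
# Minimal sketch (hypothetical data): one smooth f(x) built from k
# basis functions, with the wiggliness penalty estimated automatically.
library(mgcv)

set.seed(1)
df <- data.frame(x = runif(200))
df$y <- sin(2 * pi * df$x) + rnorm(200, sd = 0.3)

# bs = "cr" requests a cubic regression spline basis; k is the basis
# dimension of the expansion above. The smoothing parameter is chosen
# by REML rather than fixed in advance.
m <- gam(y ~ s(x, bs = "cr", k = 10), data = df, method = "REML")

summary(m)             # effective degrees of freedom of the smooth
plot(m, shade = TRUE)  # estimated f(x) with a confidence band
\end{verbatim}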
%In this way, it is possible to estimate $f$ in such a way that becomes a linear model. The smooth functions are also called splines or penalised splines. Splines are real functions that are defined by multiple sub-functions, where the polynomial pieces connect are called knots. The family of splines includes cubic splines, which fits a third-order polynomial on parts of the data and ensures that at the knots, the connections are smooth \citep{zuur_saveliev_ieno_2014}. It is important to observe that at the ends of the data, there will be discontinuity in the value taken by the spline. When the variable of interest behaves cyclically, it is necessary a correction procedure to these splines. The cyclic cubic spline is a potential solution to the problem because its basis has an additional constraint, which forces to no discontinuity at the end points of the spline \citep{Gavin_2018}.
%The choice of degree of model smoothness is essentially arbitrary and finding the optimal number of knots for a regression is obtained by finding the parameters $\beta$ and the smoothers that minimise the criteria in equation \ref{eq:5} \citep{zuur_saveliev_ieno_2014}.
%\begin{center}
%\begin{equation} \label{eq:5}
%\| Y - X\beta \|^{2} + \lambda \int_{}^{} f '' (x)^{2}dx\\
%\end{equation}
%\end{center}
%The second part of equation \ref{eq:5} is a penalty and contains $\lambda$ and an integral over the second-order derivatives, telling how smooth a curve is. A high value of $f''$ means that the smoother f is highly non-linear whereas a straight line has a second derivative equal to 0. In this case, if $\lambda$ is very large, the penalty for having a non-smooth curve is also large and if $\lambda$ is small, then there is a low penalty for non-smoothness \citep{zuur_saveliev_ieno_2014}.
%In other words, when $f$ is very wiggly the penalty will take high values and when $f$ is ‘smooth’ the penalty will be low. If $f$ is a straight line then the penalty is actually zero. So the penalty has a null space of functions that are un-penalised: the straight lines in this case. Choosing the value of $\lambda$ requires further techniques. If $\lambda$ is too high or too low then the data will be over-smoothed or under-smoothed, in both cases this will mean that the estimate $f$ will not be close to the true function $f$. Ideally, it would be good to choose $\lambda$ so that $f$ is as close as possible to $f$ \citep{Wood_2017}. A possible solution might be to choosing $\lambda$ to minimise $\mathit{V_{o}}$ known as the ordinary cross validation using $\hat{f}$ estimation to fit all the data
%\begin{center}
%\begin{equation} \label{eq:6}
%\mathit{V_{o}} = \frac{1}{n} \sum_{i=1}^{n}\frac{(y - \hat{f_{i}})^{2}}{(1 - A_{ii})^{2}}\\
%\end{equation}
%\end{center}
%where \textbf{A} is the corresponding influence matrix, which is just the influence matrix for the model fitted to all the data. In practice, the $A_{ii}$ is replaced by their mean, resulting in the generalized cross validation score (GCV) \citep{Wood_2017}
%\begin{center}
%\begin{equation} \label{eq:7}
%\mathit{V_{g}} = \frac{n \sum_{i=1}^{n}(y_{i} - \hat{f_{i}})^{2}}{{[n - tr (\textbf{A})]^{2}}}\\
%\end{equation}
%\end{center}
%Interpretability
%Flexibility and Automation
%Regularization
It is possible to regularise the smoothness of the predictor functions to prevent overfitting, for example by choosing the smoothing parameters to minimise the generalized cross validation score. The GAM thus remains an additive modelling technique in which the impact of the predictor variables is captured through smooth functions, but it provides a regularised, automatic and interpretable solution. Because the model is additive, the interpretation of the marginal effect of a single variable does not depend on the values of the other variables in the model, and the predictor functions are derived automatically during model estimation \citep{larsen_2015}.
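As an illustration of this regularisation step, the sketch below (continuing the hypothetical data of the previous sketch) contrasts smoothing-parameter selection by the generalized cross validation score with selection by restricted maximum likelihood in \texttt{mgcv}; both are standard options, and the choice only affects how strongly the smooths are penalised.
\begin{verbatim}
# Hypothetical sketch: the same smooth fitted with two different
# criteria for choosing the smoothing parameter.
library(mgcv)

m_gcv  <- gam(y ~ s(x, bs = "cr"), data = df, method = "GCV.Cp")
m_reml <- gam(y ~ s(x, bs = "cr"), data = df, method = "REML")

# The estimated smoothing parameters and effective degrees of freedom
# indicate how strongly each fit has been regularised.
c(gcv = m_gcv$sp, reml = m_reml$sp)
c(gcv = sum(m_gcv$edf), reml = sum(m_reml$edf))
\end{verbatim}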
%Dar exemplos de contribuicao desta metodologia para o desmatamento e em especial desmatamento na floresta tropical, amazonia e cerrado (floresta de transicao).
\textbf{Autocorrelation} %mudar o titutlo
In principle, when the data form a time series, the value observed in one period may depend on the values observed in previous periods. This means that one should check for autocorrelation and, if necessary, account for it in the model. In GAMs it is possible to include an ARMA error structure. More precisely, the ARMA model has two parameters defining its order: the number of auto-regressive parameters ($p$) and the number of moving average parameters ($q$). Model \ref{eq:3.2} can now be expressed as
%\begin{center}
\begin{equation} \label{eq:8.2}
g(\mu_{i}) = A_{i} \theta + f_{1}(x_{1i}) + f_{2}(x_{2i}) + f_{3}(x_{3i},x_{4i}) + \dots + \varepsilon_{i}
\end{equation}
%\end{center}
where the auto-regressive part $\varepsilon_{i} = \phi_{1}\varepsilon_{i-1} + \dots + \phi_{p}\varepsilon_{i-p} + \eta_{i}$ models the error as a function of the residuals of the $p$ previous time points plus white noise, and the moving-average part $\varepsilon_{i} = \theta_{1}\eta_{i-1} + \dots + \theta_{q}\eta_{i-q} + \eta_{i}$ models the error as a function of current and past values of the disturbance term \citep{zuur_saveliev_ieno_2014}.
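A minimal sketch of how such an ARMA error structure can be added in practice is given below, using \texttt{mgcv::gamm}, which fits the smooths through \texttt{nlme} and therefore accepts an \texttt{nlme} correlation structure; the simulated monthly series and the variable names are hypothetical and purely illustrative.
\begin{verbatim}
# Hypothetical sketch: a GAM with ARMA(p = 1, q = 1) errors along a
# monthly time index.
library(mgcv)
library(nlme)

set.seed(1)
df <- data.frame(month = 1:204, x = runif(204))
df$y <- sin(2 * pi * df$x) +
        as.numeric(arima.sim(list(ar = 0.5), n = 204, sd = 0.3))

m_arma <- gamm(y ~ s(x, bs = "cr"),
               correlation = corARMA(form = ~ month, p = 1, q = 1),
               data = df)

# Residual autocorrelation can be inspected after adding the ARMA
# structure.
acf(residuals(m_arma$lme, type = "normalized"))
\end{verbatim}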
\textbf{Quantifying deforestation}
The generalized additive model (GAM) with an exponential family distribution has been the most widely applied method to measure and quantify the non-linear association between phenology and covariates, such as meteorological conditions, mainly because it allows for non-parametric adjustment of the non-linear confounding effects of seasonality and trends \citep{alkemad_1998,BELL_2015,JOYE_2015,LUSK_2016,SADAT_2016,HALPERIN_2016,SANTOS_2017,TAPIA_2017,LIU_2018,MORENO_2018}. In an attempt to quantify forest disturbance as a proxy for deforestation, we apply a GAM with a negative binomial distribution and a logarithmic link function. The negative binomial distribution is suitable for this study because the variance of deforestation is much larger than the mean, a frequent feature of ecological data \citep{zuur_2011}: the means of the NDVI and EVI series correspond to only 0.04\% and 0.01\% of their respective variances. Hence, the response variable is assumed to be negative binomial distributed. The full description is as follows:
\begin{flushleft}
\hspace{4em} $Y_{i} \sim \text{NB}(\mu_{i},k)$
\vspace{-0.2em}
\begin{equation}
\hspace{-12em}\E(Y_{i}) = \mu_{i} \quad \text{and} \quad \text{var}(Y_{i}) = \mu_{i} +\frac{\mu^{2}_{i}}{k} \label{eq:9.2}
\vspace{-1em}
\end{equation}
\hspace{4em} $\log(\mu_{i}) = \alpha + f_{j}(X_{i1}) + \dots + f_{j}(X_{iq})$ \hspace{1em} or \hspace{1em} $\mu_{i} = e^{\alpha +f_{j}(X_{i1})+\dots+f_{j}(X_{iq})}$ \\
\end{flushleft}
$Y_{i}$ is the response variable at observation $i$. The notation $f_{j}(X_{i1})$ stands for `smoothing function of the covariate $X_{1}$', and \textit{NB} is a negative binomial distribution with mean $\mu_{i}$ and dispersion parameter $k$. In general, negative binomial distributions are used to model count data that are overdispersed relative to the Poisson distribution.
The geometric distribution is a negative binomial with the overdispersion parameter $k$ set to 1, so that the variance increases as a quadratic function of the mean \citep{zuur_saveliev_ieno_2014}. Correcting for overdispersion with the geometric distribution, the model is stated as
\begin{flushleft}
\hspace{4em} $Def_{\scriptscriptstyle (NDVI_{i}, EVI_{i})} = \alpha + f_{\scriptscriptstyle Year}(Year) + f_{\scriptscriptstyle Precip}(aPrecipitation) + f_{\scriptscriptstyle Max Temp}(aMax Temperature)$
\vspace{-0.2em}
\begin{equation}
+ f_{\scriptscriptstyle Min Temp}(bMin Temperature) + f_{\scriptscriptstyle Sunlight}(bSunlight) + f_{\scriptscriptstyle Humidity}(bHumidity) \label{eq:10.2}
\vspace{-1em}
\end{equation}
\end{flushleft}
%Colocar o modelo de desmatamento escolhido explicar cada variavel do modelo, novamente.
where $Def_{\scriptscriptstyle (NDVI_{i}, EVI_{i})}$ is the response variable, which can take NDVI or EVI values for three different bandwidths (25km, 50km, 100km) in each month $i$, with $i=1,\dots,204$. The remaining terms are the intercept $\alpha$ and the additive smoothing functions of the explanatory variable Year and of the covariates Precipitation, Humidity, Maximum and Minimum temperature and Sunlight. The prefixes \textit{a} and \textit{b} refer to the sum of pixels above and below the mean, respectively.
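To make the distributional choice explicit, the sketch below (with a hypothetical data frame \texttt{def.data}, a simulated monthly deforested-pixel count \texttt{def} and one illustrative covariate) shows the two ways such an overdispersed response is commonly handled in \texttt{mgcv}: \texttt{nb()}, which estimates the dispersion parameter $k$, and \texttt{negbin(1)}, which fixes $k = 1$ and therefore corresponds to the geometric case described above.
\begin{verbatim}
# Hypothetical sketch: negative binomial GAMs with a log link.
library(mgcv)

set.seed(1)
def.data <- data.frame(aPrecipitation = runif(204, 0, 300))
def.data$def <- rnbinom(204, size = 1, mu = 20)   # simulated counts

# Dispersion parameter k estimated from the data:
m_nb  <- gam(def ~ s(aPrecipitation, bs = "cr"),
             family = nb(link = "log"),
             data = def.data, method = "REML")

# Geometric special case: k fixed at 1, so var(Y) = mu + mu^2.
m_geo <- gam(def ~ s(aPrecipitation, bs = "cr"),
             family = negbin(theta = 1, link = "log"),
             data = def.data, method = "REML")

m_nb$family   # prints the fitted family, including the estimated k
\end{verbatim}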
%Explicar processo de escolha do modelo de desmatamento (forwarding selection, Zuur).
Model selection followed the forward selection approach of \citet[p.~391]{zuur_saveliev_ieno_2014}. The procedure started with GAMs containing a single variable: 13 different models were fitted, each with a different variable and set of smoothers (penalised splines ``ps'', cubic regression splines ``cr'', and cyclic cubic splines ``cc''), and their Akaike information criterion (AIC) values were compared. The model with the lowest AIC was selected as the main model; 12 further models were then fitted, each adding one of the remaining variables, and the addition with the lowest AIC was retained. Forward selection stopped when the main model had a better AIC than all candidate extensions. After model selection, an autocorrelation test was conducted, but no residual autocorrelation was detected in any of the models. The selected model, including the splines, takes the form:
\begin{flushleft}
\hspace{1em} $Def_{\scriptscriptstyle (NDVI_{i}, EVI_{i})} = \alpha + f_{\scriptscriptstyle Year}(Year, bs= cc) + f_{\scriptscriptstyle Precip}(aPrecipitation, bs= cr) +$
%\vspace{-0.2em}
\begin{equation}
f_{\scriptscriptstyle Max Temp}(aMax Temperature, bs= cr) + f_{\scriptscriptstyle Min Temp}(bMin Temperature, bs= cc) + \label{eq:11.2}
%\vspace{-1em}
\end{equation}
\hspace{1em} $f_{\scriptscriptstyle Sunlight}(bSunlight, bs=cc) + f_{\scriptscriptstyle Humidity}(bHumidity, bs= ps)$
\end{flushleft}
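The selected specification in equation \ref{eq:11.2} translates almost directly into an \texttt{mgcv} call. The sketch below is only an illustration of that translation and of the AIC comparison used during the forward selection; the column names of the hypothetical data frame \texttt{def.data} mirror the variable labels above, and the simulated values carry no empirical meaning.
\begin{verbatim}
# Hypothetical sketch of the selected model and of one forward-selection
# step based on AIC.
library(mgcv)

set.seed(1)
n <- 204
def.data <- data.frame(Year            = rep(2001:2017, each = 12),
                       aPrecipitation  = runif(n, 0, 300),
                       aMaxTemperature = runif(n, 28, 38),
                       bMinTemperature = runif(n, 18, 24),
                       bSunlight       = runif(n, 4, 10),
                       bHumidity       = runif(n, 40, 95))
def.data$def <- rnbinom(n, size = 1, mu = 20)

m_full <- gam(def ~ s(Year,            bs = "cc") +
                    s(aPrecipitation,  bs = "cr") +
                    s(aMaxTemperature, bs = "cr") +
                    s(bMinTemperature, bs = "cc") +
                    s(bSunlight,       bs = "cc") +
                    s(bHumidity,       bs = "ps"),
              family = negbin(theta = 1, link = "log"),
              data = def.data, method = "REML")

# Current main model without bHumidity, to check whether the addition
# lowers the AIC.
m_base <- gam(def ~ s(Year,            bs = "cc") +
                    s(aPrecipitation,  bs = "cr") +
                    s(aMaxTemperature, bs = "cr") +
                    s(bMinTemperature, bs = "cc") +
                    s(bSunlight,       bs = "cc"),
              family = negbin(theta = 1, link = "log"),
              data = def.data, method = "REML")

AIC(m_base, m_full)

# Check residual autocorrelation of the chosen model.
acf(residuals(m_full, type = "deviance"))
\end{verbatim}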
Because this method is relatively recent, it is important to acknowledge that the algorithms available for choosing the optimal smoothing parameter are not yet fully mature and can generate misleading results if care is not taken. Furthermore, the use of such criteria can lead to over-fitting and implausible associations. The choice of smoothing parameters for smoothing splines in a GAM should therefore always be accompanied by a graphical verification of the functional associations with the outcome, to verify their plausibility \citep{moore_2011}.
\subsubsection{Model Validation} %mudar o titutlo
Validating the results of the algorithm applied to the MOD13Q1 and MCD12Q1 images drew on techniques from the machine learning domain. In summary, machine learning algorithms can figure out how to perform important tasks by generalizing from examples, which is often feasible and cost-effective where manual programming is not \citep{Domingos_2012}.
There are different types of machine learning algorithms, of which the most mature and widely used is classification. According to \citet{Domingos_2012}, a classifier is a system that inputs a vector of discrete and/or continuous feature values and outputs a single discrete value, the class. For this study, the classifier assigns pixels to the deforested or not-deforested class, and its input may be a Boolean vector $x = (x_{1},\dots, x_{j},\dots, x_{d})$, where $x_{j} = 1$ if the \textit{j}th pixel is deforested and $x_{j} = 0$ otherwise. A learner inputs a training set $(x_{i}, y_{i})$, where $x_{i} = (x_{i,1},\dots, x_{i,d})$ is an observed input and $y_{i}$ is the corresponding output, and outputs a classifier. The test of the learner is whether this classifier produces the correct output $y_{t}$ for future examples $x_{t}$. A feasible way to validate the classification is the confusion matrix.
%Explicar aqui o metodo usado para fazer a confusion matrix.
The confusion matrix is a two by two table that contains four outcomes produced by a binary classifier. The classification scheme divides the data randomly into a training set, a test set and a validation set. The training method is the Scaled Conjugate Gradient (SCG), which is a supervised learning algorithm for feed-forward neural networks \citep{mor_1993}. In order to optimise the performance, an iterative random sampling approach is applied. The Cross-Entropy approach is based on sampling and updating an underlying distribution function over the set of feasible solutions \citep{HU_2009}. Finally, calculations for the confusion matrix are done based on minimum excluded (MEX) calculations \citep{matlab_2017}.
The four outcomes produced by the confusion matrix are true positives, true negatives, false positives, and false negatives. The true positive (TP) rate refers to the number of correctly classified positive pixels divided by all positive outcomes, and the same applies to true negatives (TN). The false positive (FP) rate indicates the number of pixels assigned as positive that are, in fact, negative, divided by all positive outcomes. In turn, false negatives (FN) follow the same interpretation as false positives.
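The four quantities can be tabulated directly once predicted and reference labels are available. The short sketch below is a generic illustration with hypothetical label vectors, using the standard convention of normalising by the reference class totals; it is not the neural-network pipeline used in this study.
\begin{verbatim}
# Generic sketch (hypothetical labels): confusion matrix for a binary
# deforested (1) / not deforested (0) classification.
set.seed(1)
actual    <- rbinom(1000, 1, 0.3)                 # reference labels
predicted <- ifelse(runif(1000) < 0.95, actual,   # mostly correct,
                    1 - actual)                   # with a few errors

cm <- table(Predicted = predicted, Actual = actual)
cm

TP <- cm["1", "1"]; TN <- cm["0", "0"]
FP <- cm["1", "0"]; FN <- cm["0", "1"]

c(true_positive_rate  = TP / (TP + FN),
  true_negative_rate  = TN / (TN + FP),
  false_positive_rate = FP / (FP + TN),
  false_negative_rate = FN / (FN + TP))
\end{verbatim}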
%Explicar os metodos de validacao para as variaveis Response
To validate the response variable results, the MODIS Vegetative Cover Conversion (VCC) product was used for the available period (2000-2005). The product is further divided into a Deforestation product (MOD44A$\_$C) and a Burn product (MOD44A$\_$B), and both were used to compute the validation test.
The method for the deforestation product is derived from the original space partitioning method \citep{zhan_2002} and relies on a decision tree classification algorithm \citep{breiman_1984} to determine the antecedent vegetation condition and compare it to the current vegetation condition. The burn product is derived using the differenced Normalized Burn Ratio (dNBR) methodology from two scenes a year apart, as proposed by \citet{key_2004}. Tests were computed per season (rainy season and dry season) and per vegetation index (NDVI and EVI). The confusion matrix showed 100\% true positives and true negatives, which indicates high stability of the algorithm created and applied to the NDVI and EVI indices.
%ROBUSTNESS CHECK AND VALIDATION WITH HANSEN AND PRODES
Checking the results with other datasets was an alternative approach taken in this study. A confusion matrix was applied to the \citet{Hansen_2013} and Brazilian (INPE) data sets \citep{brito_2018}. The results showed no difference from the results presented in the VCC validation method. The confusion matrices are provided in the Appendix \ref{appendix2}.
%Explicar os metodos de validacao para as variaveis Explicativas
The covariate variables were validated using cross-validation during the interpolation procedure. Cross-validation uses all the data to estimate the trend and autocorrelation models; it removes each data location one at a time and predicts the associated data value. This is also known as leave-one-out cross-validation and can be computed for all or a subset of the data locations \citep{esri_2016}.
In the kriging method, the cross-validation produced additional results that helped evaluate the best interpolation method. More specifically, the Average Standard Error (ASE) and the Root Mean Square Error (RMSE) of prediction were computed. If the ASE from the model is close to the RMSE, the model is correctly assessing the variability in the predictions. If the ASE is greater than the RMSE, the model is overestimating the variability of the predictions and, finally, if the ASE is less than the RMSE, the model is underestimating the variability in the predictions. For the covariate analyses, the ASE was on average 95\% of the value of the RMSE, indicating a reasonable interpolation method with valid results.
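This ASE/RMSE check can be reproduced from any set of cross-validation predictions, observations and prediction standard errors; the sketch below uses hypothetical vectors purely to illustrate the comparison, not the ArcGIS output itself.
\begin{verbatim}
# Generic sketch (hypothetical leave-one-out output): is the kriging
# model assessing prediction variability correctly?
set.seed(1)
observed  <- rnorm(100, mean = 25, sd = 2)    # e.g. temperature
predicted <- observed + rnorm(100, sd = 0.8)  # leave-one-out predictions
std_error <- rep(0.85, 100)                   # kriging standard errors

rmse <- sqrt(mean((observed - predicted)^2))  # root mean square error
ase  <- mean(std_error)                       # average standard error

ase / rmse  # close to 1: variability correctly assessed;
            # > 1: overestimated; < 1: underestimated
\end{verbatim}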
In statistical terms, model validation for the additive models was visual rather than numeric after the model selection phase. The steps taken included plotting the residuals against the fitted values to identify violations of homogeneity, and plotting the residuals against each variable in the model to check for patterns. In addition, a histogram of the residuals was examined to verify normality.
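For a model fitted with \texttt{mgcv}, these visual checks can be produced directly; the sketch below (using the hypothetical fitted model \texttt{m\_full} and data frame \texttt{def.data} from the earlier sketches) only illustrates the type of plots that were inspected.
\begin{verbatim}
# Visual validation sketch for a fitted GAM (hypothetical m_full).
library(mgcv)

par(mfrow = c(2, 2))
gam.check(m_full)   # residual QQ-plot, residuals vs linear predictor,
                    # histogram of residuals, response vs fitted values

# Residuals against each covariate, to look for remaining patterns.
res <- residuals(m_full, type = "deviance")
plot(def.data$aPrecipitation, res); abline(h = 0, lty = 2)
plot(def.data$bHumidity,      res); abline(h = 0, lty = 2)
\end{verbatim}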
%Criar um diagrama parecido com o de Griffin, p. 34 para descrever MODEL VALIDATION
\section{Results} %mudar o titutlo
\subsection{Deforestation trend in an ecotone zone of Maranhão} \label{resultssection1.2}
%Explicar response over year
Our baseline model includes 204 monthly observations of NDVI and EVI values changing over the years with five influencing covariates (Precipitation, Max Temperature, Min Temperature, Sunlight and Humidity). The baseline model was applied to three different distance spans (25km, 50km, 100km) considering the Legal Amazon line.\footnote{It is important to acknowledge that the numerical results of the model should not be interpreted in the same manner as the linear regression results. According to \citet{Wood_2011} in \citet{zuur_saveliev_ieno_2014}, p-values close to 0.05 can be around half of their correct value when the null hypothesis is true. This means that smoothers with p-values smaller than 0.001 can be trusted but p-values of 0.02 to 0.05 need to be viewed with caution.}
In general, the explained deviance is higher for the models close to the artificial line. At large distances (100 km), most of the covariates do not have a significant effect and are thus omitted from this part of the analysis. Table \ref{results1} gives a summary of the results, including deviance, AIC, p-value, degrees of freedom, and the estimated value of the function. The best way to understand and interpret GAMs is through visual representation. Since the analysis produced several models, we name each model according to its location (Cerrado Maranhão, MA, or Legal Maranhão, LM), the bandwidth or distance from the Legal Amazon line (25km, 50km or 100km), and the response variable, NDVI (n) or EVI (e); a final letter indicates the rainy (r) or dry (d) season subsamples. Plotting the smoothing functions, it is possible to check the path of deforestation through the years and the climatic state during that period. The blue line refers to positive changes in deforestation, or increments, and the red line indicates negative changes in deforestation, or decreases \citep{Gavin_2018}.
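A minimal way to obtain such plots of the estimated smooths from a fitted \texttt{mgcv} model is sketched here for the hypothetical model object \texttt{m\_full}; the colour coding of increases and decreases mentioned above follows \citet{Gavin_2018} rather than the default \texttt{mgcv} output.
\begin{verbatim}
# Sketch: plotting the estimated smooth terms of a fitted GAM.
library(mgcv)

# All smooths on one page, with shaded 95% confidence bands and partial
# residuals; intervals where the confidence band excludes zero indicate
# significant increases or decreases on the scale of the linear predictor.
plot(m_full, pages = 1, shade = TRUE, residuals = TRUE, seWithMean = TRUE)
\end{verbatim}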
\begin{sidewaystable}
\begin{table}[H]
\footnotesize
\caption[Models Output of GAMs]{Models Output of GAMs}
\begin{tabularx}{0.88\linewidth}{lxxxxxx}
\hline
\hline
 & \multicolumn{2}{c}{Baseline Model} & \multicolumn{2}{c}{Rainy Season} & \multicolumn{2}{c}{Dry Season}\\
Model & AIC & Deviance Explained & AIC & Deviance Explained & AIC & Deviance Explained\\
\hline
ma25n & 4592.974 & 15\% & 3008.928 & 53.8\% & 3029.610 & 52.3\% \\
lm25n & 4753.342 & 30.9\% & 3082.583 & 44\% & 3330.420 & 54.6\% \\
ma25e & 5382.682 & 20.1\% & 3486.908 & 52.1\% & 3533.256 & 50.9\%\\
lm25e & 5450.606 & 36.4\% & 3369.937 & 63.1\% & 3812.126 & 65.6\%\\
ma50n & 5035.296 & 17.3\% & 3402.803 & 60\% & 3390.411 & 42.2\%\\
lm50n & 5010.875 & 24.5\% & 3241.013 & 55.8\% & 3498.748 & 35.1\%\\
ma50e & 6123.032 & 20.5\% & 4154.889 & 62.6\% & 4051.907 & 47\%\\
lm50e & 5921.597 & 29.3\% & 3886.190 & 63.2\%& 4112.314 & 48.5\%\\
ma100n & 5312.836 & 16.6\% & 3723.872 & 62.2\%& 3562.066 & 45.2\%\\
lm100n & 5318.571 & 30.5\% & 3552.311 & 52.7\% & 3701.532 & 47.2\%\\
ma100e & 6097.770 & 23.6\% & 4176.352 & 59.4\% & 4051.806 & 45.3\% \\
lm100e & 5901.040 & 33.4\% & 3906.004 & 59.5\% & 4097.055 & 46.7\% \\
\hline
\hline
\multicolumn{7}{l}{Note: Models Output of GAMs with Akaike’s Information Criteria (AIC) and Deviance goodness-of-fit statistic for each statistical model}
\end{tabularx}
\label{results1}
\end{table}
\end{sidewaystable}
With respect to the model \textit{ma25n}, the explained deviance is 15\% and the overdispersion parameter was set to 1 (the geometric negative binomial distribution). The explanatory variable year is significant at the 1\% level, and the smoothers significant at the 1\% level are \textit{Min Temperature}, \textit{Max Temperature}, \textit{Humidity} and \textit{Sunlight}, with 2.4, 6.8, 4 and 7.9 degrees of freedom, respectively. In summary, for MA at 25km, deforestation had a positive effect after 2010 and, through the years, deforestation decreased during periods of low humidity (high thermal oscillation). It is also deduced from the model that deforestation decreased during periods with more hours of sunlight. There were more deforested pixels in periods when temperature declined.
For the model \textit{lm25n}, the explained deviance is 30.9\%; the explanatory variable year is significant at the 5\% level, and the smoothers significant at the 1\% level are \textit{Max Temperature}, \textit{Humidity} and \textit{Sunlight}, with 8.6, 6.1 and 1.4 degrees of freedom, respectively. The results are similar to those of the \textit{ma25n} model in showing that deforestation also decreased during periods of lower humidity and extremely high levels of precipitation. Examining deforestation as a function of the year showed a positive effect, i.e., increments in forest loss, at the beginning of the 2000s.
%Explicar o de \textit{ma25e} e o ml25e
Models with EVI values were considered better in terms of cross validation. Model \textit{ma25e} shows that deforestation increased over time with a positive peak after 2010. Deforestation also increased when the covariates sunlight and minimum temperature decreased. For maximum temperature, the negative effect is greater than the positive effect but, in essence, deforestation decreased with higher temperatures. On the Legal Maranhão side, the results of the model \textit{lm25e} show that all variables are significant at the 1\% level. The model explains 36\% of the changes in EVI values, i.e., deforestation. From 2007 to 2012, deforestation increased in the region. The positive effect happened during periods of higher temperature and low humidity.
%explicar o de \textit{ma50n} e \textit{lm50n}
%Expanding the buffer zone to 50km, the model \textit{ma50n} shows the same pattern as 25km analysis with the difference that deforestation increased with higher levels of precipitation. The model explains 17.3\% of NDVI values for that region. Deforestation through the years had only a negative effect between 2002 and 2004 and the most of deforestation occurred when levels of sunlight were low. For the \textit{lm50n} model, it is also observed the same pattern with the exception that, at 50km band, deforestation declined with higher levels of temperature although the pattern for deforestation continued to decrease at extreme higher levels of precipitation.
%explicar o de \textit{ma50e} e \textit{lm50e}
%Checking for EVI values, the model \textit{ma50e} still showed forest loss increment for the period after 2010, giving the same result as \textit{ma25e}. In turn, deforestation happened during low temperature period and with less hours of sun. It shows that at 50km, increased deforestation happened when precipitation level was higher than the average and deforestation and humidity had a negative linear relationship. This result differs from the \textit{lm50e} model, in this model, humidity and deforestation have a non-linear relationship and the latter occurred when the former was at low levels. Forest loss also happened when temperatures increased and, looking in function of the years, deforestation decreased after 2004 and increased back again after 2010.
%explicar o \textit{ma100n} e \textit{lm100n}
%For the last set of models with the largest bandwidth the deviance explained between 16\% to 33\% of deforestation in those areas. Starting by the model \textit{ma100n}, many of the covariates have lost significance. The explanatory variable year has no longer an effect on deforestation and higher levels of temperature have a linear negative relationship with deforestation. At the end, deforestation decreased during lower levels of humidity or, higher thermal oscillation. Model \textit{lm100n} showed that deforestation increased at higher levels of precipitation but when the levels of precipitation reached values higher than 75\% of the sample, deforestation decreased significantly. At 100km buffer, deforestation decreased during periods with less glaring sunlight.
%explicar o \textit{ma100e} e ml100e
%With the model \textit{ma100e}, deforestation increased during 2008 to 2012 with a slightly decrease in 2010. Deforestation varied considerable with the decay of humidity levels, this can also be mirrored with the temperature covariates, both seem to show that deforestation increased within their extreme values. Again, deforestation decreased with the increasing amount of precipitation during the studied period. Finally, the 100km band analysis for the Legal Maranhão area shows that all the variables are significant at 1\% level and, apart from the same results found in model \textit{lm100n}, deforestation decreased in a short period of 2007 to 2009. Deforestation occurred when temperatures were oscillating. Forest loss took place whit low temperatures and high temperatures in the studied period.
\subsection{The effect of seasonality on the deforestation trend in an ecotone zone of Maranhão} \label{resultssection2.2}
Given the results in Section \ref{resultssection1.2}, it is clear that seasonality is a key factor for the deforestation trend in the transition forest of Maranhão. Low values of solar incidence, low levels of humidity, high levels of precipitation, and reduced temperatures indicate possible differences in trend between the winter and summer seasons. As explained in Section \ref{studycarac}, the ecotone forest presents two well-defined seasons: the rainy period (summer) and the dry period (winter). In an attempt to refine the analysis, we divided the sample according to these two seasons. The wet season starts in December and continues until the end of June; the dry season starts in July and lasts until the end of November. Thus there are 102 observations for each season. To allow further comparison, the models took the same form given by equation \ref{eq:11.2}.
\subsubsection{Rainy Season}
%Explicar o de \textit{ma25n} e o \textit{ml25n}
Subsetting the data and applying the GAM, model \textit{ma25nr} explains 53.8\% of the deforestation path on the Maranhão eastern side. From the plot, deforestation increased in yearly cycles, i.e., in 2000-2002, 2006 and 2008. Also, increased forest loss happened with less available sunlight and increased precipitation levels. The deforestation trend shows a decrease when temperatures reached extremely high and low values; the same pattern is observed for the humidity covariate. At 25km in the LM area, deforestation over time had only two positive effects, from 2000-2001 and from 2011-2012. Model \textit{lm25nr}, with 44\% of deviance explained, shows that clear-cutting expanded in periods of high precipitation and less exposure to sunlight.
%Explicar o de \textit{ma25e} e o ml25e
The model with EVI values as the response variable shows a different trend compared to the NDVI values. Model \textit{ma25er} indicates that deforestation increased in the MA region over the years. Accordingly, deforestation took place when precipitation levels were higher than average and during periods with fewer hours of sunshine. The \textit{lm25er} model explains 63.1\% of the deforestation process. Cutting down trees was more prominent during 2000 to 2005 and 2010 to 2015. The removal of trees increased with high levels of precipitation and low levels of temperature and sunlight.
%explicar o de \textit{ma50n} e \textit{lm50n}
Using the areas within 50km of the artificial line, model \textit{ma50nr} followed the same arrangement shown in model \textit{ma25nr}, except for revealing that forest losses increased through time. There is also a clear cycle apparent from the Year plot. A similar cycle is shown in model \textit{lm50nr} for the explanatory variable year. Decreasing deforestation is associated with high levels of precipitation in the MA region.
%explicar o de \textit{ma50e} e \textit{lm50e}
With a higher deviance explained, model \textit{ma50er} also exhibits a cyclical pattern of deforestation in the MA region, with no notable differences compared to the NDVI model (\textit{ma50nr}). Model \textit{lm50er} shows that deforestation increased before and after 2005 and when maximum temperatures were even higher than the maximum average. The decreasing process happened when precipitation levels were much higher than average as well.
%explicar o \textit{ma100n} e \textit{lm100n}
For the models with the largest buffer area in the MA ecotone region, model \textit{ma100nr} still showed some deforestation cycle, with a peak right after 2005. At 100km, deforestation was positive when temperatures dropped below the minimum average and when there were fewer hours of direct sunlight. At the LM 100km border, the model \textit{lm100nr} provides evidence of an increasing path of deforestation through time, with the highest peak during 2009. Forest losses took place when precipitation levels were higher than average, up to a certain limit.
%explicar o \textit{ma100e} e \textit{lm100e}
Relative to the EVI analysis, the model \textit{ma100er} presented results similar to the NDVI model. It is noticeable, however, that the EVI model shows an increasing path of deforestation during 2006-2010, unlike the NDVI model (\textit{ma100nr}). The EVI model for the LM side also presented similarities to the NDVI model. Looking over the years, deforestation had a positive effect until 2005, then again in 2008 to 2010, and once more recently in 2014-2016. However, these cycles were not different from what was already seen in the NDVI model (\textit{lm100nr}).
\subsubsection{Dry Season}
%Explicar o de \textit{ma25n} e o \textit{lm25n}
The analysis of the dry season with the GAM model proposed in \ref{eq:11.2} shows that the deviance explained by the \textit{ma25nd} model was 52.3\%. All the terms were highly significant at the 0.1\% level. The deforestation trend had three positive peaks, during 2005-2006, 2011-2013, and after 2015. Forest loss increments appeared in periods of high precipitation accompanied by high temperatures. The model for the 25km bandwidth on the LM side has 54.6\% of deviance explained; however, only two variables are significant at the 1\% level, namely \textit{Humidity} and \textit{Sunlight}. In the dry season, deforestation in Legal Maranhão decreased when humidity levels were low and increased when sunlight was below average. Apparently, deforestation did not change over time during the dry season.
%Explicar o de \textit{ma25e} e o \textit{lm25e}
Turning to the second response variable, the model \textit{ma25ed} has all terms highly significant at the 0.1\% level and the deforestation cycle through time is evident. The same pattern presented in the model with the NDVI response variable is seen in this model. Positive values of forest loss happened in periods of high precipitation accompanied by high temperatures and fewer hours of sun. In contrast to model \textit{lm25nd}, model \textit{lm25ed} improved significantly, with 65.6\% deviance explained. One can observe four positive peaks of deforestation during the studied period (2004, 2006, 2011 and 2015), even though the overall path shows a significant decrease in 2005, which essentially compensates for the positive peaks.
%explicar o de \textit{ma50n} e \textit{lm50n}
For the 50km bandwidth on the MA side, the explanatory variable and the covariates are highly significant at the 0.1\% level. An increment in deforestation in the model \textit{ma50nd} occurred during 2005 and 2006 and from 2013 until 2016, and in general the process occurred during high levels of precipitation and low temperatures. In the LM region (model \textit{lm50nd}), during the dry season negative changes in deforestation took place when humidity levels were low and temperatures were high. There appears to be no deforestation trend over the years.
%explicar o de \textit{ma50e} e \textit{lm50e}
Model \textit{ma50ed} shows an interesting deforestation trend for the Maranhão region. Until 2005, deforestation was decreasing over time but, after 2005, the deforestation process increased especially after 2013. This process took place during periods of high precipitation rates and lower temperatures. Looking across the artificial line, for EVI values, the \textit{lm50ed} model showed only one period of changes during 2005 (decreasing) to 2007 (increasing). After that, there appears to be no trend of deforestation through the years. The process captured during that period followed low temperatures.
%explicar o \textit{ma100n} e \textit{lm100n}
Reflecting the same pattern as the previous models with smaller buffers, the model \textit{ma100nd} presents the explanatory variable and the covariates as highly significant at the 0.1\% level. The model explains 45.2\% of the deviance and a modest trend is seen, with an increasing peak after 2013. With 47.2\% of deviance explained, model \textit{lm100nd} does not show a deforestation trend over the years, in accordance with the previous models (\textit{lm25nd} and \textit{lm50nd}). Lower humidity in the period of forest change is observed for the \textit{lm100nd} model.
%explicar o \textit{ma100e} e ml100e
The deforestation trend for model \textit{ma100ed} is very similar to that of model \textit{ma100nd}, showing no significant difference in the peaks. The variables are highly significant at the 0.1\% level and the trend is observed with high levels of precipitation and lower temperatures. Decreased deforestation was detected during periods of low humidity. In terms of the 100km buffer, the LM region did not experience a trend in deforestation over time. The model explains 46.7\% of the deviance, and forest change happened during periods of low humidity and low solar radiation.
\subsection{Settlements}
Thus far we have found clear differences in the deforestation trends, and in the factors driving them, across the two regions examined here, regardless of their spatial definition. To investigate this further, we also specifically examine deforestation trends within settlements. Settlement areas are allocated and supervised by Brazil's Special Secretary of Agrarian Development, and there is virtually no law enforcement in these plots, resulting in low levels of environmental compliance \citep{PERES2}.\footnote{Due to the intense debate on the issue and the commitment of other Latin American countries to the implementation of agrarian reform in the 1960s, the government included it as one of its priorities. An amendment that allowed the Union to promote expropriation in the social interest, upon payment of prior and fair compensation in special government bonds, was drafted and approved. Shortly thereafter, Law 4504 was enacted, which provides for the Land Statute (Estatuto da Terra, 1964). The Brazilian Federal Land Reform program was launched in 1964 to bring \textit{people without land to land without people}. This applied not only to the poor and landless \textit{peasants}, but also to the expanding Southern Brazilian agribusiness. At the same time, the Brazilian Institute of Agrarian Reform (IBRA in Portuguese) and the National Institute of Agrarian Development (INDA in Portuguese) were created, later replaced by the Institute for Rural Settlement and Agrarian Reform (INCRA). Brazil was thus provided with a legal and institutional framework that would start a national land reform program \citep{ESPADA}.} The Brazilian Environmental Police (Instituto Brasileiro do Meio Ambiente e dos Recursos Naturais Renováveis—IBAMA) repeatedly fined the federal Agrarian Agency (Instituto Nacional de Colonização e Reforma Agrária—INCRA) for environmental violations in the settlements. Usually, the fines are sent to INCRA and not to the settler. When the agency receives the fines applied by the environmental police, the justice system frequently takes the side of the agency or even annuls the fines because, from a legal point of view, the agency does not commit an environmental crime, but rather acknowledges the presence of preserved forest within settlements. In this sense INCRA complies with the legislation, because it leaves part of the forest of the whole settlement intact, but it cannot control deforestation on the individual granted plots.
Deforestation in settlements is committed by the landowner or landholder. In many cases, loggers lease lots and even pressure small farmers to clear; they may also threaten and kill community leaders who hinder the timber business. Moreover, there is pressure from local commerce and from the sawmills that buy this wood. Therefore, it is not the agrarian reform policy itself that causes deforestation. Under pressure from public opinion, INCRA established the `Green Settlement Program' in 2012 to deal with the environmental debt of settlements. This policy, however, has still not been implemented, and there is no feasibility study that could endorse its effectiveness \citep{PACHECO,PERES2}. Under these circumstances, and in an attempt to corroborate the findings above, this study extracted from the observed region the settlement areas on both sides, MA and LM (see Figure \ref{fig:delimitacaosett2}), which are not directly affected by any surveillance monitoring policy, in order to check whether the deforestation trends within settlements differed from the trends presented earlier.
The analysis of deforestation in settlements follows the same strategy as presented in Sections \ref{resultssection1.2} and \ref{resultssection2.2}. The sample includes 204 monthly observations of Vegetation Index values changing over the years, together with the covariates \textit{Precipitation}, \textit{Max Temperature}, \textit{Min Temperature}, \textit{Sunlight} and \textit{Humidity}. The baseline model presented in equation \ref{eq:11.2} is applied to the entire studied area, since the environmental surveillance policy does not apply to the settlers. Plotting the smoothing functions, it is possible to check the path of deforestation through the years and the climatic state during the studied period.
The MA model shows that lower levels of deforestation occurred when the settlements had less sunlight during the day. Also, deforestation decreased significantly before 2007, and no significant changes were observed after that period. Comparing the indices, the EVI model showed more variability than the NDVI model. Deforestation increased before 2004 and after 2010 and was accompanied by significant changes in temperature, with forest losses taking place at higher temperatures preceded by lower temperatures. In general, these results resemble the findings for the Maranhão side. The models explained between 33\% and 35\% of the deviance.
Looking at the LM model, significant deforestation appeared when the settlements had less sunlight during the day, in contrast with the MA model. It can be seen from the model that deforestation happened at a constant level, with no significant changes throughout the studied years. No differences were found when comparing the two vegetation indices. The results here also do not provide evidence of significant differences when compared to the results found for the 100km threshold (see Appendix \ref{appendix2}). These models explained between 23\% and 26\% of the deviance.
When sub-sampling the dataset to the rainy season, one observes that the MA model presents a cyclical deforestation trend, in congruence with the main findings. For both indices the deforestation cycle within settlements had less variability, and the number of deforested pixels did not increase as it did in the findings of Section \ref{resultssection2.2}. The models explained about 72\% of the deviance. The settlements on the Legal Amazon side experienced results similar to the rainy season sample of the main findings. Deforestation followed a scenario of higher precipitation levels, fewer hours of visible sun, and low temperatures. The models explained 49\% to 58\% of the deviance.
The dry season for the settlements on the Maranhão side reveals a path similar to the results found in Section \ref{resultssection2.2}. Inside settlements, deforestation increased during periods of high precipitation and low temperature. This model also shows that deforestation did not change significantly before 2005, in contrast to the main findings for this period. The models explained 51\% to 53\% of the deviance. The deforestation path within settlements on the Legal Amazon side shows a completely different picture when compared to the main results for this region: there is a clear cyclical path of deforestation within settlements. This result is apparent for both vegetation indices, but is more prominent for NDVI. The explained deviance ranged from 36\% to 41\%. The results obtained for this region supported the three outcomes highlighted in Section \ref{ref:discussion}. More precisely, the findings here suggest that much of the forest loss activity happened similarly in the two regions. The second outcome, that cloud cover might benefit deforestation because it inhibits forest cover detection from satellites, is not observed for the settlement areas. An explanation may be that in the main sample the artificial line acted as a political and policy barrier, whereas for settlements this barrier had no policy significance, since the settlements are not constrained by it.
\section{Discussion} \label{ref:discussion}
Previous studies using GAMs to model deforestation are scarce. Among purely ecological analyses, there are studies related to changes in phenology using GAMs, for example \citet{TAPIA_2017}. The present study demonstrates that GAM-based models using satellite-derived data can be useful for checking and understanding deforestation trends in an ecotone region.
%Dar explicacao dos resultados, dando a entender que houve um shift do desmatamento para o lado do MA durante o periodo seco a nivel de 25km a 50km.
In the results, the GAMs confirmed that deforestation is related to year and to the covariates, but also revealed that there are substantial differences in trends between seasons. For the Legal Maranhão region, most of the deforestation happened during the rainy season, and the results indicate that this pattern holds across the 25km and 50km buffers. In essence, the best description of deforestation for this region includes high levels of precipitation, reaching a threshold corresponding to 6.3\% (25km), 4.4\% (50km) and 3.9\% (100km) of the observations in the whole sample, and a low number of hours of direct sunlight, reaching a threshold corresponding to 9.3\% (25km), 7.8\% (50km) and 6.8\% (100km) of the observations. During the dry season, several models were not able to capture a trend for deforestation, indicating that no significant changes were picked up by the models in this season. One exception was model \textit{lm50ed}, which captured a change in the year following the establishment of environmental surveillance monitoring in that region.
The Maranhão side GAM models behaved very differently during the dry season compared to the Legal Maranhão area: it was confirmed that there was a deforestation trend during this season. In fact, the results indicate that the Maranhão region has a well-defined deforestation trend in both seasons. Notably, the deforestation process increased during the dry season from 2005 onwards. The environmental conditions that consistently shaped deforestation in that region were low temperatures and low availability of sunlight, with no apparent thresholds.
%colocar que EVI eh melhor que NDVI o que corrobora estudos de bayman
%1- Is the decreasing deforestation trend in Amazon Maranhão during 2000 to 2017 displaced to the Cerrado Maranhão? %2- Is there a spillover effect from the environmental policy in the decreasing trend in deforestation on the Cerrado Maranhão?
These results suggest that deforestation in the Amazon region (LM) was displaced during the dry season to the Cerrado region (MA). It is not possible to conclude that this shift also happened during the rainy season, because both regions were characterized by positive increments in deforestation. There appears to be no spillover effect from the environmental enforcement executed in the LM region to the MA region; one plausible piece of evidence is that deforestation persisted during both seasons, with distinct cycles. An interaction between precipitation and sunlight shows the different paths between seasons for both regions (see Figures \ref{fig:visgam}, \ref{fig:visgamr} and \ref{fig:visgamd}).
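Such interaction surfaces can be produced from a fitted model over a grid of two covariates; the sketch below applies \texttt{mgcv::vis.gam} to the hypothetical model object \texttt{m\_full} from the earlier sketches and is only indicative of how a precipitation--sunlight surface can be visualised.
\begin{verbatim}
# Sketch: joint effect of precipitation and sunlight on the predicted
# response, for a hypothetical fitted model m_full.
library(mgcv)

vis.gam(m_full,
        view      = c("aPrecipitation", "bSunlight"),
        plot.type = "contour",    # or "persp" for a 3-D surface
        type      = "response",   # predictions on the response scale
        too.far   = 0.1)          # avoid extrapolating far from the data
\end{verbatim}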
With these results, three possible outcomes emerge: i) since the region is a transitional zone, the two areas do not differ in terms of biota; in a sense, anthropic actions were responsible for the apparent changes in the deforestation trends, and these changes ii) happened during periods of high precipitation and low solar incidence, which in turn suggests that, in general, cloud cover might benefit clear-cutting practices, keeping in mind that iii) the artificial line divides the two regions but many municipal political boundaries span both sides of the region (MA and LM), which could interfere with the deforestation path between the seasons.
%i) since the region is a transitional zone, the two areas don't differ in biota aspects. In a sense, anthropic actions were responsible for apparent changes in the deforestation trends;
The first outcome is not entirely supported by the results of the models. In fact, deforestation is a human activity and the oscillation process is caused by individuals' choices. The findings show, however, that much of this activity happened differently in the two regions. As mentioned earlier in Section \ref{studycarac}, the LM region is under an environmental monitoring policy (DETER) that uses satellite images to detect deforestation in the tropical and transitional forest and punish those found at fault. In this sense, the results inform us that deforestation took place during the rainy season, which suggests an explanation for the second outcome.
%ii) happened during high levels of precipitation and low levels of solar incidence which in turn shows that, in general, cloud cover might be a benefit for clear-cutting practices,
Cloud cover is an impediment to detecting vegetation changes from satellite images. The presence of high levels of precipitation and low levels of direct sunlight might also indicate the existence of clouds acting as natural barriers. The second outcome states that cloud cover might benefit deforestation because it inhibits the detection of forest cover change from satellites. Possibly, human activity was displaced from the dry season with clear skies to the rainy season with cloudy days; in other words, human behavior changed due to the environmental monitoring policy.
%iii) the artificial line divides the two regions but many of the political boundaries of municipalities remain in both sides of the region (MA and LM), which could interfere in the deforestation path between the seasons.
Finally, as an artificial line, no concrete boundaries exist in the studied area and many of the political boundaries of municipalities remain on both sides of the regions (MA and LM). In other words, municipalities and provinces are split by this artificial line, so that much of the deforestation process during the dry season was displaced from the Legal Maranhão (LM) to the Maranhão side (MA), since there was no political or economic deterrent to these anthropic actions. This can be inferred from the findings of the models that showed deforestation trends in the MA region increasing, especially after 2010.
%Nonetheless, the results reported here could contribute to the identification of deforestation trends in transitional zones, as in the case of Maranhão, which is held responsible for large deforestation in the Amazon and Cerrado biome \citep{CELENTANO_2017,inpe_2018}. In general, the proven models served the purpose of the study, but it does not rule out exploring other tools in the future, which will complement these results, such as applying an analysis including cloud cover as an indicator of human changing behavior, in addition to the aforementioned analysis, a natural experiment with regions completely isolated from the artificial line would be ideal to examine the third outcome of the results.
\section{Conclusions}
In this study, a new approach was taken to studying deforestation trends in areas of ecological tension. Generalized additive modelling was implemented for the Maranhão state of Brazil to detect the path of deforestation in the transitional forest of the Amazon and Cerrado biomes. The technique was applied because it allows for non-linear relationships between the response variables and the covariates. Satellite images combined with a climatological weather station dataset formed the database used in this study. An algorithm was created to capture Vegetation Index changes over time as a proxy for deforestation, which served as the response variable. Climatological variables were converted and resampled to match the response variable and were used as covariates of the model. An artificial line, called the Legal Amazon line, was used to divide the analysis into two regions, Legal Maranhão (LM) and Maranhão (MA), according to their differing treatment in deforestation protection.
Model validation and cross validation were carried out with the use of neural networks and artificial intelligence (AI) techniques. It was found that models with EVI values as the response variable were a better fit for the deforestation trends, confirming the assumption made by \citet{ratana_huete_ferreira_2005,bayma_sano_2015,didan_munoz_2015} that ecotone forests are better captured by EVI than by NDVI values. Graphical results of the GAMs not only revealed the trends and turning points of the regression, but also showed the possible limits within which forest loss, as measured by each Vegetation Index, could occur.
In the results, the GAMs confirmed that deforestation is related to year and to the covariates, but also revealed that there are substantial differences in trends between seasons and regions. For the Legal Maranhão region, most of the deforestation happened during the rainy season. The Maranhão side GAM models behaved very differently during the dry season compared to the Legal Maranhão area, in that there was a deforestation trend during that season. In fact, the results indicate that the Maranhão region has a well-defined deforestation trend in both seasons. In particular, the deforestation process increased during the dry season from 2005 onwards. These findings were further validated by showing that deforestation happened during both seasons in settlements, which were not targeted by the environmental policy.
In general, the models employed here arguably served the purpose of the study well, but this does not rule out exploring other tools in the future, such as applying an analysis including cloud cover as an indicator of human changing behavior. In addition to the aforementioned analysis, a natural experiment with regions completely isolated from the artificial line could be helpful to examine how and why much of the deforestation process during the dry season was displaced from the Legal Maranhão (LM) to the Maranhão side (MA), since there was no political or economic deterrent to these anthropic actions.
There are, of course, a number of limitations to the analysis undertaken here. Following the approach of \citet{murase_2009}, possible sources of modelling error should be taken into account. First, in predicting the trend of deforestation from a list of variables, the model implicitly assumes that the predicted range or potential space is fully occupied by forest, which in reality might not be true. Additionally, the spatial distribution of the vegetation indices may exhibit dynamic behavior over time, so that a potential area may or may not be sparsely vegetated for a certain period (e.g., during sampling) due to progressive forest succession, or a temporary absence could be due to natural causes such as pest or disease attacks or inter-species competition. Second, regional environmental conditions follow changing trends of different duration, so it is possible that in certain cases an observed value is declining due to regional rather than local changes, a dynamic that the prediction model does not detect. Additionally, the study was based on coarse image resolution, which could neglect local changes in the sample area. The results could also feasibly suffer from overfitting, since more data are needed to optimize the smoothing algorithms. Finally, our results may not be generalizable to other areas, such as dense tropical forest and open fields.
\let\cleardoublepage\clearpage
\begin{appendices}
\renewcommand{\thechapter}{A.\arabic{chapter}}\label{appendix2}
\input{Chapter2/AppendixA2}
\end{appendices}
% %\documentclass[prb,preprint,showpacs,amsmath,amssymb ]{revtex4}
% \documentclass[prb, showpacs,amsmath,amssymb,twocolumn]{revtex4}
%
% % Some other (several out of many) possibilities
% %\documentclass[preprint,aps]{revtex4}
% %\documentclass[preprint,aps,draft]{revtex4}
% %\documentclass[prb]{revtex4}% Physical Review B
% %\usepackage{wasysym}
% %\usepackage{bm}
% %\usepackage[usenames]{color}
% %\renewcommand\arraystretch{2.2}
%
% \usepackage{dcolumn}
% \usepackage{verbatim} % multi-line comments
% \usepackage{graphicx}
% \usepackage{epsfig}
% \usepackage{hyperref} % hyperlinked references and figures
% %\usepackage{color}
%
% \begin{document}
\providecommand{\boldnabla}{\mbox{\boldmath$\nabla$}} % needed for Eqs. (\ref{eq:W})--(\ref{eq:J}) and (\ref{eq:flux_concervation}) below
\chapter{Anderson localization as position-dependent diffusion in disordered waveguides}
\label{chap:Dz_absorb}
\label{paper:4_start}
% \author{Ben Payne$^1$, Alexey Yamilov$^1$\footnote{Electronic~address:[email protected]} and Sergey E. Skipetrov$^2$\footnote{Electronic~address:[email protected]}}
\begin{center}
Ben Payne$^1$, Alexey Yamilov$^1$, and Sergey E. Skipetrov$^2$
\end{center}
\ \\
\begin{center}
\textit{$^1$Department of Physics, Missouri University of Science \& Technology,\\ Rolla, MO 65409\\
$^2$Universit\'{e} Joseph Fourier, Laboratoire de Physique et Mod\'{e}lisation des Milieux Condens\'{e}s, CNRS, 25 rue des Martyrs, BP 166, 38042 Grenoble, France}\end{center}
\ \\
% \affiliation{$^1$Department of Physics, Missouri University of Science \& Technology, Rolla, MO 65409\\
% $^2$Universit\'{e} Joseph Fourier, Laboratoire de Physique et Mod\'{e}lisation des Milieux Condens\'{e}s, CNRS, 25 rue des Martyrs, BP 166, 38042 Grenoble, France}
%
% \date{\today}
%
% \begin{abstract}
\addcontentsline{toc}{section}{ABSTRACT}
\begin{center}\textbf{ABSTRACT\footnote{Published in Physical Review B \textbf{82} 024205 (2010).}} \end{center}
We show that the recently developed self-consistent theory of Anderson localization with a position-dependent diffusion coefficient is in quantitative agreement with the supersymmetry approach up to terms of the order of $1/g_0^2$ (with $g_0$ the dimensionless conductance in the absence of interference effects) and with large-scale {\it ab-initio} simulations of the classical wave transport in disordered waveguides, at least for $g_0 \gtrsim 0.5$. In the latter case, agreement is found even in the presence of absorption. Our numerical results confirm that in open disordered media, the onset of Anderson localization can be viewed as position-dependent diffusion.
% \end{abstract}
%
% % 42.25.Dd - Wave propagation in random media
% % 72.15.Rn - Localization effects (Anderson or weak localization)
% \pacs{42.25.Dd, 72.15.Rn}
%
% \maketitle
%=====================================================================================================================
\section{INTRODUCTION}
Anderson localization is a paradigm in condensed matter physics~\cite{1958_Anderson}. It consists in a blockade of the diffusive electronic transport in disordered metals due to interferences of multiply scattered de Broglie waves at low temperatures and at a sufficiently strong disorder. This phenomenon is not unique to electrons but can manifest itself for any wave in the presence of disorder, in particular for classical waves, such as light and sound~\cite{1984_John_prl}, and, as shown more recently, for matter waves~\cite{2008_Billy}. Although the absence of decoherence and interactions~\cite{2007_Akkermans_book} for classical waves is appealing in the context of the original idea of Anderson, serious complications appear due to absorption of a part of the wave energy by the disordered medium~\cite{1991_Genack}.
Extracting clear signatures of Anderson localization from experimental signals that are strongly affected by --- often poorly controlled --- absorption was the key to success in recent experiments with microwaves~\cite{2000_chabanov_nature,2003_Genack}, light~\cite{2006_Maret_PRL} and ultrasound~\cite{2008_van_Tiggelen_Nature}.
Classical waves offer a unique possibility of performing angle-, space-, time- or frequency-resolved measurements with excellent resolution, a possibility that was not available in the realm of electronic transport. In a wider perspective, they also allow a controlled study of the interplay between disorder and interactions, as illustrated by the recent work on disordered photonic lattices~\cite{2007_Segev}.
Interpretation of measurements requires a theory that would be able to describe not only the genuine interferences taking place in the bulk of a large sample but also the modification of these interferences in a sample of particular shape, of finite size, and with some precise conditions at the boundaries. Such a theory has been recently developed~\cite{2000_van_Tiggelen,2004_Skipetrov,2006_Skipetrov_dynamics,2008_Cherroret} based on the self-consistent (SC) theory of Vollhardt and W\"{o}lfle~\cite{1980_Vollhardt_Wolfle}. The new ingredient is the position dependence of the renormalized diffusion coefficient $D(\mathbf{r})$ that accounts for a stronger impact of interference effects in the bulk of the disordered sample as compared to the regions adjacent to boundaries. This position dependence is crucial in open disordered media~\cite{2009_Cherroret}. $D(\mathbf{r})$ also appears in the supersymmetry approach to wave transport~\cite{2008_Tian}, which confirms that this concept goes beyond a particular technique (diagrammatic or supersymmetry methods) used in the calculations.
The SC theory with a position-dependent diffusion coefficient was successfully applied to analyze microwave~\cite{2004_Skipetrov} and ultrasonic~\cite{2008_van_Tiggelen_Nature} experiments. The predictions of the theory~\cite{2006_Skipetrov_dynamics} are also in qualitative agreement with optical experiments of St\"{o}rzer \textit{et al.}~\cite{2006_Maret_PRL}. However, it remains unclear whether the position dependence of $D$ is just a (useful) mathematical concept or if it is a genuine physical reality. In addition, the extent to which predictions of SC theory are quantitatively correct is not known. Obviously, the last issue is particularly important once comparison with experiments is attempted.
In the present paper we compare the predictions of SC theory of localization with the known results obtained previously using the supersymmetry method~\cite{2000_Mirlin} and with the results of extensive \textit{ab-initio} numerical simulations of wave transport in two-dimensional (2D) disordered waveguides. We demonstrate, first, that the position-dependent diffusion is a physical reality and, second, that SC theory agrees with the supersymmetry approach up to terms of the order of $1/g_0^2$ (with $g_0$ the dimensionless conductance in the absence of interference effects) and with numerical simulations at least for $g_0 \gtrsim 0.5$. In the latter case, the agreement is found even in the presence of absorption.
%=====================================================================================================================
\section{SELF-CONSISTENT THEORY OF LOCALIZATION}
\label{sec:sctheory}
We consider a scalar, monochromatic wave $u(\mathbf{r})e^{-i \omega t}$ propagating in a 2D volume-disordered waveguide of width $w$ and length $L \gg w$. The wave field $u(\mathbf{r})$ obeys the 2D Helmholtz equation:
\begin{equation}
\left\{\nabla^2 + k^2\left[1 + i \epsilon_a + \delta\epsilon(\mathbf{r}) \right]\right\} u(\mathbf{r}) = 0.
\label{eq:helmholtz}
\end{equation}
Here $k=\omega/c$ is the wavenumber, $c$ is the speed of the wave in the free space, $\epsilon_a$ is the imaginary part of the dielectric constant accounting for the (spatially uniform) absorption in the medium, and $\delta\epsilon(\mathbf{r})$ is the randomly fluctuating part of the dielectric constant.
Assuming that $\delta\epsilon(\mathbf{r})$ is a Gaussian random field with a short correlation length, it is easy to show that the disorder-averaged Green's function of Eq.~(\ref{eq:helmholtz}), $\langle G(\mathbf{r}, \mathbf{r}') \rangle$, decays exponentially with the distance $|\mathbf{r}-\mathbf{r}'|$
\cite{2007_Akkermans_book}. The characteristic length of this decay defines the mean free path $\ell$. In this paper we consider quasi-1D waveguides defined by the condition $w \lesssim \ell \ll L$. The intensity Green's function of Eq.~(\ref{eq:helmholtz}), $C(\mathbf{r}, \mathbf{r}') = (4\pi/c)
\langle \left| G(\mathbf{r}, \mathbf{r}') \right|^2 \rangle$, obeys self-consistent equations that can be derived following the approach of Ref.~\citenum{2008_Cherroret}. In a quasi-1D waveguide, all position-dependent quantities become functions of the longitudinal coordinate $z$ only and the stationary SC equations can be written in a dimensionless form:
\begin{eqnarray}
&&\left[\beta^2 - \frac{\partial}{\partial \zeta} d(\zeta)
\frac{\partial}{\partial \zeta} \right] {\hat C}(\zeta,\zeta')
= \delta(\zeta-\zeta'),
\label{eq:sceq1}
\\
&&\frac{1}{d(\zeta)} = 1+\frac{2}{{\tilde g}_0}
{\hat C}(\zeta,\zeta).
\label{eq:sceq2}
\end{eqnarray}
Here ${\hat C}(\zeta,\zeta') = (w D_0/L)C(\mathbf{r},\mathbf{r}')$,
$D_0 = c\ell/2$ is the Boltzmann diffusion coefficient, $\zeta = z/L$ is the dimensionless coordinate, $d(\zeta) = D(z)/D_0$ is the normalized position-dependent diffusion coefficient, $\beta = L/L_a$ is the absorption coefficient (with $L_a = \sqrt{\ell \ell_a/2}$ and $\ell_a = 1/k\epsilon_a$ the macro- and microscopic absorption lengths, respectively), and ${\tilde g}_0 = (\pi/2)N \ell/L$ with $N = kw/\pi$ the number of the transverse modes in the waveguide. These equations should be solved with the following boundary conditions:
\begin{eqnarray}
{\hat C}(\zeta,\zeta^{\prime}) \mp
\frac{z_0}{L} d(\zeta) \frac{\partial}{\partial \zeta}
{\hat C}(\zeta,\zeta^{\prime}) = 0
\label{eq:bc}
\end{eqnarray}
at $\zeta = 0$ and $\zeta = 1$. Similarly to the 3D case~\cite{2008_Cherroret}, these conditions follow from the requirement of vanishing incoming diffuse flux at the open boundaries of the sample. $z_0$ is the so-called extrapolation length equal to $(\pi/4)\ell$ in the absence of internal reflections at the sample surfaces~\cite{1999_van_Rossum}. We will use $z_0 = (\pi/4) \ell$ throughout this paper. When Eqs.\ (\ref{eq:sceq1}--\ref{eq:bc}) are solved in the diffuse regime ${\tilde g}_0 \gg 1$, the dimensionless conductance of the waveguide is found to be $g_0 = (\pi/2)N \ell/(L + 2 z_0)$ ~\cite{1999_van_Rossum,1997_Beenakker} which is close to ${\tilde g}_0$ for $z_0 \ll L$.
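As a quick numerical illustration of this diffuse-regime formula (a sketch for orientation only, not part of the original analysis), the short snippet below evaluates $g_0 = (\pi/2)N\ell/(L + 2z_0)$ with $z_0 = (\pi/4)\ell$ for the waveguide parameters quoted later in Sec.~\ref{sec:absorption} ($N=20$, $\ell = 17.1\lambda$, $L = 400\lambda$) and reproduces the value $g_0 \simeq 1.3$ cited there:
\begin{verbatim}
# Illustrative check of g0 = (pi/2) N ell / (L + 2 z0) with z0 = (pi/4) ell.
# All lengths are measured in units of the wavelength lambda.
import numpy as np

def g0(N, ell, L):
    z0 = (np.pi / 4.0) * ell
    return (np.pi / 2.0) * N * ell / (L + 2.0 * z0)

print(round(g0(N=20, ell=17.1, L=400.0), 2))   # -> 1.26, i.e. g0 ~ 1.3
\end{verbatim}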
In the absence of absorption ($\beta = 0$) we can simplify Eq.~(\ref{eq:sceq1}) by introducing $\tau = F(\zeta) = \int_0^{\zeta} d\zeta_1/d(\zeta_1)$:
\begin{eqnarray}
-\frac{\partial^2}{\partial \tau^2} {\hat C}(\tau, \tau^{\prime})
= \delta(\tau-\tau^{\prime}),
\label{eq:sceq3}
\end{eqnarray}
with the boundary conditions (\ref{eq:bc}) becoming
\begin{eqnarray}
{\hat C}(\tau, \tau^{\prime}) \mp
\tau_0 \frac{\partial}{\partial \tau}
{\hat C}(\tau, \tau^{\prime}) = 0,
\label{eq:bc3}
\end{eqnarray}
and $\tau^{\prime} = F(\zeta^{\prime})$, $\tau_0 = z_0/L$. Equations (\ref{eq:sceq3}) and (\ref{eq:bc3}) are readily solved:
\begin{eqnarray}
{\hat C}(\tau, \tau^{\prime}) =
\frac{(\tau_< + \tau_0)(\tau_{\mathrm{max}} + \tau_0 - \tau_>)}{\tau_{\mathrm{max}} + 2 \tau_0},
\label{eq:sol}
\end{eqnarray}
where $\tau_< = \min(\tau, \tau^{\prime})$, $\tau_> = \max(\tau, \tau^{\prime})$ and $\tau_{\mathrm{max}} = F(1)$. We now substitute this solution into Eq.~(\ref{eq:sceq2}) to obtain
\begin{eqnarray}
\frac{1}{d(\tau)} \equiv \frac{d \tau}{d\zeta} =
1 + \frac{2}{{\tilde g}_0} \times \frac{(\tau + \tau_0)(\tau_{\mathrm{max}} + \tau_0 - \tau)}{\tau_{\mathrm{max}} + 2 \tau_0}.
\label{eq:dtdz}
\end{eqnarray}
This differential equation can be integrated to find $\tau$ as a function of $\zeta$. Using $d(\zeta) = (d\tau/d\zeta)^{-1}$ we finally find
\begin{eqnarray}
d(\zeta) &=& \left\{ {\tilde g}_0 \sqrt{p} \cosh(\sqrt{p} \zeta/{\tilde g}_0) \right.
\nonumber \\
&-& \left. [{\tilde g}_0 + \tau_0(1 - p)] \sinh(\sqrt{p} \zeta/{\tilde g}_0) \right\}^2
\nonumber \\
&\times& \left\{ p [({\tilde g}_0 + \tau_0)^2 - \tau_0^2 p]
\right\}^{-1},
\label{eq:dzsol}
\end{eqnarray}
where $p$ is the solution of a transcendental equation
\begin{eqnarray}
\frac{2 {\tilde g}_0}{\sqrt{p}}
\mathrm{arctanh} \left\{ \frac{1}{\sqrt{p}}
\left[ 1 - \frac{\tau_0}{{\tilde g}_0} \left(p - 1 \right) \right] \right\} = 1.
\label{eq:p}
\end{eqnarray}
Solving the last equation numerically and substituting the result into Eq.~(\ref{eq:dzsol}) we can find the profile $d(\zeta)$ at any ${\tilde g}_0$ and $\tau_0 = z_0/L$. In contrast,
for $\beta > 0$ Eqs.\ (\ref{eq:sceq1}--\ref{eq:bc}) do not admit an analytic solution and we solve them by iteration: we start with $D(z) = D_0$, solve Eq.~(\ref{eq:sceq1}) numerically with the boundary conditions (\ref{eq:bc}) and then find the new $D(z)$ from Eq.~(\ref{eq:sceq2}). This procedure is then repeated until it converges to a solution. In typical cases considered in this paper the convergence is achieved after 10--20 iterations.
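To make the iteration scheme concrete, the sketch below (in Python; it is not the code used to produce the results of this chapter, and the grid size, mixing factor and example parameter values are illustrative choices) discretizes the operator of Eq.~(\ref{eq:sceq1}) by finite differences, imposes the mixed boundary conditions (\ref{eq:bc}) through ghost cells, obtains $\hat{C}(\zeta,\zeta)$ from the diagonal of the inverse operator, and updates $d(\zeta)$ from Eq.~(\ref{eq:sceq2}) until convergence:
\begin{verbatim}
import numpy as np

def sc_diffusion(g0_tilde, beta, tau0, n=400, max_iter=200, tol=1e-10):
    """Iterative solution of the SC equations for d(zeta) = D(z)/D_0."""
    h = 1.0 / n
    zeta = (np.arange(n) + 0.5) * h        # cell centres on (0, 1)
    d = np.ones(n)                         # start from classical diffusion, d = 1
    for _ in range(max_iter):
        # d at the cell interfaces, for the conservative stencil of -(d C')'
        d_face = np.empty(n + 1)
        d_face[1:-1] = 0.5 * (d[:-1] + d[1:])
        d_face[0], d_face[-1] = d[0], d[-1]
        A = np.zeros((n, n))
        for i in range(n):
            A[i, i] = beta**2 + (d_face[i] + d_face[i + 1]) / h**2
            if i > 0:
                A[i, i - 1] = -d_face[i] / h**2
            if i < n - 1:
                A[i, i + 1] = -d_face[i + 1] / h**2
        # mixed boundary conditions C -/+ tau0 d C' = 0 at zeta = 0 and 1,
        # implemented with ghost cells: u_ghost = c * u_boundary
        for idx, face in ((0, 0), (n - 1, n)):
            r = tau0 * d_face[face] / h
            c = (r - 0.5) / (r + 0.5)
            A[idx, idx] -= d_face[face] * c / h**2
        # Green's function of the discrete operator: A C = I/h (delta source)
        C = np.linalg.solve(A, np.eye(n) / h)
        d_new = 1.0 / (1.0 + (2.0 / g0_tilde) * np.diag(C))
        if np.max(np.abs(d_new - d)) < tol:
            return zeta, d_new
        d = 0.5 * (d + d_new)              # mild mixing, for robustness only
    return zeta, d

# absorption (beta > 0) weakens the dip of d(zeta) in the middle of the sample
zeta, d_no_abs = sc_diffusion(g0_tilde=1.0, beta=0.0, tau0=0.05)
zeta, d_abs    = sc_diffusion(g0_tilde=1.0, beta=2.0, tau0=0.05)
\end{verbatim}
For $\beta = 0$ the output of such a scheme can be checked against the analytic profile of Eq.~(\ref{eq:dzsol}).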
The simplest object that Eqs.\ (\ref{eq:sol}--\ref{eq:dzsol}) allows us to study is the average conductance of the waveguide $\langle g \rangle$. Indeed, the average transmission coefficient of the waveguide is found as
\begin{eqnarray}
T &=& -D(L) \left. \frac{dC(z, z^{\prime}=\ell)}{dz} \right|_{z=L}
\nonumber \\
&=& - \frac{1}{w} \times \left.
\frac{d{\hat C}(\tau, \tau_{\ell})}{d\tau}
\right|_{\tau = \tau_{\mathrm{max}}}
\nonumber \\
&=& \frac{1}{w} \times \frac{\tau_{\ell} + \tau_0}{\tau_{\mathrm{max}} + 2 \tau_0},
\label{eq:t}
\end{eqnarray}
where $\tau_{\ell} = F(\ell/L)$.
For the waveguide we have $\langle g \rangle \propto T$. A ratio that emphasizes the impact of localization effects is $\langle g \rangle/g_0 = T/T_0$, where $T_0$ is the average transmission coefficient found in the absence of localization effects (i.e., for $d \equiv 1$): $T_0 = (\ell + z_0)/[w(L + 2 z_0)]$. We find
\begin{eqnarray}
\frac{\langle g \rangle}{g_0} =
\frac{L + 2 z_0}{\ell + z_0}
(\tau_{\ell} + \tau_0)
\frac{p - 1}{2 {\tilde g}_0}.
\label{eq:goverg0}
\end{eqnarray}
Simple analytic results follow for $z_0 = 0$, when $g_0 = {\tilde g}_0$. Equation (\ref{eq:dzsol}) yields
\begin{eqnarray}
d(\zeta) &=& \left[ \frac{\sinh(\sqrt{p} \zeta/g_0)}{\sqrt{p}} - \cosh(\sqrt{p} \zeta/g_0) \right]^2
\label{eq:dzsol2}
\end{eqnarray}
and we find
\begin{eqnarray}
\tau_{\ell} &=& \frac{g_0}{\sqrt{p}\; \mathrm{cotanh}
(\sqrt{p} \ell/L g_0) -1}.
\label{eq:tauell}
\end{eqnarray}
In the weak localization regime $g_0 \gg 1$ the solution $p$ of Eq.~(\ref{eq:p}) can be found as a series expansion in powers of $1/g_0$:
$p = 2 g_0 + 1/3 + 2/(45 g_0) - 17/(540 g_0^2) + \ldots$.
If we keep only the first term $p = 2 g_0$, substitute it into Eq.~(\ref{eq:dzsol2}) and expand in powers of $1/g_0 \ll 1$, we obtain
$D(z) \simeq D_0 [ 1 - (2/g_0) (z/L)(1-z/L)]$.
Keeping terms up to $1/g_0^2$ in the expression for $p$ and substituting it into Eqs.\ (\ref{eq:tauell}) and (\ref{eq:goverg0}), expanding the result in powers of $1/g_0$ and then taking the limit of $L/\ell \rightarrow \infty$, we obtain
\begin{eqnarray}
\frac{\langle g \rangle}{g_0} \simeq
1 - \frac{1}{3 g_0} + \frac{1}{45 g_0^2} + \frac{2}{945 g_0^3} + \ldots.
\label{eq:goverg01}
\end{eqnarray}
This result coincides {\em exactly} with Eq.~(6.26) of Ref.~\cite{2000_Mirlin} obtained by Mirlin using the supersymmetry approach, except for a factor of 2 due to two independent spin states of electrons in Ref.~\cite{2000_Mirlin}. We therefore proved the exact equivalence between SC theory and the supersymmetry approach for the calculation of the average conductance $\langle g \rangle$ up to terms of the order of $1/g_0^2$.
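The expansion of $p$ quoted above can also be verified directly, for instance with the short root-finding sketch below (an illustration only, not part of the original analysis; it uses \texttt{scipy} and assumes $z_0 = 0$, i.e. $\tau_0 = 0$):
\begin{verbatim}
# Solve (2 g0 / sqrt(p)) arctanh(1 / sqrt(p)) = 1 for p (case z0 = 0) and
# compare with the weak-localization series p = 2 g0 + 1/3 + 2/(45 g0) + ...
import numpy as np
from scipy.optimize import brentq

def p_exact(g0):
    f = lambda p: (2.0 * g0 / np.sqrt(p)) * np.arctanh(1.0 / np.sqrt(p)) - 1.0
    return brentq(f, 1.0 + 1e-12, 1e3 * max(g0, 1.0))

g0 = 5.0
print(p_exact(g0), 2 * g0 + 1.0 / 3.0 + 2.0 / (45.0 * g0))   # both ~10.34
\end{verbatim}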
Deep in the localized regime, where $g_0 \ll 1$, Eq.~(\ref{eq:p}) can be solved approximately to yield
$p = 1 + 4 \exp(-1/g_0)$ (always for $z_0 = 0$ and hence for $g_0 = {\tilde g}_0$).
If we substitute this $p$ into Eq.~(\ref{eq:dzsol2}), we obtain
$D(z) \simeq D_0 \{ \exp(-z/\xi) + \exp[-(L-z)/\xi] \}^2$, where $\xi = g_0 L$ is the localization length.
Equations (\ref{eq:tauell}) and (\ref{eq:goverg0}) then yield
\begin{eqnarray}
\frac{\langle g \rangle}{g_0} \simeq
\frac{2}{g_0} \exp\left(-\frac{1}{g_0} \right),
\label{eq:goverg02}
\end{eqnarray}
where we made use of the fact that $L/\ell \gg 1$ and $N \gg 1$.
In contrast to Eq.~(\ref{eq:goverg01}), this result differs from the one obtained using the supersymmetry approach [see Eq.~(6.29) of Ref.~\cite{2000_Mirlin}]. Even though the exponential decay of conductance with $1/g_0 = L/\xi$ --- expected in the localized regime --- is reproduced correctly, both the rate of this decay and the pre-exponential factor are different. We thus conclude that SC theory does not provide a quantitatively correct description of stationary wave transport in disordered waveguides in the localized regime.
It is worthwhile to note that the breakdown of SC theory for $g_0 \ll 1$ is not surprising and could be expected from previous results. Indeed, it has already been noted that for the time-dependent transmission, SC theory does not apply after the Heisenberg time $t_H$~\cite{2004_Skipetrov}. The stationary transmission coefficient $T$ of Eq.~(\ref{eq:t}) is an integral of the time-dependent transmission $T(t)$: $T = \int_0^{\infty} dt\; T(t)$, with the peak of $T(t)$ around the Thouless time $t_D = L^2/(\pi^2 D_0)$~\cite{2004_Skipetrov}. When $g_0 \sim t_H/t_D \gg 1$, the integral is dominated by $t < t_H$ where SC theory applies. The integration thus yields the correct $T$. However, when $g_0 \ll 1$, $t_H$ is smaller than $t_D$ and the main part of the pulse energy arrives at $t > t_H$. Such long times are beyond the reach of SC theory, hence its breakdown for small $g_0$.
%=====================================================================================================================
\section{NUMERICAL MODEL}
\label{sec:numerical}
To test the predictions of the SC model discussed in the previous section we solve Eq.~(\ref{eq:helmholtz}) numerically using the method of transfer matrices defined in the basis of the transverse modes of the empty waveguide~\cite{2007_Froufe-Perez_PRE,2010_Payne_closed}. To this end, we represent $\delta \epsilon(\mathbf{r})$ as a collection of $M$ randomly positioned ``screens'' perpendicular to the axis $z$ of the waveguide and characterized by random functions $f_{\nu}(y) = \sum_{n=1}^N \chi_n(y)\chi_n(y_\nu)$:
\begin{equation}
\delta\epsilon(\mathbf{r}) = \alpha\sum\limits_{\nu=1}^M \delta(z - z_\nu)f_\nu(y).
\label{de}
\end{equation}
Here $\chi_n(y) = (2/w)^{1/2}\sin(\pi ny/w)$ are the transverse modes of the waveguide and $y_\nu$ are chosen at random within the interval $(0, w)$. $z_\nu$ represent random positions of the screens, whereas $\alpha$ measures their scattering strength. Absorption can be included in the model by making $\alpha$ complex.
In the limit $N\rightarrow\infty$, $f_\nu(y)$ becomes a delta-function $\delta\left(y-y_\nu\right)$, mimicking a point-like scatterer. By the choice of $f_\nu(y)$ in Eq.~(\ref{de}) we narrowed the basis to $N$ right- and $N$ left-propagating modes with real values of the longitudinal component of the wavevector. Such modes are often termed ``open channels'' in the literature~\cite{2007_Froufe-Perez_PRE}. Hence, the total transfer matrix of the system is a product of $M$ pairs of $2N\times 2N$ scattering matrices corresponding to the random screens positioned at $z_{\nu}$ and the free space in between them, respectively~\cite{2010_Payne_closed}. Because the numerical computation of products of a large number of transfer matrices ($\sim 10^2$--$10^5$ for the results in this paper) is intrinsically unstable, we implement a self-embedding procedure~\cite{1999_yamilov_selfembed} which limits the errors in flux conservation to less than $10^{-10}$ in all cases. The system is excited by illuminating the waveguide with $N$ unit fluxes (one in each right propagating mode) and the wave field $u(\mathbf{r})$ is computed~\cite{1999_yamilov_selfembed,2010_Payne_closed} for a given realization of disorder [see the inset of Fig.~\ref{fig1}(a)]. To compute statistical averages, ensembles of no fewer than $10^7$ realizations are used.
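As an illustration of the mode basis underlying this construction, the fragment below (Python, for orientation only; the screen scattering matrices and the self-embedding stabilization of the actual simulation are not reproduced here) computes the open-channel wavenumbers, the transverse profiles $\chi_n(y)$, the free-propagation factor of the transfer matrix, and the screen profile $f_\nu(y)$ of Eq.~(\ref{de}):
\begin{verbatim}
import numpy as np

def open_channel_wavenumbers(k, w):
    """Longitudinal wavenumbers k_n of the N = floor(kw/pi) propagating modes."""
    N = int(np.floor(k * w / np.pi))
    n = np.arange(1, N + 1)
    return np.sqrt(k**2 - (np.pi * n / w)**2)

def chi(n, y, w):
    """Transverse mode profiles chi_n(y) of the empty waveguide."""
    return np.sqrt(2.0 / w) * np.sin(np.pi * n * y / w)

def free_propagation(kz, dz):
    """Transfer matrix of a disorder-free segment of length dz: right-going
    amplitudes acquire exp(+i k_n dz), left-going ones exp(-i k_n dz)."""
    phases = np.exp(1j * kz * dz)
    return np.diag(np.concatenate([phases, np.conj(phases)]))

def screen_profile(y, y_nu, w, N):
    """f_nu(y) = sum_n chi_n(y) chi_n(y_nu), the profile of one random screen."""
    n = np.arange(1, N + 1)[:, None]
    return (chi(n, y, w) * chi(n, y_nu, w)).sum(axis=0)
\end{verbatim}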
%---------------------------------------------------------------------------------------------------------------------
\begin{figure*}
%\vskip -0.5in
%\centering{\includegraphics[height=6.5in,angle=-90]{fig1}}
%\vskip -1.75in
\vskip -0.3in
\centering{\includegraphics[width=4.5in,angle=-90]{chapters/Anderson_Localization_as_position-dependent_diffusion_in_disordered_waveguides__Phys_Rev_B/pictures/fig1}}
\vskip -1.9in
\caption[The average (a) and the variance (b) of the conductance $g$ of disordered waveguides supporting $N=10$ (circles)
and $N=20$ (squares) modes are shown versus the inverse of $g_0$.]{\label{fig1}
The average (a) and the variance (b) of the conductance $g$ of disordered waveguides supporting $N=10$ (circles)
and $N=20$ (squares) modes are shown versus the inverse of $g_0$. The solid lines marked as SUSY are fits using
Eq.~(6.23) of Ref.~\cite{2000_Mirlin}, derived using the supersymmetry approach, with $\ell = 15.7 \lambda$ as the only
fit parameter. The solid line marked as SC in (a) is obtained using the self-consistent theory [Eq.~(\ref{eq:goverg0})]. Inset in (a): for a given realization of disorder, wave ``trajectories'' found by connecting local Poynting vectors are superimposed on the distribution of intensity $|u(\mathbf{r})|^2$ in a disordered waveguide with $w=10.25\lambda$ and $L=50\lambda$. Only trajectories that traverse the waveguide are shown.}
\end{figure*}
%---------------------------------------------------------------------------------------------------------------------
To estimate the mean free path $\ell$ of waves in our model system we perform a set of simulations for different disorder strengths and waveguide lengths, exploring both the regime of classical diffusion ($g_0 > 1$) and that of Anderson localization ($g_0 < 1$). The results of the simulations are used to compute the dimensionless conductance $g$, equal to the sum of all outgoing fluxes at the right end of the waveguide, and then to study its average value $\langle g \rangle$ and variance $\mathrm{var}(g)$~\cite{2006_Yamilov_conductance}. The dependencies of $\langle g \rangle$ and $\mathrm{var}(g)$ on $g_0$ are fitted by the analytic expressions obtained by Mirlin~\cite{2000_Mirlin} using the supersymmetry approach, with $\ell$ as the only fit parameter (Fig.~\ref{fig1})~\cite{2010_Payne_closed}. The best fit is obtained with $\ell = (15.7\pm 0.2) \lambda$.
In Fig.~\ref{fig1}(a) we also show Eq.~(\ref{eq:goverg0}) following from SC theory. As could be expected from the discussion in the previous section, the prediction of SC theory coincides with both the results of the supersymmetry approach and numerical simulations only for large $g_0 \gtrsim 0.5$.
%=====================================================================================================================
\section{POSITION-DEPENDENT DIFFUSION COEFFICIENT}
\label{sec:position}
%---------------------------------------------------------------------------------------------------------------------
\begin{figure}
%\vskip -1cm
%\centering{\includegraphics[height=5.5in,angle=-90]{fig2a}
%\vskip -1.25cm
% \includegraphics[height=5.5in,angle=-90]{fig2b}}
%\vskip -1cm
\vskip -0.5cm
\centering{\includegraphics[height=3.5in,angle=-90]{chapters/Anderson_Localization_as_position-dependent_diffusion_in_disordered_waveguides__Phys_Rev_B/pictures/fig2a}
\vskip -0.7cm
\includegraphics[height=3.5in,angle=-90]{chapters/Anderson_Localization_as_position-dependent_diffusion_in_disordered_waveguides__Phys_Rev_B/pictures/fig2b}}
\vskip -0.7cm
\caption[(a) Position-dependent diffusion coefficient $D(z)$ in 2D waveguides supporting the same number $N = 10$ of transverse modes (width $w=5.25\lambda$) but having different lengths $L$.]{\label{fig2} (a) Position-dependent diffusion coefficient $D(z)$ in 2D waveguides supporting the same number $N = 10$ of transverse modes (width $w=5.25\lambda$) but having different lengths $L$. Disorder is the same for all lengths. Symbols show the results of numerical simulations, whereas solid lines are obtained from the self-consistent theory with the mean free path $\ell = 17.5\lambda$. Dashed lines show the approximate results for
$g_0 \gg 1$ (shown for $L = 100 \lambda$) and $g_0 \ll 1$ (shown for $L = 1600 \lambda$), with $D(0)$ substituted for $D_0$, see text. (b) Same as (a) but in the logarithmic scale.}
\end{figure}
%---------------------------------------------------------------------------------------------------------------------
The wave field $u(\mathbf{r})$ that we obtain as an outcome of the numerical algorithm allows us to calculate the energy density ${\cal W}(\mathbf{r})$ and flux $\mathbf{J}(\mathbf{r})$~\cite{1953_Morse}:
\begin{eqnarray}
{\cal W}({\bf r}) &=& \frac{k^2}{2}\left|u({\bf r})\right|^2 + \frac{1}{2}\left| \boldnabla u({\bf r})\right|^2, \label{eq:W}\\
{\bf J}({\bf r}) &=& -kc\; \mathrm{Im} \left[u({\bf r})\boldnabla u({\bf r})\right]. \label{eq:J}
\end{eqnarray}
These two quantities formally define the diffusion coefficient $D(z)$ which, in general, may be position-dependent:
\begin{equation}
D(z) = -\frac{\langle J_z(\mathbf{r}) \rangle}{\frac{d}{d z} \langle{\cal W}(\mathbf{r}) \rangle},
\label{eq:Dofz_definition}
\end{equation}
where the averages $\langle \ldots \rangle$ are taken over a statistical ensemble of disorder realizations as well as over the cross-section of the waveguide. Eq.~(\ref{eq:Dofz_definition}) can be used only at distances beyond one mean free path $\ell$ from the boundaries of the random medium because more subtle propagation effects of non-diffusive nature start to be important in the immediate vicinity of the boundaries~\cite{2007_Akkermans_book}.
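In practice, Eq.~(\ref{eq:Dofz_definition}) is evaluated from the averaged profiles on a grid of $z$ values; a minimal sketch of this step (illustrative only, assuming the ensemble- and cross-section-averaged arrays are already available from the simulation) is:
\begin{verbatim}
import numpy as np

def diffusion_coefficient(z, W_mean, Jz_mean):
    """D(z) = -<J_z> / (d<W>/dz), with the averages over disorder realizations
    and over the waveguide cross-section taken beforehand."""
    dW_dz = np.gradient(W_mean, z)
    return -Jz_mean / dW_dz
\end{verbatim}
The resulting profile is then only kept at distances larger than about one mean free path from the two boundaries, as explained above.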
We first consider non-absorbing disordered waveguides described by $\epsilon_a = 0$ in Eq.~(\ref{eq:helmholtz}) and real $\alpha$ in Eq.~(\ref{de}).
In Fig.~\ref{fig2} we compare numerical results for $D(z)$ with the outcome of SC theory for waveguides of different lengths but with statistically equivalent disorder. Quantitative agreement is observed for $L = 100$--$800 \lambda$, corresponding to $g_0 \approx 0.3$--2. For the longest of our waveguides ($L = 1600\lambda$, $g_0 \approx 0.16$), deviations of numerical results from SC theory start to become visible in the middle of the waveguide, which is particularly apparent in the logarithmic plot of Fig.~\ref{fig2}(b).
The mean free path $\ell = 17.5\lambda$ corresponding to the best fit of SC theory to numerical results is only about $10\%$ higher than $\ell = 15.7 \lambda$ obtained from the fits in Fig.~\ref{fig1}.
We checked that the results of numerical simulations are not sensitive to the microscopic details of disorder: $D(z)$ obtained in two runs with different scattering strengths $\alpha$ and different scatterer densities, but equal mean free paths $\ell$, turned out to be the same.
%=====================================================================================================================
\section{EFFECT OF ABSORPTION}
\label{sec:absorption}
The linear absorption is modeled by introducing a non-zero $\epsilon_a$ in Eq.~(\ref{eq:helmholtz}) and making $\alpha$ in Eq.~(\ref{de}) complex. A link between $\epsilon_a$ and $\alpha$ can be established using the condition of flux continuity. Indeed, for continuous waves considered in this work the continuity of the flux leads to
\begin{equation}
\left\langle \boldnabla \cdot {\bf J}({\bf r})\right\rangle = (c/\ell_a) \left\langle{\cal W}({\bf r})\right\rangle,
\label{eq:flux_concervation}
\end{equation}
where $\ell_a = 1/k\epsilon_a$.
We checked that, within the numerical accuracy of our simulations, the proportionality factor $c/\ell_a$ indeed remains constant, independent of $z$. Therefore, Eq.~(\ref{eq:flux_concervation}) allows us to determine the microscopic absorption length $\ell_a$ as
$c \langle{\cal W}({\bf r})\rangle/\langle \boldnabla \cdot {\bf J}({\bf r})\rangle$ obtained numerically at a given $\alpha$.
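The corresponding numerical step is again elementary; the sketch below (illustrative only) estimates $\ell_a$ from the averaged profiles and uses the spread of the ratio as a consistency check of its $z$-independence:
\begin{verbatim}
import numpy as np

def absorption_length(z, W_mean, Jz_mean, c=1.0):
    """ell_a = c <W> / <div J>; after cross-section averaging only the
    z-derivative of <J_z> contributes to the divergence (no flux through
    the side walls)."""
    divJ = np.gradient(Jz_mean, z)
    ratio = c * W_mean / divJ
    return ratio.mean(), ratio.std()
\end{verbatim}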
%---------------------------------------------------------------------------------------------------------------------
\begin{figure}
%\vskip -1cm
%\centering{\includegraphics[height=5.5in,angle=-90]{fig3}}
%\vskip -1cm
\vskip -0.5cm
\centering{\includegraphics[height=3.5in,angle=-90]{chapters/Anderson_Localization_as_position-dependent_diffusion_in_disordered_waveguides__Phys_Rev_B/pictures/fig3}}
\vskip -0.7cm
\caption[The effect of absorption on the position-dependent diffusion coefficient.]{\label{fig3} The effect of absorption on the position-dependent diffusion coefficient. Symbols are results of numerical simulations in a 2D waveguide of length $L = 400\lambda$, width $w = 10.25 \lambda$ ($N = 20$) and several values of the macroscopic absorption length $L_a$ indicated on the figure. Lines are obtained from SC theory with $\ell = 17.1 \lambda$ adjusted to obtain the best fit for the case of no absorption (lower curve). Dashed line shows $D(z)$ following from the self-consistent theory with the same $\ell = 17.5 \lambda$ as in Fig.~\ref{fig2} and illustrates the sensitivity of $D(z)$ to the exact value of $\ell$.}
\end{figure}
%---------------------------------------------------------------------------------------------------------------------
Figure~\ref{fig3} demonstrates the effect of absorption on the position-dependent diffusion coefficient for a waveguide of length $L = 400\lambda$, which is about 25 mean free paths. For this waveguide $g_0 \simeq 1.3$ and the localization corrections are important. We observe that absorption suppresses the localization correction to the position-dependent diffusion coefficient. This clearly demonstrates that absorption nontrivially affects transport by changing the way the waves interfere.
Nevertheless, we observe good agreement between numerical results (symbols) and SC theory (solid lines). The predictions of SC theory start to deviate from numerical results only for strong absorption ($L_a/L \lesssim 0.4$). Once again, the mean free path $\ell = 17.1 \lambda$ obtained from the fit of SC theory to the lower curve of Fig.~\ref{fig3} is within 10\% of the value estimated from the variance of dimensionless conductance.
%=====================================================================================================================
\section{CONCLUSIONS}
\label{sec:conclusions_Dz}
Two important results were obtained in this work. First, we convincingly demonstrated that the position-dependent diffusion coefficient is not an abstract mathematical concept but is a physical reality. The results of numerical simulations of scalar wave transport in disordered 2D waveguides unambiguously show that the onset of Anderson localization manifests itself as position-dependent diffusion. The reduction of the diffusion coefficient $D(\mathbf{r})$ is much more pronounced in the middle of an open sample than close to its boundaries, in agreement with predictions of the self-consistent theory of localization.
Second, we established that for monochromatic waves in 2D disordered waveguides, the predictions of the self-consistent theory of localization are {\it quantitatively\/} correct provided that the dimensionless conductance in the absence of interference effects $g_0$ is larger than about $0.5$. Moreover, the self-consistent theory yields a series expansion of the average conductance $\langle g \rangle$ in powers of $1/g_0$ that coincides exactly with the expansion obtained using the supersymmetry method~\cite{2000_Mirlin} up to terms of the order of $1/g_0^2$. This was not obvious {\it a priori\/} because of the numerous approximations involved in the derivation of self-consistent equations~\cite{2008_Cherroret}. The agreement between theory and numerical simulations is good in the presence of absorption as well, which is of particular importance in the context of the recent quest for Anderson localization of classical waves that relies heavily on comparison of experimental results with the self-consistent theory~\cite{2004_Skipetrov,2008_van_Tiggelen_Nature,2006_Maret_PRL,2006_Skipetrov_dynamics,2003_Genack}. Deep in the localized regime ($g_0 < 0.5$), the self-consistent theory loses its quantitative accuracy, but still yields qualitatively correct results (exponential decay of conductance with the length of the waveguide and of the diffusion coefficient $D$ with the distance from waveguide boundaries). It would be extremely interesting to see if the ability of the self-consistent theory to provide quantitative predictions still holds in three-dimensional systems where a mobility edge exists. In particular, the immediate proximity of the mobility edge is of special interest.
\textit{Note added.}
After this paper was submitted for publication, a related preprint appeared~\cite{2010_Tian_PRL}. In particular, the authors of that work show that the self-consistent theory does not apply to 1D disordered media, which is consistent with our results because $g_0 \sim \ell/L$ is always small in 1D, provided that the condition $L \gg \ell$ assumed in this paper is fulfilled.
%=====================================================================================================================
%\acknowledgments
%\ \\
%\textbf{Acknowledgments}
\section{ACKNOWLEDGMENTS}
We thank Bart van Tiggelen for useful comments. The work at Missouri S\&T was supported by the National Science Foundation Grant No. DMR-0704981. The numerical results were obtained at the Tera-Grid, award Nos. DMR-090132 and DMR-100030. S.E.S. acknowledges financial support of the French ANR (Project No. 06-BLAN-0096 CAROL).
| {
"alphanum_fraction": 0.7075788062,
"avg_line_length": 105.5169230769,
"ext": "tex",
"hexsha": "75a7398d3b8ccd923b7f5120d36445859982e7c0",
"lang": "TeX",
"max_forks_count": null,
"max_forks_repo_forks_event_max_datetime": null,
"max_forks_repo_forks_event_min_datetime": null,
"max_forks_repo_head_hexsha": "646123088fdd226e8677e6f3edb8d109be96994e",
"max_forks_repo_licenses": [
"Apache-2.0"
],
"max_forks_repo_name": "bhpayne/physics_phd_dissertation",
"max_forks_repo_path": "chapters/Anderson_Localization_as_position-dependent_diffusion_in_disordered_waveguides__Phys_Rev_B/quasi1d_dofz_prb.tex",
"max_issues_count": null,
"max_issues_repo_head_hexsha": "646123088fdd226e8677e6f3edb8d109be96994e",
"max_issues_repo_issues_event_max_datetime": null,
"max_issues_repo_issues_event_min_datetime": null,
"max_issues_repo_licenses": [
"Apache-2.0"
],
"max_issues_repo_name": "bhpayne/physics_phd_dissertation",
"max_issues_repo_path": "chapters/Anderson_Localization_as_position-dependent_diffusion_in_disordered_waveguides__Phys_Rev_B/quasi1d_dofz_prb.tex",
"max_line_length": 1706,
"max_stars_count": null,
"max_stars_repo_head_hexsha": "646123088fdd226e8677e6f3edb8d109be96994e",
"max_stars_repo_licenses": [
"Apache-2.0"
],
"max_stars_repo_name": "bhpayne/physics_phd_dissertation",
"max_stars_repo_path": "chapters/Anderson_Localization_as_position-dependent_diffusion_in_disordered_waveguides__Phys_Rev_B/quasi1d_dofz_prb.tex",
"max_stars_repo_stars_event_max_datetime": null,
"max_stars_repo_stars_event_min_datetime": null,
"num_tokens": 9740,
"size": 34293
} |
\documentclass[a4paper, 11pt]{article}
\input{00-preamble-arxiv}
\input{00-preamble}
\begin{document}
\maketitle
\input{abstract}
\setcounter{tocdepth}{2}
\tableofcontents
\pagebreak
\input{01-introduction}
\input{02-methods-revision}
\input{03-results}
\input{04-discussion}
\section*{Abbreviations}
\begin{description}
\item[BMU] Best Matching Unit
\item[DNF-SOM] Dynamic Neural Field-Self-Organizing Map
\item[DSOM] Dynamic Self-Organizing Map
\item[KSOM] Kohonen Self-Organizing Map (Kohonen's original proposal)
\item[KDE] Kernel Density Estimation
\item[RSOM] Randomized Self-Organizing Map
\item[TDA] Topological Data Analysis
\item[SOM] Self-Organizing Map
\end{description}
\section*{Funding}
This work was partially funded by grant ANR-17-CE24-0036.
% \printbibliography %[heading=bibintoc]
\bibliographystyle{plain}
\bibliography{biblio}
\newpage
\appendix
\input{05-appendix-revision}
\end{document}
| {
"alphanum_fraction": 0.7410071942,
"avg_line_length": 24.325,
"ext": "tex",
"hexsha": "eed5d251363fcae70cdea94ed2c4ec6fa6ea03ea",
"lang": "TeX",
"max_forks_count": 1,
"max_forks_repo_forks_event_max_datetime": "2022-01-03T04:41:57.000Z",
"max_forks_repo_forks_event_min_datetime": "2022-01-03T04:41:57.000Z",
"max_forks_repo_head_hexsha": "78e6eb924b5f89a0e6f42eb6bbe7971473a9abaa",
"max_forks_repo_licenses": [
"BSD-3-Clause"
],
"max_forks_repo_name": "rougier/VSOM",
"max_forks_repo_path": "article-overleaf/article-arxiv-revision.tex",
"max_issues_count": null,
"max_issues_repo_head_hexsha": "78e6eb924b5f89a0e6f42eb6bbe7971473a9abaa",
"max_issues_repo_issues_event_max_datetime": null,
"max_issues_repo_issues_event_min_datetime": null,
"max_issues_repo_licenses": [
"BSD-3-Clause"
],
"max_issues_repo_name": "rougier/VSOM",
"max_issues_repo_path": "article-overleaf/article-arxiv-revision.tex",
"max_line_length": 74,
"max_stars_count": 17,
"max_stars_repo_head_hexsha": "78e6eb924b5f89a0e6f42eb6bbe7971473a9abaa",
"max_stars_repo_licenses": [
"BSD-3-Clause"
],
"max_stars_repo_name": "rougier/VSOM",
"max_stars_repo_path": "article-overleaf/article-arxiv-revision.tex",
"max_stars_repo_stars_event_max_datetime": "2022-01-11T22:20:28.000Z",
"max_stars_repo_stars_event_min_datetime": "2020-11-20T06:27:15.000Z",
"num_tokens": 305,
"size": 973
} |
% \iffalse meta-comment
%
% Copyright (C) 1993-2022
% The LaTeX Project and any individual authors listed elsewhere
% in this file.
%
% This file is part of the LaTeX base system.
% -------------------------------------------
%
% It may be distributed and/or modified under the
% conditions of the LaTeX Project Public License, either version 1.3c
% of this license or (at your option) any later version.
% The latest version of this license is in
% http://www.latex-project.org/lppl.txt
% and version 1.3c or later is part of all distributions of LaTeX
% version 2008 or later.
%
% This file has the LPPL maintenance status "maintained".
%
% The list of all files belonging to the LaTeX base distribution is
% given in the file `manifest.txt'. See also `legal.txt' for additional
% information.
%
% The list of derived (unpacked) files belonging to the distribution
% and covered by LPPL is defined by the unpacking scripts (with
% extension .ins) which are part of the distribution.
%
% \fi
% Filename: ltnews12.tex
%
% This is issue 12 of LaTeX News.
\documentclass
% [lw35fonts] % uncomment this line to get Times
{ltnews}[1999/12/01]
% \usepackage[T1]{fontenc}
\publicationmonth{December}
\publicationyear{1999}
\publicationissue{12}
\providecommand{\acro}[1]{\textsc{\MakeLowercase{#1}}}
\providecommand\pkg[1]{\texttt{#1}}
\providecommand\cls[1]{\texttt{#1}}
\providecommand\option[1]{\texttt{#1}}
\providecommand\env[1]{\texttt{#1}}
\providecommand\file[1]{\texttt{#1}}
\begin{document}
\maketitle
\section{LPPL update}
Since the release of the \LaTeX{} Project Public Licence version~1.1,
we have received a small number of queries which resulted in some
minor changes to improve the wording or explain the intentions better.
As a consequence this release now contains LPPL~1.2 in the file
\file{lppl.txt} and the previous versions as \file{lppl-1-0.txt} and
\file{lppl-1-1.txt}.
\section{fixltx2e package}
This package provides fixes to \LaTeXe{} which are desirable but
cannot be integrated into the \LaTeXe{} kernel directly as they
would produce a version incompatible to earlier releases (either
in formatting or functionality).
By having these fixes in the form of a package, users can benefit
from them without the danger that their documents will fail, or
produce unexpected results, at other sites; this works because a
document will contain a clear indication (the \verb=\usepackage=
line, preferably with a required date) that at least some of these
fixes are required to format it.
\section{Outcome of TUG '99 (Vancouver)}
The slides from the \acro{TUG}'99 presentation we gave on \emph{a
new interface for \LaTeX\ class designers} are available from the
\LaTeX\ Project website; look for the file \verb|tug99.pdf| at:
\begin{quote}
\url{http://www.latex-project.org/talks/}
\end{quote}
Please note that this document was intended only to be informal
``speaker's notes'' for our own use. We decided to make them available (the
speaker's notes as well as the slides that were presented) because several
people requested copies after the talk. However, they are \emph{not} in
a polished copy-edited form and are not intended for publication.
Prototype implementations of parts of this interface are now available from:
\begin{quote}
\url{http://www.latex-project.org/code/experimental/}
\end{quote}
We are continuing to add new material at this location so as to
stimulate further discussion of the underlying concepts. As of
December 1, 1999
the following parts can be downloaded.
\begin{description}
\item[xparse] Prototype implementation of the interface for declaring
document command syntax. See the \texttt{.dtx} files for
documentation.
\item[template] Prototype implementation of the template interface
(needs parts of \pkg{xparse}).
The file \file{template.dtx} in that directory has a large section
of documentation at the front describing the commands in the
interface and giving a `worked example' building up some templates
for caption formatting.
\item[xcontents] Interface description for table of contents data (no
code yet). Coding examples have been thoroughly discussed on the
\texttt{latex-l} list.
\item[xfootnote] Working examples for generating footnotes,
etc. Needs \pkg{xparse} and \pkg{template}.
\end{description}
All examples are organised in subdirectories and additionally
available as \texttt{gzip} \texttt{tar} files.
Please remember
that this material is intended only for experimentation and comments;
thus any aspect of it, e.g., the user interface or the functionality,
may change and, in fact, is very likely to change.
For this reason it is explicitly forbidden to place this material on
\acro{CD-ROM} distributions or public servers.
These concepts, as well as their implementation, are under discussion
on the list \texttt{LATEX-L}. You can join this list, which is
intended solely for discussing ideas and concepts for future versions
of \LaTeX, by sending mail to
%\begin{quote}
\email{[email protected]}
%\end{quote}
containing the line
\begin{quote}
\texttt{SUBSCRIBE LATEX-L} \textit{Your Name}
\end{quote}
This list is archived and, after subscription, you can retrieve older
posts to it by sending mail to the above address, containing a command
such as:
\begin{quote}
\texttt{GET LATEX-L LOGyymm}
\end{quote}
where \texttt{yy}=Year and \texttt{mm}=Month, e.g.
\begin{quote}
\texttt{GET LATEX-L LOG9910}
\end{quote}
for all messages sent in October 1999.
\end{document}
| {
"alphanum_fraction": 0.7538571941,
"avg_line_length": 34.8375,
"ext": "tex",
"hexsha": "b7a01bc208b7e78d35c646b294fa434491874a1a",
"lang": "TeX",
"max_forks_count": null,
"max_forks_repo_forks_event_max_datetime": null,
"max_forks_repo_forks_event_min_datetime": null,
"max_forks_repo_head_hexsha": "676031375fbd333c3b685d534641eed52b75380f",
"max_forks_repo_licenses": [
"LPPL-1.3c"
],
"max_forks_repo_name": "snu-development/latex2e",
"max_forks_repo_path": "base/doc/ltnews12.tex",
"max_issues_count": null,
"max_issues_repo_head_hexsha": "676031375fbd333c3b685d534641eed52b75380f",
"max_issues_repo_issues_event_max_datetime": null,
"max_issues_repo_issues_event_min_datetime": null,
"max_issues_repo_licenses": [
"LPPL-1.3c"
],
"max_issues_repo_name": "snu-development/latex2e",
"max_issues_repo_path": "base/doc/ltnews12.tex",
"max_line_length": 76,
"max_stars_count": 2,
"max_stars_repo_head_hexsha": "329dd9a9e6fc93c1c1877f130ad3f016b23c03e1",
"max_stars_repo_licenses": [
"LPPL-1.3c"
],
"max_stars_repo_name": "gucci-on-fleek/latex2e",
"max_stars_repo_path": "base/doc/ltnews12.tex",
"max_stars_repo_stars_event_max_datetime": "2022-02-25T01:13:38.000Z",
"max_stars_repo_stars_event_min_datetime": "2022-02-25T00:58:18.000Z",
"num_tokens": 1438,
"size": 5574
} |
\chapter{The Dialects of Iridian}\label{ch:dialects}
\section{Dialects Outside of Iridia}
\subsection{Ukrainian Dialects}
The Ukrainian dialects of Iridian (\ird{hokránževní mieva}), known locally as \textit{\cyrtext гукран\-же\-вн\'и мни\-ва}, are spoken in the borderlands of Ukraine and Iridia. They form a dialect continuum with the southeastern dialects of the country and constitute the dialect group with the largest number of speakers outside the country. Within the dialect group itself, variation can be observed in the forms spoken from one town to another, mainly because until very recently the language had no official status in Ukraine\footnote{It was recognized as a minority language in 2003.} and its relative isolation from the mainstream dialects of Iridian made it take a path of its own.
As could be expected from its location, this group of dialects has had significant influence from the Ukrainian language (and to a lesser extent, from Russian) and its vocabulary contains more Slavic-derived words than Standard Iridian. These dialects are also written entirely in the Cyrillic script (based on the Iridian Cyrillic alphabet with some spelling conventions adapted from Ukrainian), although in recent years the use of the Latin alphabet has become more common (primarily due to the rise of text messaging and the internet).
Phonologically, the Ukrainian dialects are perhaps the most divergent. One of the most notable differences is the replacement of nasal vowels with a sequence of an oral vowel and a nasal consonant homorganic with the following stop, or, if the nasal vowel was in coda, with an oral vowel and /m/. This also meant that coda /m/ and /n/ no longer nasalize the preceding vowel (and diphthongize it if it is an /a/ or an /e/).
\ex
Loss of nasal vowels in Ukrainian Iridian:\\
\irdp{bi\k{e}c}{cat}\quad$\rightarrow$\quad\ird{\cyrtext б\'инц} [bʲiːnt͡s]\\
\irdp{l\k{a}ca}{flatiron}\quad$\rightarrow$\quad\ird{\cyrtext ланца} [ˈlänt͡sɐ]\\
\irdp{bž\k{e}}{bee}\quad$\rightarrow$\quad\ird{\cyrtext бжем} [bʑɛm]
\xe
\ex
Non-nasalization of vowels before /m/ and /n/:\\
\irdp{bi\k{e}cem}{my cat}[ˈbʲɛ̃w̃t͡sə̃w̃]\quad$\rightarrow$\quad\ird{б\'инцем} [bʲiːnt͡sɪm]\\
\xe
| {
"alphanum_fraction": 0.7757051865,
"avg_line_length": 84.5384615385,
"ext": "tex",
"hexsha": "b594482d76f4a38d89b7d94b8030f70a019b961d",
"lang": "TeX",
"max_forks_count": null,
"max_forks_repo_forks_event_max_datetime": null,
"max_forks_repo_forks_event_min_datetime": null,
"max_forks_repo_head_hexsha": "2ee3f7cec4dd492d308132e502c4feeb83bed963",
"max_forks_repo_licenses": [
"MIT"
],
"max_forks_repo_name": "roelchristian/iridian-reference-grammar",
"max_forks_repo_path": "chapters/appx-02-dialects.tex",
"max_issues_count": 1,
"max_issues_repo_head_hexsha": "2ee3f7cec4dd492d308132e502c4feeb83bed963",
"max_issues_repo_issues_event_max_datetime": "2020-09-14T11:42:11.000Z",
"max_issues_repo_issues_event_min_datetime": "2020-09-14T07:13:14.000Z",
"max_issues_repo_licenses": [
"MIT"
],
"max_issues_repo_name": "roelchristian/iridian-reference-grammar",
"max_issues_repo_path": "chapters/appx-02-dialects.tex",
"max_line_length": 670,
"max_stars_count": 3,
"max_stars_repo_head_hexsha": "2ee3f7cec4dd492d308132e502c4feeb83bed963",
"max_stars_repo_licenses": [
"MIT"
],
"max_stars_repo_name": "roelchristian/iridian-reference-grammar",
"max_stars_repo_path": "chapters/appx-02-dialects.tex",
"max_stars_repo_stars_event_max_datetime": "2020-09-20T15:34:40.000Z",
"max_stars_repo_stars_event_min_datetime": "2020-09-12T12:08:59.000Z",
"num_tokens": 639,
"size": 2198
} |
\chapter{UML}
\begin{figure}[H]
\centering
\includegraphics[width=0.8\textwidth]{figures/uml_class_diagram.png}
\end{figure}
These are the classes that are represented in the UML class diagram:
\begin{itemize}
\item \textit{AbstractExpression}: Declares an interpret() operation that all nodes (terminal and nonterminal) in the AST override.
\item \textit{TerminalExpression} (\textit{NumberExpression}): Implements the interpret() operation for terminal expressions.
\item \textit{NonTerminalExpression} (\textit{PlusExpression, MinusExpression, DivideExpression, MultiplyExpression}): Implements the interpret() operation for all nonterminal expressions.
\item \textit{Context(String)}: Contains information that is global to the interpreter. It contains the String expression with the Postfix notation that has to be interpreted and parsed.
\item \textit{Client }: Builds the AST assembled from TerminalExpression and NonTerminalExpression. The Client invokes the interpret() operation.
\end{itemize}
The nonterminal expressions forward the \textit{interpret} call to their children. This means that you only need to call it once on the root node, because the nonterminal nodes automatically call it on their list of child nodes. A \textit{TerminalExpression} cannot have any children; only a \textit{NonTerminalExpression} can have one or more children. To see the interactions between the different objects, look at the example in the next chapter.
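To make the structure concrete, here is a minimal sketch of the pattern in Python (the actual project may use a different language, and the \textit{Context} and \textit{Client} classes are omitted here): a terminal \textit{NumberExpression} returns its value, while a nonterminal \textit{PlusExpression} forwards \textit{interpret()} to its two children.
\begin{verbatim}
from abc import ABC, abstractmethod

class AbstractExpression(ABC):
    @abstractmethod
    def interpret(self) -> float: ...

class NumberExpression(AbstractExpression):        # terminal: no children
    def __init__(self, value: float):
        self.value = value
    def interpret(self) -> float:
        return self.value

class PlusExpression(AbstractExpression):          # nonterminal: two children
    def __init__(self, left: AbstractExpression, right: AbstractExpression):
        self.left, self.right = left, right
    def interpret(self) -> float:
        # forward the call to the children and combine their results
        return self.left.interpret() + self.right.interpret()

# the client builds the AST for the postfix expression "3 4 +" and calls
# interpret() once, on the root node only
root = PlusExpression(NumberExpression(3), NumberExpression(4))
print(root.interpret())                            # -> 7
\end{verbatim}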
| {
"alphanum_fraction": 0.7924016282,
"avg_line_length": 77.5789473684,
"ext": "tex",
"hexsha": "049fa4c6a798cc51e6361b721406cd2374f718c9",
"lang": "TeX",
"max_forks_count": null,
"max_forks_repo_forks_event_max_datetime": null,
"max_forks_repo_forks_event_min_datetime": null,
"max_forks_repo_head_hexsha": "b8894f4b23c0ef2ce11c152a40691cd797246869",
"max_forks_repo_licenses": [
"MIT"
],
"max_forks_repo_name": "not-matthias/interpreter-pattern",
"max_forks_repo_path": "documentation/chapters/01_document/03_uml.tex",
"max_issues_count": null,
"max_issues_repo_head_hexsha": "b8894f4b23c0ef2ce11c152a40691cd797246869",
"max_issues_repo_issues_event_max_datetime": null,
"max_issues_repo_issues_event_min_datetime": null,
"max_issues_repo_licenses": [
"MIT"
],
"max_issues_repo_name": "not-matthias/interpreter-pattern",
"max_issues_repo_path": "documentation/chapters/01_document/03_uml.tex",
"max_line_length": 433,
"max_stars_count": null,
"max_stars_repo_head_hexsha": "b8894f4b23c0ef2ce11c152a40691cd797246869",
"max_stars_repo_licenses": [
"MIT"
],
"max_stars_repo_name": "not-matthias/interpreter-pattern",
"max_stars_repo_path": "documentation/chapters/01_document/03_uml.tex",
"max_stars_repo_stars_event_max_datetime": null,
"max_stars_repo_stars_event_min_datetime": null,
"num_tokens": 326,
"size": 1474
} |
\section{Prototype}
\label{sec:prototype}
To evaluate the capabilities and modular structure of \textit{Clixon}, a prototype was developed based on the YANG model specified in the RFC draft \textit{ietf-tcpm-yang-tcp}~\cite{draft-ietf-tcpm-yang-tcp}.
In the following subsections, first, the proposed YANG model \textit{ietf-tcp} used in the prototype is introduced. Subsequently, the difficulties encountered during the development of the plugin are outlined. In the last subsection~(\ref{Portability to QNX}), the portability of the prototype and the underlying \textit{Clixon} framework is analyzed using the BlackBerry real-time operating system (RTOS) QNX on an ARMv7 system architecture as the evaluation platform.
%%%%%%%%%%%%%%%%%%
% IETF TCP YANG Model
%%%%%%%%%%%%%%%%%%
\subsection{IETF TCP YANG Model}
\label{IETF TCP YANG Model}
\begin{figure}[htbp]
\centering
\begin{lstlisting}[gobble=8,language={}]
module: ietf-tcp
+-rw tcp!
+-rw connections
| +-rw connection*
| +-rw local-address
| +-rw remote-address
| +-rw local-port
| +-rw remote-port
| +-rw common
| +-rw keepalives!
| | +-rw idle-time
| | +-rw max-probes
| | +-rw probe-interval
| +-rw (authentication)?
| +-:(ao)
| | +-rw enable-ao?
| | +-rw send-id?
| | +-rw recv-id?
| | +-rw include-tcp-options?
| | +-rw accept-key-mismatch?
| +-:(md5)
| +-rw enable-md5?
+-ro statistics {statistics}?
+-ro active-opens?
+-ro passive-opens?
+-ro attempt-fails?
+-ro establish-resets?
+-ro currently-established?
+-ro in-segments?
+-ro out-segments?
+-ro retransmitted-segments?
+-ro in-errors?
+-ro out-resets?
+-x reset
+-w input
| +-w reset-at?
+-ro output
+-ro reset-finished-at?
\end{lstlisting}
\caption{TCP YANG.}
\label{fig:ietf-yang}
\end{figure}
The TCP Maintenance and Minor Extensions (TCPM) working group of the IETF is currently working on a standardized YANG Model for the TCP stack named \textit{ietf-tcp}~\cite{draft-ietf-tcpm-yang-tcp}. It specifies a minimal YANG model for the TCP stack which is presented in Figure~\ref{fig:ietf-yang}. It consists of a container for all TCP connections and a container for basic TCP statistics. In addition, it defines groupings of authentication parameters that can be reused by other models.
To our knowledge, the presented prototype is the first implementation of the proposed \textit{ietf-tcp} model. However, it must be mentioned that the YANG model could not be implemented to its full extent, since at the time of implementation the functionality required for the authentication container was not supported by the Linux kernel.
Furthermore, the list of TCP connections was only implemented as a read-only list because write access was not applicable in the prototype environment.
Likewise, the reset action for the TCP statistics could not be implemented properly due to the lack of support provided by the operating systems used (Ubuntu, QNX).
Nevertheless, the evaluation phase provided valuable insights that contributed to the further development of the YANG model~\footnote{Further information about changes to the YANG model and the contribution of this work can be found in the slide decks for IETF 110~\cite{ietf-110} and IETF 111~\cite{ietf-111}.}.
%%%%%%%%%%%%%%%%%%
% Plugin Development
%%%%%%%%%%%%%%%%%%
\subsection{Plugin Development}
\label{Plugin Development}
\begin{figure}[h!]
\centering
\includegraphics[width=\linewidth]{assets/Prototype/Plugin_Architecture_v.png}
\caption{Plugin Architecture}
\label{fig:plugin_architecture}
\end{figure}
Unlike the \textit{Clixon} framework, which is written entirely in \inlinelst{C} and uses the \textit{Autotools} toolchain as its build system, the plugin was developed using \inlinelst{C++} and \textit{CMake} for a better development experience.
To obtain the TCP connections and statistics specified in the YANG model from the base system, in this case Ubuntu, the pseudo files \inlinelst{/proc/net/snmp} and \inlinelst{/proc/net/tcp} were read and parsed.
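The following Python sketch illustrates the parsing logic only (the actual plugin performs the equivalent steps in \inlinelst{C++}); the field layout follows the documented format of the Linux \inlinelst{/proc/net} pseudo files:
\begin{lstlisting}[language=Python]
def tcp_statistics():
    """Return the Tcp counters of /proc/net/snmp (e.g. ActiveOpens) as a dict."""
    with open("/proc/net/snmp") as f:
        rows = [line.split() for line in f if line.startswith("Tcp:")]
    header, values = rows[0][1:], rows[1][1:]
    return dict(zip(header, (int(v) for v in values)))

def tcp_connections():
    """Return (local_ip, local_port, remote_ip, remote_port) from /proc/net/tcp."""
    def decode(hex_addr):
        ip_hex, port_hex = hex_addr.split(":")
        # IPv4 addresses are stored as little-endian hexadecimal
        octets = [str(int(ip_hex[i:i + 2], 16)) for i in range(6, -2, -2)]
        return ".".join(octets), int(port_hex, 16)
    connections = []
    with open("/proc/net/tcp") as f:
        next(f)                                # skip the header line
        for line in f:
            fields = line.split()
            local, remote = decode(fields[1]), decode(fields[2])
            connections.append((*local, *remote))
    return connections
\end{lstlisting}
Mapping these raw counters onto the YANG leaf names (e.g. \inlinelst{ActiveOpens} onto \inlinelst{active-opens}) is then a simple renaming step.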
For testing the implementation of the plugin, \textit{YangSuite} was used, which was fairly new at the time of implementation. It provides interfaces such as NETCONF, RESTCONF and gNMI, and can be operated in a Docker container. Fig.~\ref{fig:plugin_architecture} shows how \textit{YangSuite} integrates into the user management layer and into the overall setup.
The source code for the implemented plugin can be found on GitHub at https://github.com/mager-m/ietf-tcp-research-project.
%%%%%%%%%%%%%%%%%%
% Portability to QNX
%%%%%%%%%%%%%%%%%%
\subsection{Portability to QNX}
\label{Portability to QNX}
% Why QNX?
QNX is a commercially distributed real-time operating system (RTOS) that is used across various industries and serves as the basis of many embedded devices, including network-related equipment. QNX has therefore been chosen as the evaluation platform for the portability analysis. A physical development board, based on the ARMv7 architecture, has been used in the process.
% Technical Prerequisites -> Port Effort
Since \textit{Clixon}'s goal is to provide a YANG-based configuration manager with support for many platforms, it already runs on a variety of operating systems. To achieve this, \textit{Clixon} relies on feature flags and the \textit{Autotools} toolchain to detect platform capabilities at configuration time. This not only made it easier to get the framework up and running, but also to make any necessary changes, as described in the following.
%& Findings + Challenges
% CLixon
During the deployment of the framework to the new platform, a memory corruption appeared while reading the YANG models from disk. Presumably due to differences in memory management, it had not been noticed on Ubuntu so far. An appropriate fix was implemented and, in agreement with the maintainer, merged into the project as a pull request.
Another challenge was the lack of support for privilege-handling functions such as \inlinelst{setresuid} and \inlinelst{getresuid}, which are used by \textit{Clixon} to drop privileges after initialization. To overcome this issue, an additional check in \textit{Autotools}, along with the feature flag \inlinelst{HAVE\_SETRESUID}, was introduced. Thus, if \inlinelst{getresuid} / \inlinelst{setresuid} are not available on the platform, this is detected during the configuration phase and the relevant code is not included in the compilation. This change was also merged into the project with a pull request.
Due to the highly generic code structure, the deployment of the \textit{Clixon} framework to the evaluation platform proceeded without further challenges.
% Plugin
Since QNX does not provide the same pseudo files for reading TCP connections and statistics as Ubuntu, modifications to the plugin were necessary. In the absence of these files, the information was obtained by running the \inlinelst{netstat} command and parsing its output. Otherwise, no major changes were necessary.
% Fazit
The deployment of the prototype to the evaluation platform confirmed the initial assumption and \textit{Clixon}'s advertised portability. Thanks to the generic code base and the use of the \textit{Autotools toolchain} along with many feature flags, the framework proved to be very flexible and easily portable to new UNIX-like environments.
The challenges faced during deployment also highlighted the benefits of open-source software. Besides the possibility of making changes directly to the source code, there is a solid community to turn to with questions that arise during development. Since changes can be contributed and merged, the framework and its capabilities are constantly evolving.
%
%Since model-based management is also becoming increasingly important in other industries beyond traditional network management, the portability of the implemented software solution was evaluated subsequently.
% Abstract ------------
\begin{abstract}
\noindent \tom{Abstract not updated yet.} Recent evidence has shown that spatial allele sorting during invasions can increase invasion speed, as well as variability in invasion speed, by generating selection on dispersal distance and the low-density population growth rate. Most studies assume that selection acts on each of these traits independently, and do not consider that traits may instead be correlated. However, it is well-established that correlations between traits play an important role in evolutionary dynamics. Here, we estimate additive genetic correlations and environmental correlations between dispersal distance and the low-density population growth rate in the beetle \textit{Callosobruchus maculatus}, a model system that has been previously used to investigate experimental invasion dynamics. We find that, for both additive genetic and environmental correlations, dispersal distance and low-density growth rate are negatively correlated in \textit{C. maculatus}. Furthermore, we use these estimates from the \textit{C. maculatus} system to parameterize simulations of invasions that explore the full spectrum of possible trait correlations. We find that strong positive correlations between dispersal and the low-density growth rate increase the speed and variability of invasions relative to strong negative correlations, and that both positive and negative correlations generate qualitatively different outcomes compared to invasions without trait correlations. We suggest that incorporating estimates of trait correlations is likely to be important for modeling the range of potential outcomes for invading populations.
\end{abstract}
% Keywords ------------
\setlength\parindent{.45in} \keywords{spatial selection, life-history evolution, trait correlations, G-matrix, dispersal}
\doublespacing
% Introduction --------
\section{Introduction}
Understanding the factors that govern the rate of spatial expansion is a long-standing problem in population biology and takes on urgency in the context of two key dimensions of contemporary global change: range expansion by invasive species and climate change migration by native species. Classic ecological theory tells us that the dynamics of range expansion are driven by the combined forces of local birth/death processes (`demography') and individual movement (`dispersal') \citep{skellam_random_1951,okubo_diffusion_1980,kot_discrete-time_1986,kot_dispersal_1996}. Recently, ecologists have begun to examine the consequences of individual variation, especially heritable variation, in demography and dispersal traits and how eco-evolutionary feedbacks can modify the dynamics of range expansion.
Individuals that vary in dispersal ability are expected to become sorted along an expanding population front \citep{shine_evolutionary_2011}. Spatial sorting generates an over-representation of highly dispersive phenotypes at the invasion vanguard. If dispersal is heritable, non-random mating among highly dispersive individuals may lead to rapid evolution of increased dispersal ability at the leading edge. Furthermore, if density-dependent demography generates a fitness advantage at the low-density invasion front, highly dispersive alleles may be favored by `spatial selection' \citep{phillips_life-history_2010, perkins_evolution_2013}. Increased fitness in the vanguard can also result in natural selection for increased low-density reproductive rates (`\textit{r}-selection') \citep{phillips_life-history_2010}. Because invasion speed is determined by dispersal and low-density reproductive rate, the combined action of these evolutionary processes is expected to increase the speed of invasions \citep{phillips_evolutionary_2015}, and a surge of recent experimental work supports this theoretical prediction \citep{williams_rapid_2016, ochocki_rapid_2017, weiss-lehman_rapid_2017,van2018kin}. Several studies also show more variation in speed -- how far the invasion spreads over time -- than would be expected in the absence of evolution \citep{phillips_evolutionary_2015, ochocki_rapid_2017, weiss-lehman_rapid_2017}. Increased variability is likely due to the stochastic fixation of alleles at the leading edge, which is a consequence of the serial founder events that characterize invasive spread -- a spatial analogue of genetic drift called `gene surfing' \citep{edmonds_mutations_2004,klopfstein_fate_2006,excoffier_surfing_2008,peischl_expansion_2015,phillips_evolutionary_2015, ochocki_rapid_2017, weiss-lehman_rapid_2017}.
Most theoretical models of the eco-evolutionary dynamics of range expansion assume that dispersal and low-density reproductive rate (hereafter `fertility') evolve independently. If, however, these traits are genetically correlated, then it is impossible to predict the outcome of selection without knowing both the magnitude and sign of the correlation \citep{lande_measurement_1983,chenoweth2010contribution}. Quantitative genetics offers a convenient framework to explore sources of (co)variation in ecologically important traits. In the simplest case, total phenotypic variation in a single quantitative trait can be partitioned into two types of variance: additive genetic variance (the variance that can be explained by the inheritance of genes from parents to offspring) and environmental variance (any residual, non-heritable variance caused by extrinsic factors) \citep{lynch_genetics_1998,kruuk_estimating_2004,wilson_ecologists_2010}. This framework can be extended to account for multiple traits and the correlations between them. Genetic correlations are expected to arise through pleiotropy (a subset of genes influencing multiple traits) and/or physical linkage (association of alleles on chromosomes) \citep{roff_evolutionary_1997}, while environmental correlations arise from plastic responses to the environment that are non-independent across traits. There are, then, many ways for dispersal and fertility to interact via trait correlations; the correlations can be positive or negative, and can be due to genetic effects, environmental effects, or both.
Due to the energetic cost of dispersal, it is often assumed that negative %genetic <- I think better to cut?
correlations in the form of trade-offs between dispersal and life-history traits should be important drivers of invasion dynamics \citep{hanski_dispersal-related_2006,chuang_expanding_2016}. However, it is not clear that we should expect to see such bivariate trade-offs in nature \citep{saltz_trait_2017}, or even that trade-offs should necessarily result in negative genetic correlations \citep{houle_genetic_1991}. In the Glanville fritillary butterfly (\textit{Melitaea cinxia}), variation tightly linked to a single gene (\textit{Pgi}) generates a positive genetic correlation between dispersal propensity and clutch size \citep{hanski_dispersal-related_2006,bonte_dispersal_2012}. Conversely, speckled wood butterflies (\textit{Pararge aegeria}) at range margins demonstrate a heritable negative correlation between dispersal propensity and clutch size \citep{hughes_evolutionary_2003}, while the damselfly \textit{Coenagrion scitulum} exhibits no genetic correlation between dispersal and clutch size \citep{therry_higher_2014}. Environmental correlations, on the other hand, may arise through any number of extrinsic, non-heritable factors that generate plastic phenotypic responses. Female blue tits (\textit{Parus caeruleus}) show a positive environmental correlation between dispersal and future fertility that is related to current brood size: females that were experimentally assigned to rear small broods in one year dispersed farther and had increased fertility in the following year relative to females assigned large broods \citep{nur_consequences_1988}. Negative environmental correlations between dispersal and fertility have been demonstrated in the green-veined white butterfly (\textit{Pieris napi}), where individuals exposed to a controlled temperature/photoperiod regime that mimicked summertime conditions had higher dispersal and lower fertility than individuals exposed to a springtime regime \citep{karlsson_seasonal_2008}. Indeed, widespread evidence for dispersal `syndromes' -- the covariation of dispersal with other life history and behavioral traits within \citep{clobert2009informed} or between \citep{comte2018evidence} species -- suggests that the classical assumption of demography and dispersal rates as independent parameters may break down for eco-evolutionary models that incorporate trait heterogeneity. How, then, should we expect the magnitude and sign of demography-dispersal covariance to alter predictions about range expansion?
Only two previous studies to our knowledge, both simulation-based, have explored eco-evolutionary dynamics of spread under trade-offs (negative correlation) between life history traits, focusing on the trade-off axis of \textit{r}/\textit{K} selection \citep{burton_trade-offs_2010,perkins_after_2016}. These studies suggest that, for populations invading empty space, traits that promote fertility and dispersal should be maintained at the invasion front at the expense of competitive ability \citep{burton_trade-offs_2010,perkins_after_2016}. However, because they imposed a particular type of trade-off, these studies do not reveal how variation in the sign and magnitude of genetic and/or environmental trait correlations modify spread dynamics. We hypothesize that negative genetic correlation between dispersal and fertility may constrain evolutionary acceleration of spread, relative to invasions with independent trait evolution, because high-dispersal / high-fertility phenotypes may be inaccessible to selection. %\tom{[It is not clear in the case of negative correlation whether dispersal or fertility would increase at front, if both cannot. Might be worth adding.]}
Alternatively, positive correlation may align the direction of selection with the main axis of phenotypic covariation, causing greater evolutionary responses than would be expected for uncorrelated traits. We further hypothesize that environmental correlations, positive or negative, should have little impact on evolutionary dynamics of invasion; since environmental correlations modify traits in a way that is not heritable, they may contribute little more than noise at the invasion front. Finally, although there is an expectation that evolutionary processes can make invasions more variable \citep{phillips_evolutionary_2015,ochocki_rapid_2017,weiss-lehman_rapid_2017}, it is not clear how genetic and/or environmental correlations will interact with other evolutionary processes to influence variability in invasion outcomes. Understanding the factors that drive invasion variability is essential for making useful predictions about the range of possible trajectories for spreading populations.
%\tom{[I have the hypothesis that environmental correlations should increase variance, but only in variable abiotic environments. If the spatial environment is constant then there is no way for this correlation to manifest. If the spatial environment is itself variable then there should be wider fluctuations in trait values at the front (if E correlation is positive). Worth testing/adding? How?]}-- add to discussion
In this study, we used a combination of laboratory experiments, classical quantitative genetics, and individual-based models to explore how demography-dispersal trait correlations, arising from genetics and/or environment, influence trait evolution during range expansion and the ecological dynamics of spread. We explore this question first in a specific empirical context, building upon a model system for the evolutionary acceleration of range expansion, and then more generally. In a previous study, we showed that rapid evolution accelerated the expansion of bean beetles (\textit{Callosobruchus maculatus} (Chrysomelidae)) spreading through laboratory mesocosms and also elevated replicate-to-replicate variability in invasion speed, a result that we attributed to the stochasticity-generating effects of `gene surfing' \citep{ochocki_rapid_2017}. Surprisingly, evolutionary acceleration was due entirely to rapid evolution of dispersal distance; there was no evidence that fertility evolved during range expansion despite predictions that it should \citep{ochocki_rapid_2017}. Here we focused on the architecture of these traits, including their genetic and environmental variances and correlations. We then integrated experimental trait estimation with a spatially explicit individual-based model that combines population genetics and density dynamics. The model allowed us to retrospectively evaluate whether and how trait correlations contributed to the evolutionary effects on traits (increased dispersal, no change in fertility) and range expansion (increased mean and variance of invasion speed) that we observed in our previous study. Lastly, we used the system-specific parameters as a starting point to ask, more generally across parameter space, how the full range of possible demography-dispersal trait correlations influence the eco-evolutionary dynamics of spread.
% Methods -------------
\section{Materials and Methods}
We conducted this study in three parts. First, we measured dispersal and density-dependent fertility from individuals with known pedigree. This enabled us to infer the genetic and environmental variances and covariances (and thus correlations) between dispersal and fertility using hierarchical Bayesian estimation of a quantitative genetics model. Second, we used estimates from this statistical model to parameterize a stochastic simulation of bean beetle range expansion. The model allowed us to generate system-specific predictions for evolved trait changes and spread dynamics. Lastly, we varied the genetic and environmental (co)variances of dispersal and fertility, beyond the particular values of the beetle system, to more generally evaluate how trait correlations influence invasion dynamics.
\subsection{Bean beetle experiment}
\subsubsection{Study system}
The bean beetle \textit{Callosobruchus maculatus} is a stored-grain pest that feeds on legumes, spending its entire developmental life inside a single bean \citep{fujii_behavioral_1990}. Adult beetles, which require neither food nor water, emerge after ca. 28 days of development. Adults live ca. 10 days, during which they disperse, mate, and reproduce. The short generation time and convenient rearing conditions make this species a popular model system in population biology, including previous studies of life-history traits, population dynamics, and range expansion \citep{bellows_analytical_1982,fujii_behavioral_1990,miller_confronting_2011,miller_sex_2013,wagner_genetic_2016,ochocki_rapid_2017}.
Laboratory populations of \textit{C. maculatus} are typically highly inbred, often for dozens or hundreds of generations. We created a genetically diverse population that was founded with 54 beetles (\female:\mars $\approx$ 1:1) haphazardly chosen from each of 18 laboratory lines (960 beetles in total), each line having been originally isolated from different parts of the species’ global distribution \citep{downey_comparative_2015}. This was the same genetic make-up of the populations used in our previous range expansion experiments \citep{ochocki_rapid_2017}. Individuals in this mixed population interbred in a resource-unlimited environment for seven generations before the start of the experiment, to allow for sufficient genetic mixing and to reduce linkage disequilibrium \citep{roughgarden_theory_1979,ochocki_rapid_2017}. Beetles were maintained in a climate-controlled growth chamber on a 16:8 photoperiod at 28°C throughout the experiment. The beetles used in this experiment were reared on black-eyed peas (\textit{Vigna unguiculata} (Fabaceae)). %Beetles from different laboratory lines readily interbreed and produce fertile offspring \citep{fox_complex_2004,fox_genetic_2004,ochocki_rapid_2017}.
\subsubsection{Trait measurement}
\paragraph{Dispersal}
We used a nested full-sib/half-sib breeding design to measure genetic and environmental variances and covariances in our laboratory-reared populations of \textit{C. maculatus}. This design is appropriate because it allows the estimation of these variances from a single generation of trait measurement and does not require information on the parental genotypes or phenotypes \citep{falconer_introduction_1996,conner_primer_2004,wilson_ecologists_2010}. We created half-sib families by mating a single sire to three virgin dams, and replicated this process 50 times to get 150 unique full-sib families, each nested within one of 50 half-sib families. After a 48-hour mating period, each dam was transferred to an individual Petri dish for oviposition. Petri dishes contained 50g black-eyed peas, essentially unlimited resources for a single female, and dams were permitted to oviposit \textit{ad libitum} until senescence. After 28 days of development, we measured dispersal and density-dependent fertility in the adult offspring.
Within 48 hours of eclosion, we measured dispersal ability by allowing beetles to disperse for two hours across one-dimensional arrays of 60mm Petri dish `patches'. Each patch in these arrays was interconnected by 1/8" plastic tubing and contained seven black-eyed peas, the same dispersal environment as in our range expansion experiments \citep{ochocki_rapid_2017}. Dispersal trials began with 16 full-siblings (8 females and 8 males) in a starting patch, with a sufficient number of patches to the left and right so that beetles could disperse in either direction without encountering the edge of the environment. We chose to use 16 beetles because that was the largest number of individuals that we could reliably obtain from our full-sibling families while maintaining a 1:1 sex ratio among the dispersing individuals. After two hours of dispersal, we counted the number of patches that each beetle dispersed, which allowed us to estimate a dispersal kernel for each full-sibling family.
\paragraph{Fertility}
After dispersal, we gathered female beetles from the dispersal array and transferred each of them to an isolated Petri dish where they could oviposit; after 28 days we counted the number of offspring that emerged to estimate fertility for each female. While our main focus was low-density fertility in leading-edge environments, we additionally quantified density dependence in fertility so that our simulations could include realistic population dynamics behind the advancing front. To induce density-dependence in fertility, each oviposition dish contained a resource density of either 1, 3, 5, or 10 black-eyed peas, and a non-sibling male beetle. Given the opportunity, females will attempt to distribute their eggs approximately uniformly among available beans \citep{fujii_behavioral_1990} so that the number of eggs per bean is inversely proportional to the number of beans available. Larval competition within a bean has a strong negative effect on larval survival to adulthood, so that any larva's survival probability decreases with the number of eggs per bean \citep{giga_intraspecific_1991}. It is therefore possible to vary the strength of larval competition that offspring experience simply by varying the number of beans available to females for oviposition. Thus, dishes containing one black-eyed pea were expected to yield high egg densities, resulting in high-competition larval environments; dishes containing 10 black-eyed peas were expected to yield low egg densities, resulting in low-competition larval environments. Post-dispersal females were haphazardly assigned to one of the four oviposition densities. Due to the relatively low number of female dispersers in each full-sibling family, we opportunistically supplemented fertility trials with full-sibling females that were not included in the dispersal trial. We attempted to replicate each bean density at least three times per full-sib family; we made note of fertility trials using un-dispersed females, and preliminary analyses did not reveal any differences in fertility between dispersed and un-dispersed individuals.
Since the density-dependent competition described here is among full siblings, it is important to consider whether competition among full siblings might be different than competition among unrelated individuals. Conveniently, experimental evidence shows that the strength of competition among developing larvae of \textit{C. maculatus} does not vary with relatedness \citep{smallegange_local_2008}. Thus, changes in fertility in response to changing resource availability under this design likely reflect true measures of intraspecific competitive ability, and are likely not influenced by reduced competition due to kinship.
\subsubsection{Statistical analysis}
\paragraph{Overview}
We used the animal model to estimate genetic and environmental variances and covariances of dispersal and demography traits \citep{lynch_genetics_1998,kruuk_estimating_2004,wilson_ecologists_2010}. The animal model is a hierarchical linear mixed model that partitions genetic variance in quantitative traits based on associations between kinship and trait values of offspring, even if trait values of parents are not known (as in our study). Because dispersal and fertility were measured as counts (patches moved and number of offspring, respectively) we used a generalization of the animal model for non-Gaussian traits \citep{de2016general}. This distinction is important because, in the generalized animal model, genetic variation in traits manifests at two scales: the scale of the observations and a `latent' scale that corresponds more directly to the trait values expected due to kinship but that can only be studied through random realizations (the observations) \citep{de2016general}. As we describe in the next sections, we focus throughout on genetic variance, covariance, and heritability of dispersal and demography traits on their latent scales.
While the animal model framework is able to accommodate additional random effects associated with the maternal environment (i.e., maternal effects), our model failed to converge when we attempted this, indicating that our data were not sufficient to estimate maternal effects in addition to other sources of variance and covariance. Any maternal effects are therefore implicitly incorporated into kinship, which may positively bias estimates of additive genetic variance and narrow-sense heritability.
Males and females in \textit{C. maculatus} are known to differ in dispersal \citep{miller_sex_2013,ochocki_rapid_2017}. Since we could only measure density-dependent fertility in females, we focused our analysis exclusively on data collected from females, and the simulation model that follows is correspondingly female-dominant.
\paragraph{Dispersal}
Previous studies estimating \textit{C. maculatus} dispersal kernels have found negative binomial or Poisson Inverse Gaussian kernels to provide the best fit to dispersal data \citep{miller_sex_2013,wagner_genetic_2016,ochocki_rapid_2017}. While this was also the case in the present study when data were aggregated across families, a Poisson kernel provided the best fit to family-level dispersal data. This is likely due to the fact that variance in the mean among families generates an aggregate response that is negative-binomially distributed. We thus modeled the dispersal distance $d$ of individual $i$ from sire $j$ and dam $k$ as:
%
\begin{equation}\label{corr:dispersal_random}
d_{ijk} \sim \mathit{Poisson}(\lambda_{ijk})
\end{equation}
%
where $\lambda_{ijk}$ is the mean and variance of the Poisson distribution. The expected value for dispersal distance is defined by a linear model that includes a grand mean ($\mu^{d}$) and accounts for parentage ($a^{d}_{jk}$) and residual deviation of observation $i$ ($e^{d}_{i}$):
%
\begin{equation} \label{corr:dispersal_linmod}
\log(\lambda_{ijk}) = \mu^{d} + a^{d}_{jk} + e^{d}_{i}
\end{equation}
%
The genetic ($a^{d}_{jk}$) and environmental ($e^{d}_i$) random variables for dispersal distance are further defined below in relation to fertility.
\paragraph{Fertility}
Unlike dispersal, fertility was measured with respect to density. We imposed density dependence by manipulating the resources available to an individual female rather than density \textit{per se}. We analyzed the data using the framework of the Beverton-Holt model of population growth, modified so that population density is expressed as the ratio of females to beans:
%
\begin{equation}\label{corr:BevHoltFull}
\frac{N_{t+1}}{B} = \frac{r(\frac{N_{t}}{B})}{1 + \frac{r}{K}\frac{N_{t}}{B}}
\end{equation}
%
Dividing both sides by $N_{t}/B$ and setting $N_{t}=1$ gives the expected offspring production of single females in variable bean environments:
%
\begin{equation}\label{corr:BevHoltPercap}
\hat{N}_{t+1} = \frac{r}{1 + \frac{r}{KB}}
\end{equation}
%
Here, $B$ is the number of beans available, $r$ is low-density fertility, and $K$ is the carrying capacity per-bean (i.e., the number of beetles that one bean could support). Our aim was to identify variation in $r$ that was attributable to pedigree and covariance with dispersal. There is a well-documented covariance between statistical estimates of $r$ and $K$ in density-dependent population models \citep{hilborn_quantitative_1992}, and this prevented us from modeling both $r$ and $K$ as heritable traits. Instead, we assume a fixed value of $K$ and allow $r$ to vary among individuals according to a quantitative genetic model of inheritance.
We treat the number of offspring produced by female $i$ from sire $j$ and dam $k$ ($N_{ijk}$) as a Poisson random variable, with the expected value given by \ref{corr:BevHoltPercap}.
% - Not sure it is worth showing this.
%
\begin{equation}\label{corr:Noff_ran}
N_{ijk} \sim \mathit{Poisson}\Big(\frac{r_{ijk}} {1 + \frac{r_{ijk}}{KB_{ijk} }}\Big)
\end{equation}
%
As in \ref{corr:dispersal_linmod}, low-density fertility is described by a linear model that includes a grand mean ($\mu^{r}$) and accounts for parentage ($a^{r}_{jk}$) and residual deviation of observation $i$ ($e^{r}_{i}$):
%
\begin{equation} \label{corr:fert_linmod}
\log(r_{ijk}) = \mu^{r} + a^{r}_{jk} + e^{r}_{i}
\end{equation}
%
\paragraph{Linking dispersal and fertility}
Finally, to model genetic and environmental variances and covariances, we link the corresponding random deviates. The vector $\bm{a}_{jk}$ contains the random deviates (also known as `breeding values') for dispersal ($a^{d}_{jk}$) and fertility ($a^{r}_{jk}$) associated with kinship and is distributed according to a multivariate normal distribution centered on the average of the breeding values for both parents (the `midparent value') with variance-covariance matrix $\bm{G}/2$:
%
\begin{gather} \label{corr:gen}
\bm{a}_{jk} \sim \mathit{MVN} \Big( \frac{\bm{a}_{j} + \bm{a}_{k}}{2}, \frac{\bm{G}}{2} \Big) \\[10pt]
\bm{G} =
\begin{bmatrix}
\begin{array}{ll}
V_{G,d} &C_{G} \\
C_{G} &V_{G,r} \\
\end{array}
\end{bmatrix}
\end{gather}
%
Here, $V_{G,d}$ and $V_{G,r}$ are the additive genetic variances in the `latent' trait values for dispersal and fertility, and $C_{G}$ is the additive genetic covariance between the latent trait values. In Equation (\ref{corr:gen}), dividing $\bm{G}$ by $2$ accounts for the expected additive genetic variance among full siblings compared with the population as a whole \citep{roughgarden_theory_1979}.
The environmental deviates -- individual-to-individual variation that is not explained by pedigree, also known as `overdispersion' \citep{de2016general} -- are treated similarly, where the vector $\bm{e}_{i}$ contains elements $e^{d}_{i}$ and $e^{r}_{i}$ from \ref{corr:dispersal_linmod} and \ref{corr:fert_linmod}, respectively, and is distributed as follows:
%
\begin{gather} \label{corr:env}
\bm{e}_{i} \sim \mathit{MVN} (0, \bm{E}) \\[5pt]
\bm{E} =
\begin{bmatrix}
\begin{array}{ll}
V_{E,d} &C_{E} \\
C_{E} &V_{E,r} \\
\end{array}
\end{bmatrix}
\end{gather}
%
In the bean beetle system, microsite variation in larval environment is a good candidate for the environmental variation $\bm{E}$, as host beans may vary in, for example, nutrient content, water content, geometry, age, etc. For both covariances, genetic and environmental correlations were derived as $\rho = \frac{C}{\sqrt{V_{d}}\sqrt{V_{r}}}$. \tom{Check with Brad. I assume this is where the heritabilities in Figure \ref{corr:posteriors} come from.}
Assuming that genetic and environmental effects are not correlated with each other, the total phenotypic variances and covariances in these traits can be described by a covariance matrix $\bm{P}$, which is simply $\bm{P} = \bm{G} + \bm{E}$. Calculating $\bm{P}$ enables us to calculate the narrow-sense heritability of each trait, which reflects the proportion of variance in the phenotype (on the latent scale) attributable to additive genetic factors. For dispersal,
%
\begin{equation}\label{corr:heritability}
h^{2}_d = \frac{V_{G,d}}{V_{P,d}}
\end{equation}
%
and likewise for heritability of fertility ($h^{2}_r$). All analyses in this section were performed in R 3.4.0 \citep{r_core_team_r:_2015} using \code{rstan} \citep{stan_development_team_rstan:_2015}. Because models were fit in a Bayesian framework, we can quantify parameter uncertainty through their posterior distributions. Code for these analyses may be found at \url{https://github.com/bochocki/correlatedtraits}.
\subsection{Dynamics of invasions with correlated traits}
We simulated sexually-reproducing populations spreading across a one-dimensional landscape in discrete time and discrete space, based on our empirical estimates for the \textit{C. maculatus} system. Although \textit{C. maculatus} has two sexes, we simulated hermaphroditic populations for the purpose of tractability. Each simulation began with 20 individuals in a single starting patch; we modeled each of these individuals as expressing dispersal and fertility phenotypes following the statistical model defined above. The additive genetic ($\bm{a}_{jk}$) and environmental deviates ($\bm{e}_i$) for each individual were drawn at random given covariance matrices $\bm{G}$ and $\bm{E}$, respectively (Equations (\ref{corr:gen}) and (\ref{corr:env})). The initial conditions of the simulation mimic a small founding population being introduced to a novel landscape from some genetically well-mixed source population.
We defined an invasion's extent in each generation as the location of the individual farthest to the right of the starting patch. Since dispersal direction was unbiased, both the left- and right-moving wave fronts should exhibit similar dynamics, but we restricted our analysis to the right-moving fronts to avoid pseudo-replication. To understand how trait variation and covariation altered simulated invasion outcomes, we compared mean invasion extent after 20 generations and the coefficient of variation (CV) in extent as a measure of variability. All simulations were conducted using Julia 0.5.0 \citep{bezanson_julia:_2017}, and all analyses were conducted using R 3.4.0 \citep{r_core_team_r:_2015}. All code for the simulation and analyses is publicly available at \url{https://github.com/bochocki/correlatedtraits}. Additional methodological details of the simulations are provided in appendix XX.
\subsubsection{Simulation details \tom{-- Move to appendix}}
Each generation of the simulation, individuals in the population mated, reproduced, died, and their offspring dispersed; this is similar to the laboratory-imposed life-cycle in \textit{C. maculatus} invasion experiments \citep{miller_sex_2013,wagner_genetic_2016,ochocki_rapid_2017}. Because the landscape was modeled as an array of discrete patches, local interactions -- including mate finding, reproduction, and density-dependent population growth -- took place at the patch-level.
%Thus, the population density in any patch was simply the total number of individuals in that patch.
For mating, each individual selected one other individual in the same patch, at random (and with replacement), and received genetic information from that individual. Because individuals were modeled as hermaphrodites, all individuals were capable of acting as both male and female during reproduction. Under this mating system, each individual had the capacity to contribute genetic information to multiple unique individuals, but could only receive genetic information from one individual. Individuals could not self-fertilize; all offspring were thus the product of two unique parents. In instances where a patch contained only one individual, that individual did not reproduce. Our model therefore includes a mate-finding Allee effect for singly-occupied patches.
Offspring inherited breeding values from their parents $\bm{a}_{jk}$, which were drawn from a multivariate normal distribution according to Equation (\ref{corr:gen}). The expressed phenotype was also dependent on the environmental deviates $\bm{e}_{i}$, drawn according to Equation (\ref{corr:env}), and the population mean phenotypes $\mu^{d}$ and $\mu^{r}$. As in similarly-structured models of evolution during invasions, additive genetic variance is expected to decrease as the variance in breeding values among individuals decreases \citep{phillips_evolutionary_2015}. Each generation, we calculated the additive genetic covariance matrix $\bm{G}$ in each patch by calculating the variances and covariance among all breeding values in that patch. Offspring breeding values were then assigned according to Equation (\ref{corr:gen}), using the patch-estimated $\bm{G}$ matrix.
After mating, each individual reproduced following the density-dependent Beverton-Holt model of population growth described in Equations (\ref{corr:BevHoltFull}) to (\ref{corr:Noff_ran}). We modeled invasions across a homogeneous landscape, assuming a fixed resource density of 10 beans in all patches. The carrying capacity $K$ was therefore fixed across the landscape, but per-capita population growth varied among individuals according to Equation (\ref{corr:fert_linmod}).
After reproduction, parents senesced, marking the end of the generation; at the start of the next generation, their offspring dispersed. Thus, we modeled populations that were characterized by discrete, non-overlapping generations. Offspring dispersed from their natal patch according to their latent dispersal phenotype $\lambda_{ijk}$, and dispersal distance was Poisson distributed, as in Equation (\ref{corr:dispersal_random}). While the Poisson distribution only generates non-negative values, individuals in the simulation could disperse either to the left or right. We simulated bi-directional dispersal by randomly multiplying an individual's Poisson distance by -1 (for leftward dispersal) or +1 (for rightward dispersal), with equal probability for each direction. After dispersal, individuals mated with an individual in the patch that they dispersed to, they reproduced, and they senesced. We simulated this process for 20 generations, on par with similar timescales of eco-evolutionary dynamics in empirical systems \citep{williams_rapid_2016,ochocki_rapid_2017,weiss-lehman_rapid_2017}.
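For concreteness, the core sampling steps of one generation can be sketched as follows (a condensed illustration in C++ rather than the Julia used for the actual simulations; all names, the fixed seed, and the numerical values are placeholders, not our estimates): offspring breeding values are drawn around the midparent value with covariance $\bm{G}/2$, environmental deviates are added with covariance $\bm{E}$, expected offspring number follows the per-capita Beverton-Holt expression, and dispersal is a signed Poisson draw.
\begin{lstlisting}[language=C++]
#include <array>
#include <cmath>
#include <iostream>
#include <random>

// Latent traits: index 0 = dispersal (log lambda), index 1 = fertility (log r).
using Vec2 = std::array<double, 2>;
using Mat2 = std::array<double, 4>;  // row-major 2x2: {v11, c12, c21, v22}

std::mt19937 rng(42);  // illustrative fixed seed

// Draw from a bivariate normal with mean mu and covariance S (2x2 Cholesky).
Vec2 mvn(const Vec2& mu, const Mat2& S) {
    std::normal_distribution<double> z;
    double l11 = std::sqrt(S[0]);
    double l21 = S[1] / l11;
    double l22 = std::sqrt(S[3] - l21 * l21);
    double z1 = z(rng), z2 = z(rng);
    return {mu[0] + l11 * z1, mu[1] + l21 * z1 + l22 * z2};
}

struct Offspring { Vec2 a; double lambda; double r; };

// Breeding values ~ MVN(midparent, G/2); environmental deviates ~ MVN(0, E).
Offspring make_offspring(const Vec2& a_mum, const Vec2& a_dad,
                         const Mat2& G, const Mat2& E,
                         double mu_d, double mu_r) {
    Vec2 mid = {(a_mum[0] + a_dad[0]) / 2.0, (a_mum[1] + a_dad[1]) / 2.0};
    Mat2 Ghalf = {G[0] / 2.0, G[1] / 2.0, G[2] / 2.0, G[3] / 2.0};
    Vec2 a = mvn(mid, Ghalf);
    Vec2 e = mvn({0.0, 0.0}, E);
    // Latent traits are on the log scale in the statistical model.
    double lambda = std::exp(mu_d + a[0] + e[0]);  // mean dispersal distance
    double r      = std::exp(mu_r + a[1] + e[1]);  // low-density fertility
    return {a, lambda, r};
}

// Per-capita Beverton-Holt expectation (the Poisson mean for offspring number),
// with K the per-bean carrying capacity and B the number of beans in the patch.
double expected_offspring(double r, double K, double B) {
    return r / (1.0 + r / (K * B));
}

// Poisson dispersal distance with an unbiased left/right direction.
int disperse(double lambda) {
    std::poisson_distribution<int> dist(lambda);
    std::bernoulli_distribution go_left(0.5);
    int d = dist(rng);
    return go_left(rng) ? -d : d;
}

int main() {
    Mat2 G = {0.26, -0.07, -0.07, 0.06};  // placeholder covariances, not the estimates
    Mat2 E = {0.14, -0.03, -0.03, 0.29};
    Offspring o = make_offspring({0.0, 0.0}, {0.1, -0.1}, G, E, 0.5, 2.0);
    std::cout << expected_offspring(o.r, 3.0, 10.0) << " expected offspring, "
              << disperse(o.lambda) << " patches moved\n";
}
\end{lstlisting}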
\subsubsection{Generalizing beyond the \textit{C. maculatus} system}
We first ran the simulated invasions with parameter values estimated from the \textit{C. maculatus} laboratory experiments (Table \ref{corr:estimates}). We then generalized the simulation study by exploring realistic variation in trait correlations and heritability. To test the role of variation in the sign and magnitude of trait correlations and to contrast the effects of genetic vs. environmental correlations, we varied $\rho_{E}$ and $\rho_{G}$ in a fully factorial design, so that each correlation coefficient took the following values: -0.9, -0.5, -0.1, 0, 0.1, 0.5, and 0.9. We further replicated the variation in trait correlations across five cases corresponding to variation in the heritability of dispersal and fertility:
\begin{itemize}
\item $h^{2}_d = h^{2}_r = 0$. This is the `no evolution' scenario, which serves as a baseline and internal control.
\item $h^{2}_d > h^{2}_r$. Heritability of dispersal is greater than that of fertility.
\item $h^{2}_d < h^{2}_r$. Heritability of fertility is greater than that of dispersal.
\item $h^{2}_d = h^{2}_r$. Equally high heritability of dispersal and fertility.
\item $h^{2}_d = h^{2}_r$. Equally low (but non-zero) heritability of dispersal and fertility.
\end{itemize}
Lastly, we repeated all of the above for two levels of total phenotypic variance: $V_{G,d}$ and $V_{G,r}$ from the beetle system and $2V_{G,d}$ and $2V_{G,r}$. This allowed us to assess whether changing heritabilities (proportions of variance) has qualitatively consistent effects for different absolute amounts of variance. We replicated each combination 1000 times ($N$ = \tom{add} simulations total). \tom{Not sure how much of the above we will keep, at least in the main ms. Depends on results, so this will likely get updated and hopefully simplified.}
\printonnextpage{Tables/corr_estimates.tex}
%\printonnextpage{Tables/corr_parameters.tex}
% Results -------------
\section{Results}
\subsection{\textit{Genetic architecture of \textup{C. maculatus} demography and dispersal traits}}
We found that \textit{C. maculatus} exhibited similar total phenotypic variance in the latent traits corresponding to dispersal and fertility ($V_{P,d}$ = 0.40, $V_{P,r}$ = 0.35; Figure \ref{corr:posteriors}, Table \ref{corr:estimates}). Furthermore, dispersal and fertility both exhibited additive genetic variance, suggesting that both traits are heritable from parents to offspring. However, the absolute amount of genetic variance and therefore the proportion of heritable variation differed between the traits. Dispersal had a median narrow-sense heritability ($h^{2}_{d}$) of 0.54 and the 95\% credible interval in the estimate spanned a wide range (0.22 to 0.91), suggesting moderate to strong inheritance of this trait. The median narrow-sense heritability for fertility ($h^{2}_{r}$) was 0.16 and the 95\% credible interval spanned a lower and narrower range (0.05 to 0.31), reflecting less genetic variation in this trait compared to dispersal.\tom{[I like figure 1 but it would be nice to have labeled sub-panels so that we can explicitly reference the variances, correlations, and heritabilities.]}
We found evidence of negative correlations between dispersal and fertility for both additive genetic (median $\rho_{G}$: -0.37) and environmental effects (median $\rho_{E}$: -0.16). While posterior distributions for both correlations included zero, the majority of both posterior densities were negative (Figure \ref{corr:posteriors}, center panel), suggesting that, given estimation uncertainty, the correlations are xx-xx times more likely to be negative than positive.\tom{[My though here is that we can quantify how much of the posterior is negative vs positive, and this gives us a sense of confidence in the conclusion that correlations are negative.]} The estimated additive genetic correlation had a 95\% credible interval that was notably wider than the estimated environmental correlation ($\rho_{G}$: -0.79 to 0.15; $\rho_{E}$: -0.43 to 0.09). This occurs even though the credible intervals for the covariance estimates are similar to each other ($C_{G}$: -0.11 to 0.01; $C_{E}$: -0.09 to 0.02; Figure \ref{corr:posteriors}). This discrepancy is likely due to the relatively wide credible interval in the additive genetic variance in the dispersal trait ($V_{G,d}$: 0.08 to 0.45), which would necessarily result in a relatively wide credible interval in the additive genetic correlation.\tom{[I don't think this is super important but I actually do not follow this argument. I think part of my confusion is that I am not sure where the correlation estimates come from (comment above where you say they were ``calculated''). For related reasons, I think the covariances can be dropped from this figure, since they simply integrate the variances and correlations.]}
\input{Figures/corr_posteriors.tex}
\subsection{\tom{\textit{Comparing predicted and observed trait evolution and range expansion}}}
\tom{The figures in your thesis talk, etc.}
\subsection{\textit{The role of correlations in trait evolution and spread dynamics}}
We find that genetic correlations between dispersal and growth ($\rho_{G}$) have consistent effects on mean invasion extent after 20 generations of invasion, with strong positive genetic correlations generating farther extents compared to strong negative genetic correlations. For a given environmental correlation ($\rho_{E}$), increasing the genetic correlation ($\rho_{G}$) results in increased invasion extent (Figure \ref{corr:extent}a and b). Likewise, increasing $\rho_{E}$ for a given value of $\rho_{G}$ results in increased mean invasion extent. However, this effect is less pronounced; changes in $\rho_{G}$ appear to have a greater impact on invasion extent than changes in $\rho_{E}$. \tom{[I think this is an interesting result and one that we will need to dig more deeply into. Is this a beetle-specific result, or is it general? How would we know? Is it because the genetic correlation was stronger than the environmental correlation? Would the effects on speed be the same for correlations of equal magnitude?]} While correlations have a clear impact on extent, all invasion extents were on the same order of magnitude, regardless of correlation. \tom{[You say nothing here about comparison with the empirical results, though this is shown in the figure. Also, it is unclear where those values (the crosses) come from, though it may not matter if our approach to comparing obs/pred will change.]}
\printonnextpage{Figures/corr_extent_and_CV.tex}
Correlations between dispersal and fertility also had a clear effect on the CV in invasion extent. In general, increasingly positive genetic correlations ($\rho_{G}$) resulted in more variable range expansion than negative correlations. However, environmental correlation ($\rho_{E}$) had virtually no effect on spread variability (Figure \ref{corr:extent}c). Genetic correlations had a larger effect on the CV of extent compared to the mean extent: strong positive correlations resulted in a CV that was nearly twice as large as the CV when correlations were strong and negative (Figure \ref{corr:extent}c and d).\tom{[Not sure you can say this if the mean values were scaled and the CV values were not.]}
% Discussion ----------
\section{Discussion}
\tom{[I did only light commenting here since the paper may change in some important structural ways. Obviously, many of the take-home messages should not change. Overall, I think it needs better structure to communicate what we learned about the beetle system (and why it matters) and what we learned more generally.]}
Trait correlations in invading organisms are often considered in the contrasting realms of negative correlations (`trade-offs') or positive correlations (`colonizer syndrome')\tom{[The `syndrome' idea was not developed in the intro.]}. Previous studies have demonstrated genetic and environmental correlations between dispersal and fertility \citep{nur_consequences_1988,hughes_evolutionary_2003,hanski_dispersal-related_2006,karlsson_seasonal_2008,bonte_dispersal_2012,therry_higher_2014}, and have explored the evolutionary consequences of trade-offs in life-history traits and dispersal \citep{burton_trade-offs_2010,perkins_after_2016}. The results of this study attempt to incorporate both ends of this continuum into a cohesive framework for understanding the role of correlations in evolutionary invasion dynamics. In this study, we estimate dispersal and fertility trait data for the beetle \textit{C. maculatus}, and use that data to parameterize a model that explores how a wide range of possible genetic and environmental correlations alter invasion dynamics. We found that \textit{C. maculatus} exhibits additive genetic variance in both dispersal and fertility, as well as negative genetic and environmental correlations between those traits. Furthermore, we show that mean invasion extent and the coefficient of variation in extent are dependent on genetic correlations and, to a lesser extent, environmental correlations. Finally, we demonstrate that an interaction between genetic and environmental correlations has important consequences for the dynamics of evolving invasions. \tom{[This summary paragraph should also emphasize that the explicit consideration of correlations allowed us to retrospectively interpret the invasion dynamics that we previously documented. We should also have a more explicit statement of what this study is the first to do, though I like the statement about integrating ideas about trade-offs and syndromes.]}
A prior experiment using laboratory invasions of \textit{C. maculatus} showed that, after 10 generations of invasion, \textit{C. maculatus} evolved increased dispersal ability \citep{ochocki_rapid_2017}, which is consistent with our finding that \textit{C. maculatus} exhibits additive genetic variance in dispersal \tom{[That result was reported in the original study, probably not worth mentioning again here.]}. These findings are also in agreement with another study by Sano-Fujii (\citeyear{sano-fujii_genetic_1986}), who demonstrated a genetic basis for the inheritance of a flight/flightless polymorphism in \textit{C. maculatus}. Interestingly, since dispersal in the present experiment was measured by ambulatory dispersal, this suggests that it may be possible for selection to act on multiple modes of dispersal within the same organism. Conversely, the \textit{C. maculatus} invasion experiments by Ochocki and Miller (\citeyear{ochocki_rapid_2017}) showed no evidence that fertility evolved, although our current findings suggest that fertility is heritable, and previous research has shown fertility to be heritable at levels higher than we report ($h^{2} \approx$ 0.63, \citep{messina_heritability_1993}; $h^{2} \approx$ 0.40, \citep{messina_environment-dependent_2003}). \tom{[I think an important results that does not come through clearly is that fertility had less evolutionary potential than dispersal, and this must explain a big part of why fertility did not evolve in the invasion experiment (though the correlation matters too, and this is the more interesting result).]} The environmental correlation that we observe in the present study may be attributable to the larval environments that females experienced; as previously mentioned, host beans are subject to their own phenotypic variation, although we tried to minimize the importance of this by visually selecting for beans that were uniform in size and condition. We also made no effort to regulate larval densities within beans. Messina and Fry (\citeyear{messina_environment-dependent_2003}) consider genetic and environmental correlations in \textit{C. maculatus}; although they do not measure dispersal, they find positive genetic and environmental correlations between fertility and longevity in the presence of host beans, and negative genetic and environmental correlations (of a similar magnitude) in the absence of beans. Each laboratory population has a unique evolutionary history, complicated by founder effects, population bottlenecks, and genetic drift; it is interesting, although perhaps not surprising, that researchers testing traits on different populations should attain different estimates. Moreover, that genetic and environmental correlations can depend so strongly on population and environmental context reinforces our claim that it is important to explore invasion dynamics over a wide range of possible correlations, and not just in the context of trade-offs. \tom{[I like this last point. Overall though I think this paragraph needs better structure. It bounces around several results, and I am not sure what you are trying to emphasize beyond checking the boxes of comparison with previous work.]}
Our finding that invasion extent is dependent on genetic correlations, where extent increases with the genetic correlation, seems to follow our expectations. Invasion speed is dependent on dispersal and fertility at the leading edge of the invasion \citep{skellam_random_1951,okubo_diffusion_1980,kot_discrete-time_1986}. When genetic correlations are strong and positive, good dispersers that make it to the leading edge of the invasion are likely to have increased fertility relative to other individuals in the population -- not only due to the positive genetic correlation between fertility and dispersal, but also due to the release from density-dependence that individuals in the vanguard experience. Thus, spatial selection and natural selection act in concert to reinforce both dispersal and fertility, boosting invasion speed. \tom{[I think it is not just that spatial selection and natural selection are aligned (they always are) but that they can operate along the dominant axis of heritable covariation. This magnifies the phenotypic response to selection.]} The converse can explain why negative genetic correlations result in slower invasions: individuals who are good enough dispersers to travel to the leading edge are likely to have poor fertility as a consequence of the negative genetic correlation, so that spatial selection and natural selection act in opposition, resulting in an attenuating effect that reduces invasion speeds. This attenuation can also explain why variance is reduced at strong, negative genetic correlations. Variation in invasion speed for evolving invasions is thought to be caused by gene surfing -- the stochastic buildup of alleles at the leading edge of an invasion as a consequence of the serial founder events that typify invasive spread \citep{edmonds_mutations_2004,klopfstein_fate_2006,excoffier_surfing_2008,peischl_expansion_2015,phillips_evolutionary_2015,ochocki_rapid_2017,weiss-lehman_rapid_2017}. The variance in invasion speed due to gene surfing is reduced when long-distance dispersal decreases, presumably due to the fact that a (relatively) slow leading edge is more likely to experience gene flow from trailing patches (Ochocki, Miller, and Phillips, \textit{in prep.}). Thus, slower invasions caused by negative genetic correlations between fertility and dispersal should also be disproportionately less variable than invasions where those genetic correlations are positive.\tom{[I have trouble following these last two sentences and I am not sure what you mean by `disproportionately'. I think about it differently, following the logic you developed in the cartoon of the demography-dispersal heatmap. If you stochastically sample a set of demography-dispersal traits under a negative correlation you will get a less variable set of invasion phenotypes than under a positive correlation, and this should explain the CV result.]}
We hypothesized that environmental correlations would have little effect on invasion extent and CV other than contributing statistical noise. Our results suggest that environmental correlations are relatively unimportant compared to genetic correlations \tom{[again, is this \textit{generally} or just in bean beetles?]}, but interactions between the two have clear effects that we did not expect. For all values of the genotypic correlation, positive environmental correlations increase mean invasion extent, while negative environmental correlations decrease mean invasion extent. To understand why this occurs, it is helpful to recall that the breeding values and the residual deviates that determine phenotypes are independent of each other, and that the residual deviates are centered on zero and, by definition, not heritable. Because residual deviates are normally distributed and centered on zero, strong positive environmental correlations will act to increase dispersal and fertility for roughly half of the individuals in the population every generation, thus generating increased invasion extents relative to no environmental correlation. Conversely, strong negative environmental correlations will almost always yield increases in one trait and decreases in another. Since invasion speed is dependent on both traits, negative environmental correlations generate decreased invasion extents relative to no environmental correlation. \tom{[This interpretation strikes me as overly complicated. Isn't the basic idea that a positive correlation, whether it be inherited or induced by environment, will make it more likely that good dispersers also have high fertility, and this will always speed things up? In this sense the distinction between correlations that are genetic vs environmental is actually not all that important in terms of spread dynamics, and this seems to me like an important result to emphasize.]} Finally, although environmental correlations can have important consequences for mean invasion extent, environmental correlations have little impact on the variance in extent other than contributing random noise.\tom{[This is an odd statement; `contributing noise' sounds like it should increase variance.]}
Like any study based on a model organism, there are caveats that merit consideration when attempting to generalize our results. For example, we did not explore alternative values for many parameters in our model: additive genetic and environmental variances, total phenotypic variances, mean trait values, and life-history are all important components of evolutionary and ecological dynamics, and should be expected to have important impacts on invasion dynamics, but were not considered here for purposes of tractability. \tom{[This is defensible only if we can demonstrate that we are considering the appropriate types of parameter space for the scope of our questions. In the current draft, this argument falls a little flat.]} Furthermore, we simulated beetles as being hermaphroditic, even though they clearly have two sexes. Accounting for two-sex invasions requires additional assumptions about sex-linked traits \citep{guntrip_effect_1997}, sex-biased dispersal \citep{miller_sex-biased_2011,miller_confronting_2011,miller_sex_2013}, and mate finding \citep{shaw_mate_2014}; it is not clear how our results would scale up to a two-sex invasion, or other complex population structures.
In summary, our results suggest that genetic correlations are important considerations when developing expectations for the spread of biological invasions. While positive genetic correlations resulted in faster and more variable invasions, negative genetic correlations resulted in slower and less variable invasions.\tom{[My reaction here, and to your talks where you have made similar statements, is that these are the same result. It is not worth contrasting the effects of negative correlations vs positive correlation IMHO, beyond saying that increasing the correlation increases speed.]} Environmental correlations can have important implications for invasion extent, but relatively little impact on invasion variance. This research adds to a growing body of literature that aims to describe not only the speed of biological invasions, but also the variability in the invasion process.
% Etc ----------
\section{Acknowledgments}
Funding for this work was provided by NSF-DEB-1501814, NSF Data Analysis and Visualization Cyberinfrastructure grant OCI-0959097, and the Godwin Assistant Professorship at Rice University. We thank M. Zapata for help in conducting the experiment. We also thank A. Bibian, A. Compagnoni, C. Dytham, K.B. Ensor, L. Lancaster, V.H.W. Rudolf, E. Schultz, E. Siemann, M. Sneck, J.M.J. Travis, and M.E. Wolak for comments on the project and manuscript.
\chapter{Combinational logic}
\subsection{Instrumental Variable (IV) estimator}
\(\hat{\theta}_{IV} = (Z^T X)^{-1} Z^T y\)
2SLS collapses to the IV estimator when the model is just identified, i.e.\ when the number of instruments equals the number of regressors.
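A minimal numerical sketch of this estimator is given below (Python with NumPy; the simulated data are purely illustrative and not part of the original note):
\begin{verbatim}
import numpy as np

def iv_estimate(Z, X, y):
    """theta_IV = (Z'X)^{-1} Z'y for the just-identified case."""
    # Solve (Z'X) theta = Z'y rather than forming the inverse explicitly.
    return np.linalg.solve(Z.T @ X, Z.T @ y)

# Illustrative use with simulated data.
rng = np.random.default_rng(0)
n = 500
z = rng.normal(size=n)                  # instrument
u = rng.normal(size=n)                  # unobserved confounder
x = 0.8 * z + u + rng.normal(size=n)    # endogenous regressor
y = 2.0 * x + u + rng.normal(size=n)    # outcome; true slope is 2.0
Z = np.column_stack([np.ones(n), z])
X = np.column_stack([np.ones(n), x])
theta_iv = iv_estimate(Z, X, y)         # slope estimate should be close to 2.0
\end{verbatim}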
% !TEX root = ../zeth-protocol-specification.tex
\section{Ethereum}\label{preliminaries:ethereum}
In a nutshell, \ethereum~is a distributed deterministic state machine, consisting of a globally accessible singleton state (``the World state'') and a virtual machine that applies changes to that state~\cite{mastering-eth}.
State transitions in the state machine are represented by transactions on the system. As such, each transaction represents a change in the global state, which is stored as a Merkle Patricia Tree~\cite{patricia-tree} whose nodes are objects called ``accounts'' (\cref{preliminaries:ethereum:eth-account}). The Ethereum Virtual Machine (\evm) allows state transitions to be specified by creating a type of account associated with a piece of code (a smart contract). The code of such an account, and thus the corresponding state transition, is executed, moving the machine to another state, by creating a transaction that calls the given piece of code (\cref{preliminaries:ethereum:eth-tx}).
To prevent unbounded state transitions in the state machine, each instruction executed by the \evm~is associated with a cost in \wei, referred to as ``the gas necessary to run the operation''. The ``gas cost'' of a transaction needs to be paid by the transaction originator (deducted from their account balance), and is awarded to the miner (added to their account balance) who successfully mines the block containing the transaction.
In addition to the cost of every instruction executed as part of a state transition, every transaction has an intrinsic cost of $\txDefaultGas$ gas~\cite[Appendix G]{ethyellowpaper}. Bounding modifications to the $\ethereum{}$ state by the amount of \wei~held in the transaction originator's account allows the system to avoid the Halting problem\footnote{\url{https://en.wikipedia.org/wiki/Halting\_problem}} and protects against a range of Denial of Service (\dos) attacks.
\subsection{Ethereum account}\label{preliminaries:ethereum:eth-account}
An \ethereum~account~\cite[Section 4.1]{ethyellowpaper} is an object containing four attributes, as represented in~\cref{preliminaries:tab:eth-account}.
We distinguish two types of accounts:
\begin{itemize}
\item ``Externally Owned Accounts'' (\eoa), that are created by derivation of an \ecdsa~secret key; and
\item Smart-contract accounts, that are derived from \evm~code specifying a state transition on the state machine.
\end{itemize}
Each account object is accessible in the Merkle Patricia Tree representing the ``World state'' by a unique $\addressLen$-bit long identifier called the address.
In the context of \eoa, the address is obtained by generating a new \ecdsa~\cite{johnson2001elliptic} key pair $\smalltuple{\sk, \vk}$ over curve \secpCurve~\cite{qu1999sec} and taking the rightmost $\addressLen$ bits of the \keccak{256} hash of the verification key $\vk$.
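The address derivation described above can be sketched as follows (Python; the \texttt{pycryptodome} package is an assumed dependency providing Keccak-256, and hashing the 64-byte uncompressed encoding of the verification key is an assumption based on common practice rather than on this document):
\begin{verbatim}
from Crypto.Hash import keccak   # pycryptodome (assumed available)

def eoa_address(vk_bytes: bytes) -> str:
    """Rightmost 160 bits (20 bytes) of Keccak-256 of the verification key.

    vk_bytes is assumed to be the 64-byte uncompressed secp256k1 point
    x || y, without any prefix byte.
    """
    digest = keccak.new(digest_bits=256, data=vk_bytes).digest()
    return "0x" + digest[-20:].hex()
\end{verbatim}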
\begin{table}[H]
\centering
\begin{tabular}{cp{25em}c}
Field & Description & Data type\\ \toprule
$\nonce$ & The nonce of an account is a scalar value representing the number of transactions that have originated from the account, starting at 0. & $\NN_\ethWordLen$ \\ \midrule
$\balance$ & The balance of an account is a scalar value representing the amount of \wei~in the account. & $\NN_\ethWordLen$\\ \midrule
$\sroot$ & The storage root is the \keccak{256} hash representing the storage of the account. & $\BB^{\keccakTwoDigestLen}$\\ \midrule
$\codeh$ & The code hash is the hash of the \evm~code governing the account. If this field is the \keccak{256} hash of the empty string, then the account is said to be an ``Externally owned Account'' (\eoa), and is controlled by the corresponding \ecdsa~private key. If, however, this field is not the \keccak{256} hash of the empty string, the account represents a smart contract whose interactions are governed by its \evm~code. & $\BB^{\keccakTwoDigestLen}$\\ \bottomrule
\end{tabular}
\caption{Ethereum Account structure}\label{preliminaries:tab:eth-account}
\end{table}
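As an illustration only, the account object of \cref{preliminaries:tab:eth-account} and the \eoa/smart-contract distinction based on the code hash can be sketched as follows (Python; the class and field names are hypothetical):
\begin{verbatim}
from dataclasses import dataclass

# Keccak-256 of the empty string, marking an account with no EVM code.
KECCAK256_EMPTY = bytes.fromhex(
    "c5d2460186f7233c927e7db2dcc703c0e500b653ca82273b7bfad8045d85a470"
)

@dataclass
class Account:
    nonce: int            # number of transactions originated from the account
    balance: int          # balance in wei
    storage_root: bytes   # Keccak-256 root of the account storage
    code_hash: bytes      # Keccak-256 hash of the EVM code

    @property
    def is_eoa(self) -> bool:
        # Externally Owned Account: the code hash is that of the empty string.
        return self.code_hash == KECCAK256_EMPTY
\end{verbatim}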
\begin{notebox}
In the rest of this document, we will refer to an \emph{Ethereum user} $\eparty{U}$ as a person, modeled as an object, holding \emph{one}\footnote{The same physical person may correspond to multiple ``$\ethereum{}$ users'' and thus control multiple accounts in the Merkle Patricia Tree.} secret key, $\sk$ (object attribute), associated with an existing \eoa~in the ``World state''. We denote by $\eparty{U}.\addr$ the \ethereum~address of $\eparty{U}$ derived from $\eparty{U}.\sk$, and which allows $\eparty{U}$ to access the state of their account $\wstate[\eparty{U}.\addr]$.
We denote by $\contractstyle{SmartC}$ a smart-contract instance/object (i.e.~deployed smart-contract with an address,~\cref{preliminaries:ethereum:eth-tx}), and denote by $\contractstyle{SmartC}.\addr$ its address.
\end{notebox}
\subsection{Ethereum transaction}\label{preliminaries:ethereum:eth-tx}
We now briefly mention what \ethereum~transactions~\cite[Section 4.2]{ethyellowpaper} are, and how they are created, signed and validated. Once more, the reader is highly encouraged to refer to~\cite{ethyellowpaper} for a detailed presentation.
%\subsubsection{Transaction}
Informally, a transaction object ($\tx$) is a signed message originating from an \ethereum~user $\eparty{U}$ (the \emph{transaction originator}, or simply \emph{sender}) that represents a state transition on the distributed state machine (i.e.~a change in the ``World state'' $\wstate$).
\subsubsection{Raw transaction}\label{preliminaries:ethereum:eth-tx:raw}
In the following, we define a raw transaction as an unsigned transaction (\cref{preliminaries:tab:eth-unsigned}).
\begin{table}[H]
\centering
\begin{tabular}{ccc}
Field & Description & Data type \\\toprule
$\nonce$ & Transaction nonce & $\NN_{\ethWordLen}$\\\midrule
$\gasp$ & gasPrice & $\NN_{\ethWordLen}$\\\midrule
$\gasl$ & gasLimit & $\NN_{\ethWordLen}$\\\midrule
$\tto$ & Recipient's address & $\BB^{\addressLen}$\\\midrule
$\val$ & Value of the transaction in $\wei$ & $\NN_{\ethWordLen}$\\\midrule
$\init$ / $\data$ & \begin{tabular}{@{}c@{}}Contract Creation data $\init$ \\ Message call data $\data$\end{tabular} & $\BB^{*}$\\\bottomrule
\end{tabular}
\caption{Structure of a \emph{raw transaction data type} $\txRawDType$}\label{preliminaries:tab:eth-unsigned}
\end{table}
\subsubsection{Finalizing raw transactions}\label{preliminaries:ethereum:eth-tx:final}
A raw transaction needs to be finalized to be accepted. In the context of this document, ``finalizing a raw transaction'' will be a synonym of ``signing a raw transaction''. The transaction structure is represented in~\cref{preliminaries:tab:eth-signed}.
\begin{table}[H]
\centering
\begin{tabular}{ccc}
Field & Description & Data type \\ \toprule
$\rawTx$ & Raw transaction object & $\txRawDType$ \\ \midrule
$\sigv$ & Field $\sigv$ of $\ecdsa$ signature used for public key recovery & $\BB^{\byteLen}$\\ \midrule
$\sigr$ & Field $\sigr$ of $\ecdsa$ signature~\cite{rfc6979} & $\FFx{\rSecp}$\\ \midrule
$\sigs$ & Field $\sigs$ of $\ecdsa$ signature~\cite{rfc6979} & $\FFx{\rSecp}$\\ \bottomrule
\end{tabular}
\caption{Structure of a (finalized) \emph{transaction data type} $\txDType$}\label{preliminaries:tab:eth-signed}
\end{table}
We define the transaction generation function, cf.~\cref{preliminaries:fig:txgen}, as the function taking the sender's \ecdsa~signing key and the components of a raw transaction as arguments, and returning a signed (or finalized) transaction ($\finalTx$ or $\tx$ for short).
\begin{align*}
\finalTx &= \txgen(\sk_{\ecdsa}, \inp{\nonce}, \inp{\gasp}, \inp{\gasl}, \inp{\tto}, \inp{\val}, \inp{\init}, \inp{\data})\\
\finalTx &= \{ \\
& \left.
\begin{array}{l@{}l}
\nonce & {}: \inp{\nonce},\\
\gasp & {}: \inp{\gasp},\\
\gasl & {}: \inp{\gasl},\\
\tto & {}: \inp{\tto},\\
\val & {}: \inp{\val},\\
\init/\data & {}: \inp{\init}/\inp{\data},
\end{array}
\right\rbrace~\rawTx\\
& \left.
\begin{array}{l@{}l}
\sigv & {}: \sigma_\ecdsa.\sigv,\\
\sigr & {}: \sigma_\ecdsa.\sigr,\\
\sigs & {}: \sigma_\ecdsa.\sigs \\
\end{array}
\right\rbrace~\sigma_\ecdsa\\
\}
\end{align*}
To sign a transaction, the sender first computes the hash of the raw transaction using $\keccak{256}$, cf.~\cref{preliminaries:eq:tx-sig-hash}, and then uses their \ecdsa~signing key, $\sk_\ecdsa$, to sign the obtained digest, cf.~\cref{preliminaries:eq:tx-sig-sig}. The signature is then appended to the raw transaction to obtain a finalized transaction, cf.~\cref{preliminaries:fig:txgen}.
\begin{align}
\digest_\ecdsa &= \keccak{256}(\inp{\nonce}, \inp{\gasp}, \inp{\gasl}, \inp{\tto}, \inp{\val}, \inp{\init}/\inp{\data}) \label{preliminaries:eq:tx-sig-hash} \\
\sigma_{\ecdsa} &= \ecdsasigscheme.\sig(\sk_\ecdsa, \digest_\ecdsa)\ (= \smalltuple{\sigv, \sigr, \sigs}) \label{preliminaries:eq:tx-sig-sig}
\end{align}
\begin{figure}[H]
\centering
\procedure[linenumbering]{$\txgen(\sk_\ecdsa, \inp{\nonce}, \inp{\gasp}, \inp{\gasl}, \inp{\tto}, \inp{\val}, \inp{\init}, \inp{\data})$}{%
\pcif \inp{\tto} = \emptyset \pcdo\\
\t \rawTx \gets \{\nonce: \inp{\nonce}, \gasp: \inp{\gasp}, \gasl: \inp{\gasl}, \tto: \inp{\tto}, \val: \inp{\val}, \init: \inp{\init}\}; \\
\pcelse \\
\t \rawTx \gets \{\nonce: \inp{\nonce}, \gasp: \inp{\gasp}, \gasl: \inp{\gasl}, \tto: \inp{\tto}, \val: \inp{\val}, \data: \inp{\data}\}; \\
\pcendif \\
\sigma_\ecdsa \gets \ecdsasigscheme.\sig(\sk_{\ecdsa}, \keccak{256}(\rawTx)); \\
\finalTx \gets \{ \rawTx, \sigv: \sigma_\ecdsa.\sigv, \sigr: \sigma_\ecdsa.\sigr, \sigs: \sigma_\ecdsa.\sigs \}; \\
\pcreturn\ \finalTx;
}
\caption{Transaction generation function \txgen}\label{preliminaries:fig:txgen}
\end{figure}
\begin{remark}\label{preliminaries:recovering-msg-sender}
As one can see, there is no ``from'' attribute in a transaction. The sender's \ethereum~address can be recovered from the \ecdsa~signature. This method is defined in the \ethereum~yellow paper as a ``sender function'' $S$~\cite[Appendix F]{ethyellowpaper} which maps each transaction to its sender.
\end{remark}
\subsubsection{Types of transactions}\label{preliminaries:ethereum:eth-tx:tx-types}
While only two types of transactions are described in~\cite[Section 4.2]{ethyellowpaper}, namely those which result in message calls and those which result in the creation of new accounts with associated code, we will instead differentiate transactions based on their purpose. The reader is encouraged to read~\cite{ethyellowpaper} for a formal discussion.
\medskip
Informally, a transaction can be used to achieve three things: transferring \wei~from an \eoa~to another \eoa, creating a new account with associated code (i.e.~``deploying a smart-contract''), and calling a function of a smart-contract. We will detail here the differences between these usages.
\begin{description}
\item[Creating a contract] The $\tx.\tto$ address is set to $\emptyset$ in the transaction. The contract creation data ($\tx.\init$) includes the new contract's code. The contract address is computed as the rightmost $\addressLen$ bits of the \keccak{256} hash of the \rlp~encoding~\cite{ethrlp} of the transaction originator's address and account nonce~\cite[Section 6]{ethyellowpaper} (see the sketch after this list).
\item[Calling a contract function] The $\tx.\tto$ address is set to the address of the contract. The message call data byte array ($\tx.\data$) is set to the contract's function address (or \emph{``Function Selector''}~\cite{abi-function-selector}), which is given by the first 4 bytes of the \keccak{256} hash of the function signature, followed by the function input arguments ($\ethWordLen$ bits per input)~\cite[Section 8]{ethyellowpaper}.
\item[Transferring \wei~from an \eoa~to another \eoa] This corresponds to a ``plain transaction'' spending \wei~from an address to send them to another. In that case the $\tx.\tto$ address corresponds to the recipient's address while the transaction data is left empty.
\end{description}
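The contract-address derivation referenced above can be sketched as follows (Python; the \texttt{pycryptodome} and \texttt{pyrlp} packages are assumed dependencies of this sketch):
\begin{verbatim}
from Crypto.Hash import keccak   # pycryptodome (assumed available)
import rlp                       # pyrlp (assumed available)

def contract_address(sender: bytes, nonce: int) -> str:
    """Rightmost 160 bits of Keccak-256 of RLP([sender address, account nonce])."""
    # Minimal big-endian encoding of the nonce (empty byte string for zero).
    nonce_bytes = nonce.to_bytes((nonce.bit_length() + 7) // 8, "big") if nonce else b""
    encoded = rlp.encode([sender, nonce_bytes])
    digest = keccak.new(digest_bits=256, data=encoded).digest()
    return "0x" + digest[-20:].hex()
\end{verbatim}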
\begin{notebox}
In order to keep notations simple, we assume, in the rest of the document, that smart-contract functions are uniquely determined by their name. As such, we denote by $\funcSelec{\cdot} \colon \BB^{*} \to \BB^{4 \cdot \byteLen}$ the function that takes a function name as input and returns its function selector (a sketch follows this note).
\end{notebox}
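A minimal sketch of $\funcSelec{\cdot}$ applied to a full function signature is given below (Python; \texttt{pycryptodome} assumed; the example signature is the standard ERC-20 \texttt{transfer} function and is used purely for illustration):
\begin{verbatim}
from Crypto.Hash import keccak   # pycryptodome (assumed available)

def func_selector(signature: str) -> bytes:
    """First 4 bytes of Keccak-256 of the canonical function signature."""
    return keccak.new(digest_bits=256, data=signature.encode("ascii")).digest()[:4]

# Illustrative check against a well-known selector.
assert func_selector("transfer(address,uint256)").hex() == "a9059cbb"
\end{verbatim}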
\subsubsection{Transaction validity}\label{preliminaries:ethereum:eth-tx:tx-validity}
Importantly, not all finalized transactions constitute valid state transitions on the state machine~\cite[Section 6]{ethyellowpaper}.
We denote by \ethVerifyTx~the function that takes an \ethereum~transaction object $\tx$ as input and returns $\true$ (resp.~$\false$) if $\tx$ is valid (resp.~invalid). To be deemed valid, a transaction $\MUST$ satisfy \emph{all} of the following conditions (a sketch of these checks follows the list):
\begin{enumerate}
\item The transaction is correctly \rlp~encoded, with no additional trailing bytes;
\item the transaction signature $\smalltuple{\sigv, \sigr, \sigs}$ is valid;
\item the transaction nonce ($\tx.\nonce$) is valid, i.e.~it is equal to the account nonce of the transaction originator;
\item the gas limit is no smaller than the gas used by the transaction;
\item the transactor has enough funds in their account balance to cover at least the cost $\tx.\val + \tx.\gasp \cdot \tx.\gasl$.
\end{enumerate}
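As referenced above, a minimal sketch of conditions 3--5 is given below (Python; the \texttt{tx} and \texttt{sender\_account} objects and their field names are hypothetical, and the \rlp~well-formedness and signature checks of conditions 1--2 are assumed to be performed elsewhere):
\begin{verbatim}
def verify_tx(tx, sender_account, intrinsic_gas) -> bool:
    """Sketch of validity conditions 3-5; conditions 1-2 are checked elsewhere."""
    if tx.nonce != sender_account.nonce:
        return False   # condition 3: transaction nonce must equal the account nonce
    if tx.gas_limit < intrinsic_gas:
        return False   # condition 4: gas limit must cover the gas used
                       # (the intrinsic cost is used here as a lower bound)
    upfront_cost = tx.value + tx.gas_price * tx.gas_limit
    if sender_account.balance < upfront_cost:
        return False   # condition 5: balance covers value + gas_price * gas_limit
    return True
\end{verbatim}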
\subsubsection{Lifecycle of a transaction, and miners' incentives}\label{preliminaries:ethereum:eth-tx:tx-life}
After the creation of an \ethereum~transaction \tx~by a user from an \ethereum~client (a machine running software that connects it to the \ethereum~network), the transaction is broadcast to the network and received by a set of peers/nodes.
The transaction is then stored in each node's transaction pool, a data structure containing all pending transactions that the node should validate and mine. To maximize miners' returns, the transaction pools are ordered according to the gas price of the transactions. As such, transactions with the highest $\tx.\gasp$ tend to be validated and included in a block first.
Once \tx~is selected from the transaction pool, it is validated (fed into \ethVerifyTx), executed, and included in a block (i.e.~``mined''). The block is then broadcast to all the nodes of the network and is used as the predecessor for the next block to be mined on the network (i.e.~``it is added to the chain'').
\subsection{Ethereum events and Bloom filters}\label{sssec:ethereum-events}
The \evm~contains the set of ``LOGX'' instructions enabling smart-contract functions to ``emit events'' (i.e.~log data) when they are executed\footnote{see~\url{https://ethgastable.info/}}.
As such, when a block is generated by a miner or verified by the rest of the network, the address of any logging contract, and all the indexed fields from the logs generated by executing those transactions are added to a Bloom filter~\cite{DBLP:journals/cacm/Bloom70}, which is included in the block header~\cite[Section 4.3]{ethyellowpaper}. Importantly, the actual logs \emph{are not included in the block data} in order to save space.
As such, when an application wants to find (``consume'') all the log entries from a given contract, or with specific indexed fields (or both), the node can quickly scan over the header of each block, checking the Bloom filter to see if it may contain relevant logs. If it does, \emph{the node re-executes the transactions from that block, regenerating the logs, and returning the relevant ones to the application}~\cite{eth-bloom-filters}.
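The membership test behind this scan can be sketched as follows (Python; \texttt{pycryptodome} assumed; the 2048-bit size and the three 11-bit indices are loosely modelled on the block-header log bloom, and the yellow paper should be consulted for the exact specification):
\begin{verbatim}
from Crypto.Hash import keccak   # pycryptodome (assumed available)

FILTER_BITS = 2048  # size of the log bloom, in bits

def bloom_bits(entry: bytes):
    """Three bit positions derived from Keccak-256(entry)."""
    h = keccak.new(digest_bits=256, data=entry).digest()
    for i in (0, 2, 4):
        # Low-order 11 bits of each of the first three byte pairs.
        yield ((h[i] << 8) | h[i + 1]) % FILTER_BITS

def bloom_add(bloom: int, entry: bytes) -> int:
    for bit in bloom_bits(entry):
        bloom |= 1 << bit
    return bloom

def bloom_maybe_contains(bloom: int, entry: bytes) -> bool:
    # May return a false positive, never a false negative.
    return all((bloom >> bit) & 1 for bit in bloom_bits(entry))
\end{verbatim}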
\begin{notebox}
The ability for a smart-contract function to ``emit'' some pieces of data when executed, and for an application to ``consume'' such pieces of data, is used in \zeth~in order to construct a \emph{confidential receiver-anonymous channel}~\cite{DBLP:conf/pet/KohlweissMOTV13}.
\end{notebox}
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% Original author:
% Olivier Simard
%
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%----------------------------------------------------------------------------------------
% PACKAGES AND OTHER DOCUMENT CONFIGURATIONS
%----------------------------------------------------------------------------------------
\documentclass{article}
\usepackage[utf8]{inputenc}
\usepackage{fancyhdr} % Required for custom headers
\usepackage{amsmath,amsfonts,amssymb,amsthm}
\usepackage{lastpage} % Required to determine the last page for the footer
\usepackage{extramarks} % Required for headers and footers
\usepackage[usenames,dvipsnames]{xcolor} % Required for custom colors
\usepackage{graphicx} % Required to insert images
\usepackage{placeins}
\usepackage{listings} % Required for insertion of code
\usepackage{courier} % Required for the courier font
\usepackage{hyperref}
\hypersetup{pdftex, colorlinks=true, linkcolor=Red, citecolor=blue, urlcolor=blue}
% Margins
\topmargin=-0.45in
\evensidemargin=0in
\oddsidemargin=0in
\textwidth=6.5in
\textheight=9.0in
\headsep=0.25in
\linespread{1.1} % Line spacing
%----------------------------------------------------------------------------------------
% NAME AND DOC SECTION
%----------------------------------------------------------------------------------------
\newcommand{\Title}{Documentation}
\newcommand{\Doc}{SuperStiff}
\newcommand{\AuthorName}{Olivier Simard}
\newcommand{\InstitutionName}{Université de Sherbrooke}
%----------------------------------------------------------------------------------------
% TITLE LAYOUT
%----------------------------------------------------------------------------------------
\title{
\textmd{\textsc{SuperStiff} documentation}\\
\vspace{6in}
%\textmd{\AuthorName}
\vspace{1cm}
}
% Set up the header and footer
\pagestyle{fancy}
\lhead{\AuthorName} % Top left header
\chead{\textsc{\Doc}\ : \Title} % Top center head
\rhead{\InstitutionName} % Top right header
\lfoot{\lastxmark} % Bottom left footer
\cfoot{} % Bottom center footer
\rfoot{Page\ \thepage\ of\ \protect\pageref{LastPage}} % Bottom right footer
\renewcommand\headrulewidth{0.4pt} % Size of the header rule
\renewcommand\footrulewidth{0.4pt} % Size of the footer rule
\setlength\parindent{0pt} % Removes all indentation from paragraphs
%----------------------------------------------------------------------------------------
% CODE INCLUSION CONFIGURATION
%----------------------------------------------------------------------------------------
\definecolor{MyDarkGreen}{rgb}{0.0,0.4,0.0} % This is the color used for comments
\lstdefinelanguage{Julia}{morekeywords={abstract,break,case,catch,const,continue,do,else,elseif,%
end,export,false,for,function,immutable,import,importall,if,in,%
macro,module,otherwise,quote,return,switch,true,try,type,typealias,%
using,while,begin},%
sensitive=true,%
%alsoother={$},%
morecomment=[l]\#,%
morecomment=[n]{\#=}{=\#},%
morestring=[s]{"}{"},%
morestring=[m]{'}{'},%
}[keywords,comments,strings]%
\lstset{%
language = Julia,
basicstyle = \ttfamily,
keywordstyle = \bfseries\color{blue},
stringstyle = \color{magenta},
commentstyle = \color{ForestGreen},
showstringspaces = false,
backgroundcolor={\color{lightgray}},
breakatwhitespace=true,
breaklines=true,
captionpos=b,
frame=tb,
resetmargins=true,
sensitive=true,
stepnumber=1,
tabsize=4,
%upquote=true
}
% Creates a new command to include a julia script, the first parameter is the filename of the script (with .jl), the second parameter is the caption
\newcommand{\juliascript}[4]{
\begin{itemize}
%\item[]\lstinputlisting[caption=#2,firstline=#3,lastline=#4,label=#1]{#1.py}
\item[]\lstinputlisting[caption=#2,firstline=#3,lastline=#4,label=#1]{../../examples/InplaneCalc/GreenPer/params.json}
\end{itemize}
}
\newcommand{\juliascriptFT}[4]{
\begin{itemize}
%\item[]\lstinputlisting[caption=#2,firstline=#3,lastline=#4,label=#1]{#1.py}
\item[]\lstinputlisting[caption=#2,firstline=#3,lastline=#4,label=#1]{../../examples/FiniteTemperatureCalc/FiniteTSuperstiff.jl}
\end{itemize}
}
%----------------------------------------------------------------------------------------
\begin{document}
%----------------------------------------------------------------------------------------
% TABLE OF CONTENTS
%----------------------------------------------------------------------------------------
\maketitle
%\setcounter{tocdepth}{1} % Uncomment this line if you don't want subsections listed in the ToC
\newpage
\tableofcontents
\newpage
%----------------------------------------------------------------------------------------
% CONTENT
%----------------------------------------------------------------------------------------
\section{Outlook}
\label{sec:outlook}
This documentation is intended for those who wish to compute the superfluid stiffness using Cluster Dynamical Mean-Field Theory (CDMFT)~\cite{charlebois_these}. The main programs have been tailored to $8\times 8$ cluster self-energies $\Sigma_c$, that is, self-energies obtained by solving a $2\times 2$ cluster impurity problem. The code can be straightforwardly adapted to bigger clusters.\\
In this document, the following aspects are set out:
\begin{itemize}
\item The main tasks \textsc{SuperStiff} accomplishes
\item The structure of the program \textsc{SuperStiff}
\item The main functions of the different modules
\end{itemize}
I will also follow through different examples given in the folder \path{examples}. Some of the examples are part of the results already shown in my master's thesis~\cite{simard_master}. If you still haven't read the main \path{README} file stored in the folder \path{SuperStiff}, take a look! This document gives complementary information for anyone who wants to dig deeper.\\
The calculations can be run on a personal laptop since the program is not too time-consuming. This Julia program has only been benchmarked on Linux operating systems. Nevertheless, the installation of Julia and of the different packages is quite easy and does not depend on the operating system as far as I can tell\footnote{The Julia programming language is well supported on all major operating systems (macOS, Windows or Linux).}.
\section{Purpose of the program}
\label{sec:Purpose}
The main purpose of \textsc{SuperStiff} is to compute the superfluid stiffness in the layered cuprates. It is a wrapper over a CDMFT procedure that would provide converged self-energies. This program computes the superfluid stiffness along all the principal axes of the unit-cell. The current vertex corrections are neglected.
It computes the superfluid stiffness using any of the periodization schemes available, that is, periodizing the Green's function (\textit{pér. G}), periodizing the cumulant of the Green's function (\textit{pér. M}), or tracing over the cluster Green's function (\textit{tr.}) (see section 4.2.3 of Ref.~\cite{simard_master}). It does so whether one has a self-energy that has converged in the pure SC state or in the mixed AF+SC state.
\section{Structure of the program}
\label{sec:structure_of_program}
The structure of the program is outlined in this section, setting out the information that must be provided for the program to run successfully. The program uses the Julia programming language. Versions of the code are available in both Python and C++, but these versions have had poor maintenance and are prone to bugs.
The program is composed of three different modules stored in the folder \path{src}, three different main files stored in the folder \path{examples} and one input file named \path{params.json}. The objective here is to explain each and every entry in the input file. The content of the \path{params.json} file is exposed in listing \ref{params.json}. It can be helpful to first read appendices M and N of Ref.~\cite{simard_master}.
\clearpage
\subsection{Zero temperature calculation}
\label{zero_T_calculation}
For zero temperature calculations of the superfluid stiffness, the main program \path{OutofPlaneSuperStiff.jl} can be used to compute the $c$-axis current-current correlation function, or \path{InplaneSuperStiff.jl} can be used to compute the $a$- or $b$-axis current-current correlation function. Each of these main programs needs the input file \path{params.json} to specify important parameters. The content of the \path{params.json} file is described below.
\juliascript{params.json}{Content of params.json}{1}{17}
The first three parameters stand for three nearest-neighbor tight-binding hopping terms: $t$ depicts the nearest-neighbor hopping term, $t^{\prime}$ the second nearest-neighbor hopping term and $t^{\prime\prime}$ the third nearest-neighbor hopping term. The example given is for YBCO. The other input parameters are
\begin{description}
\item["inplane axis":] specifies the axis along which the superfluid stiffness is to be computed. Only the following set of strings is acceptable: $\{\text{"xx"},\text{"yy"}\}$. This parameter doesn't matter if the $c$-axis superfluid stiffness is computed ($zz$).
%
\item["path\_to\_files":] specifies the file path containing the cluster self-energies resolved in Matsubara frequencies. The file path MUST start by either "NOCOEX" in the case of pure SC calculations or "COEX" in the case of mixed AF+SC calculations.
%
\item["data\_loop":] specifies the file path containing the chemical potential, the particle density and the order parameter amplitude(s). The number of self-energy binary files (*.npy) has to be the same as the number of lines in the \path{NOCOEX/U8/Loop_NOCOEX.dat} file and the integer specifying the line has to correspond to the one of the self-energy binary plus 2 (because Python and C++ starts with number $0$ and there is a header). For example, the binary file named \path{NOCOEX/U8/SEvec_b500_SC/SEvec_b500_SC138.npy} corresponds to line 140 in the file \path{NOCOEX/U8/Loop_NOCOEX.dat}.
%
\item["AFM\_SC\_NOCOEX":] this field takes in the binary set of values $\{0,1\}$. For example, when $0$ is entered, the formula Eq.(5.31) or Eq.(5.37) of Ref.\cite{simard_master} are used with the CDMFT self-energy converged in the mixed state in the case \path{OutofPlaneSuperStiff.jl} is launched. The formula Eq.(5.31) is used if \textbf{"Periodization"} is set to $0$ and Eq.(5.37) if $1$ is provided instead. If the file \path{InplaneSuperStiff.jl} were used instead and \textbf{"AFM\_SC\_NOCOEX"} were set to 0, it would have called Eq.(5.30) having set \textbf{"Periodization"} to $0$ or Eq.(5.36) having set \textbf{"Periodization"} to 1. If otherwise \textbf{"AFM\_SC\_NOCOEX"} is set to $1$, both the input fields \textbf{"path\_to\_files"} and \textbf{"data\_loop"} have to start with NOCOEX in order to use the superfluid formulae developed in the regime of AF+SC coexistence with CDMFT data converged in the pure SC state. Most of the time, \textbf{"AFM\_SC\_NOCOEX"} is set to $0$.
%
\item["Print\_mu\_dop":] this field is only relevant when debugging and takes in $\{0,1\}$. This entry is useful when \textsc{SuperStiff} is used in conjonction with other programs that are kept private. Always set to $0$. Prints out some information and the program might not work if set to $1$.
%
\item["pattern":] specifies the pattern of the binary files contained in the path \textbf{"path\_to\_files"}. It can be whatever string value, as long as it is labelled with an integer as mentionned previously and it is a binary file.
%
\item["beta":] gives the fictitious temperature that is used to sum over the fermionic Matsubara frequencies. The value of \textbf{"beta"} can be changed, but it is preferable to keep its value to 500. The higher the value is, the better it is if one periodizes the Green's function (it is not the case if one periodizes the cumulant or traces), but the calculations are lengthened. Setting it to 500 is the best compromise I have found.
%
\item["w\_discretization":] gives the number of fermionic Matsubara frequencies that compose the grid upon which one sums over. The value of 2000 can reduced, as the important is to have a great resolution at low frequencies (the superfluid stiffness converges as $\propto \frac{1}{(i\omega_n)^4}$).
%
\item["cumulant":] specifies if the cumulant of the Green's function is to be periodized: when its value is set to $0$ and \textbf{"Periodization"} is set to $1$, the Green's function is periodized. If its value is set to $1$ and \textbf{"Periodization"} is set to $1$, the cumulant is instead periodized. To trace over the cluster Green's function in order to avoid any periodization, one has to set both \textbf{"cumulant"} and \textbf{"Periodization"} to $0$. Notice that if \textbf{"Periodization"} is set to $0$, \textbf{"cumulant"} has no effect whatsoever.
%
\item["Periodization":] has already been talked about quite a lot. This field takes in the values $\{0,1\}$. If $0$ is chosen, then the cluster Green's function is not periodized when computing the superfluid stiffness. Otherwise, if $1$ is chosen, the cluster Green's function is periodized, either the cumulant or the Green's function itself, depending on the value of \textbf{"cumulant"}.
%
\item["fout\_name":] is the string name of the file that will contain the superfluid stiffness. One can name as he/she wants. The values are appended dynamically in the file at runtime. Interrupting the program does not erase the progress of the program.
\end{description}
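For reference, and assuming the grid covers the positive fermionic Matsubara frequencies only (an assumption of this note, not something taken from the source code), \textbf{"beta"} ($\beta$) and \textbf{"w\_discretization"} ($N_\omega$) define the standard grid
\begin{equation*}
\omega_n = \frac{(2n+1)\pi}{\beta}, \qquad n = 0, 1, \ldots, N_\omega - 1,
\end{equation*}
so that $\beta = 500$ and $N_\omega = 2000$ give frequencies extending up to $\omega_{N_\omega - 1} = 3999\pi/500 \approx 25$ in units of $t$.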
\subsection{Finite temperature calculation}
\label{finite_T_calculation}
To perform finite temperature calculations, no \path{params.json} input file is needed. All the important parameters to feed in are specified in the main program itself, that is the \path{FiniteTSuperstiff.jl} file.
\juliascriptFT{FiniteTSuperstiff.jl}{Input parameters in FiniteTSuperstiff.jl}{1}{15}
Listing~\ref{FiniteTSuperstiff.jl} shows the input parameters necessary for finite temperature calculations; each of these is explained and detailed below:
\begin{description}
\item[filename\_to\_write:] sets the name of the output file produced by the main program \path{FiniteTSuperstiff.jl}.
%
\item[t:] specifies the nearest-neighbor hopping term. It should always be set to $1.0$, as it represents the energy scale of the system.
%
\item[tpp:] specifies the third nearest-neighbor hopping term. It should be set to $0.0$ if one only considers the nearest-neighbor and second nearest-neighbor hopping terms, as is the case here.
%
\item[Grid\_:] determines the resolution of the $\mathbf{k}$-space grid when the parameter \textbf{AXIS\_} is set to $"xx"$ or $"yy"$. To set it to $100$ is a sensible choice.
%
\item[OPT\_:] specifies the periodization scheme ("PER" or "CUM") to calculate the superfluid stiffness. If one wants to calculate the superfluid stiffness by tracing over the cluster Green's function, one has to type in "TR". If "TR" is chosen, one has to set \textbf{AXIS\_} to "$zz$".
%
\item[AXIS\_:] specifies the principal axis of the unit-cell along which the superfluid stiffness is to be computed. The following set holds all the permissible input parameters: $\{"xx", "yy", "zz"\}$.
\end{description}
The example given in the folder \path{examples} concerns the case where only the second nearest-neighbor hopping term ($t^{\prime}$) is changed. The folder structure given in this example MUST be followed for the program to succeed. Inside the folder specifying the value of $t^{\prime}$, one has to name the folder containing the raw cluster self-energies computed at different temperatures in the following way:
\begin{equation}
\label{name_folder_inside_tp}
\text{U}\underbrace{8}_{\substack{\text{value of}\\ \text{Hubbard interaction}}}\text{m}\underbrace{5.65}_{\substack{\text{value of}\\ \text{chemical potential}}}\underbrace{\text{\_all.beta}}_{\substack{\text{contains}\\ \text{all}\\ \text{temperature calculations}}}.
\end{equation}
If, for a given temperature, the cluster self-energies don't show any anomalous components, the folder containing the self-energies at each iteration MUST end with "n". This way, these folders are ignored by the program. The main program \path{FiniteTSuperstiff.jl} is a wrapper around the program \path{Lazyskiplist}.
\subsection{Modules}
\label{sec:modules}
In this subsection, I give some broad information about the modules called by the main programs, although their use is somewhat straightforward. Some useful information is provided in case anyone wants to extend the utility/scope of this program. The three following modules are necessary for \textsc{SuperStiff} to run:
\begin{itemize}
\item SuperStiff.jl
\item PeriodizeSC.jl
\item Stiffness.jl
\end{itemize}
The first module SuperStiff is the module that pre-compiles the two other modules, that is PeriodizeSC and Stiffness (it acts as the binder). All these files have to be stored as indicated in the \path{INSTALL} file in \path{src}.\\
The second module, PeriodizeSC, contains the lower-level functions. It contains the function that builds the cluster Green's function from the cluster self-energy, all the different superfluid stiffness formulae, and the many wrapper functions (decorators) necessary to integrate using cubature. It is the heaviest module of \textsc{SuperStiff}.\\
The last module, Stiffness, calls in PeriodizeSC --- it builds on its member functions (inherits from it). This module loads the \path{params.json} file holding the instructions for the program. From the information taken from the input file, it selects the proper functions to be called from PeriodizeSC. If anyone modifies the input parameters of \path{params.json}, particular attention should be paid to the Stiffness module.\\
From this small overview and the several examples provided in the folder \path{examples}, one should be able to take ownership of this program.
%----------------------------------------------------------------------------------------
% REFERENCES
%----------------------------------------------------------------------------------------
\nocite{*}
\bibliographystyle{ieeetr}
\bibliography{BibDoc}
%
%\begin{lstlisting}
%#= This is a sample of the input file params.json =#
%1 {
%2 "t": 1.0,
%3 "tp": -0.3,
%4 "tpp": 0.2,
%5 "inplane_axis": "xx",
%6 "path_to_files": "COEX/U8/SEvec_b500_SC_AFM/",
%7 "data_loop": "COEX/U8/Loop_COEX.dat",
%8 "AFM_SC_NOCOEX": 0,
%9 "Print_mu_dop": 0,
%10 "pattern": "SEvec*.npy",
%11 "beta": 500,
%12 "w_discretization": 2000,
%13 "cumulant": 1,
%14 "Periodization": 1,
%15 "fout_name": "stiffness_b500_w_2000_coex_int_K_per_cum_U8.dat"
%16 }
%%\end{lstlisting}
\end{document}
% Chapter on Online Monitoring, Data Quality Monitoring & Event Displays
% 10 pages
\graphicspath{{OnlineMonitoring/Figs/}}
%----------------------------------------------------------------------------------------------------------------------------------------------------------------------------
\chapter{Online Monitoring and Event Displays for the 35-ton Experiment}\label{chap:OnlineMonitoring}
Monitoring of the data collected during the running of an experiment is imperative to ensure a high quality is maintained. Such monitoring is often provided in real-time (`online monitoring'), summarising the data from the current run, or in near real-time (`nearline monitoring'), summarising data over runs from typically the previous day, week or month to represent the longer term fluctuations in the data quality. An event display, designed to illustrate physics events as they occur in the detector, is another desirable feature that is particularly useful during data collection. The system developed to provide online feedback, including a basic event display, for the 35-ton Phase~II data taking period, described in detail in Chapter~\ref{chap:35ton}, is the subject of this present chapter.
The framework was designed to be flexible and provide prompt feedback for those operating the experiment; it was thus included as part of the DAQ system, discussed in Section~\ref{sec:35tonDAQ}. The monitoring framework itself is the subject of Section~\ref{sec:OnlineMonitoring}, with its two functions, data quality monitoring and producing online event displays, presented in Sections~\ref{sec:DQM} and~\ref{sec:EventDisplay} respectively. Finally, the web interface developed to allow synchronisation of this monitoring data to a dedicated web page for ease of access is briefly described in Section~\ref{sec:WebInterface}.
% Moved to 35ton chapter
%% %----------------------------------------------------------------------------------------------------------------------------------------------------------------------------
%% \section{The DAQ Framework}\label{sec:lbne-artdaq}
%% Experiments at FNAL are migrating to \textit{artdaq}, a centrally-maintained data acquisition system built on the art framework utilised by all offline software written for experiments hosted at the lab. The DUNE 35-ton experiment was one of the first to use this new software (only LArIAT had previously used it for data taking) and used an experiment specific system named lbne-artdaq.
%% %% \footnote{Since the formation of the DUNE experiment occurred only a few months before the running of the 35-ton, all online software maintained the use of the outdated `lbne' descriptor to prevent unnecessary potential problems associated with large scale code changes and alleviate the risk of further delays. It should be again stressed that the 35-ton was recognised by the DUNE collaboration as an integral part of the DUNE plan and the use of \textit{lbne} was in no way an indication of a project associated only with the dissolved previous experiment!}.
%% A general overview of lbne-artdaq is shown in Figure~\ref{fig:lbne-artdaq}.
%% %% \begin{figure}
%% %% \centering
%% %% \includegraphics[width=16cm]{artdaqFramework.pdf}
%% %% \caption[The \textit{lbne-artdaq} framework]{Overview of the \textit{lbne-artdaq} framework used for data acquisition by the DUNE 35-ton experiment \cite{Freeman2014}. See the text for a complete description.}
%% %% \label{fig:lbne-artdaq}
%% %% \end{figure}
%% Data flows from left to right and pass through components common to most DAQ systems. Closest to the detector components (i.e. the RCEs, SSPs and PTB [see Section~\ref{sec:DetectorComponents}]) are the board readers which take the output from the firmware as soon as it is ready and sends it downstream to the event builders. There exists a board reader for each of the detector components (totalling 24) and each is unaware of the existence of the others. It is the job of the event builders to assemble a full `event' from these individual `fragments' passed on from each of the detector elements. An event is complete once composed of a full set of fragments and the event builders will wait to receive them all before sending the data onwards to the aggregators.
%% There are two aggregators which take the full events but process them in very different ways. All the data passes through only the first aggregator, whose function it is to write the output to disk and thus end processing by the DAQ. The second aggregator receives no events but instead has access to the shared memory occupied by the data as it passes through the first aggregator; it is thus designed specifically for the purpose of monitoring and in no way affects the data or the output from the first aggregator. It is within this second aggregator process that the online monitoring system described in the proceeding section is designed to run.
%% Each of the DAQ processes runs on a machine on the private DAQ network and is configured as normal within art (using the \textit{fhicl} (Fermilab Hierarchical Configuration Language) configuration language). Two nodes on the main FNAL network (lbne-gateway01/02) provide access to these private machines, of which there are 7 (lbnedaq1-7), and contain all scripts and setup necessary to run through the DAQ via a command line interface.
%----------------------------------------------------------------------------------------------------------------------------------------------------------------------------
\section{The Online Monitoring Framework}\label{sec:OnlineMonitoring}
The framework developed for the monitoring system had the following design goals:
\begin{itemize}
\item to be able to analyse the data read out of memory in its raw `DAQ format';
\item to be computationally efficient to allow for processing at the event rate (data taking rate);
\item to provide the flexibility for further monitoring plots to be added with ease;
\item to allow for use of an online event display to provide comprehensible images of the raw data.
\end{itemize}
In general, the developed system succeeded in all these goals and provided invaluable information, becoming an integral tool in the commissioning and the data taking during the 35-ton Phase~II run.
\begin{figure}
\centering
\includegraphics[width=12cm]{softwareFramework.eps}
\caption[The software framework designed and built for online monitoring during the 35-ton Phase~II run.]{The software framework designed and built for online monitoring during the 35-ton Phase~II run.}
\label{fig:OnlineMonitoringFramework}
\end{figure}
%----------------------------------------------------------------------------------------------------------------------------------------------------------------------------
\subsection{Design of the Monitoring Framework}\label{sec:MonitoringFrameworkDesign}
The setup consists of a central `module', \texttt{OnlineMonitoring\_module.cc}, which is configured within the \textit{art} framework through its base class. An illustration of the full monitoring framework is shown in Figure~\ref{fig:OnlineMonitoringFramework}. The OnlineMonitoring class manages the running of the system and owns instances of further classes each designed for a specific purpose, controlling the data flow by calling the relevant methods when required. Once an event has been obtained, the data for each component is processed and repackaged into RCEFormatter, SSPFormatter and PTBFormatter objects. The purposes of this method are
\begin{itemize}
\item to provide an interface between the raw data and the methods which analyse the data. This is important as it provides a single point of maintenance for when formats change and allows for various `DAQ modes' to use the same analysis code;
\item to separate interaction with the DAQ from the handling of output data objects;
\item to facilitate random access of the data for more detailed analysis which would not be possible if just processing linearly.
\end{itemize}
The main drawback to performing this step is that it requires all the data to be held in memory until the end of the event and duplicates essentially the same information as is initially present. However, it was decided the advantages were worth the required compromises in memory usage and no problems were apparent during the course of the run except when operating at the very limits of DAQ capabilities.
These reformatted data objects are then passed to the methods in the MonitoringData class for analysis. This class owns all of the data products which are output from the monitoring (e.g. histograms, graphs, trees and files) and deals with their filling and writing out when required. This is discussed further in Section~\ref{sec:DQM}.
The event display is handled by its own dedicated class, EventDisplay, which has methods for making the displays and saving them as an image in the correct place when required. It is designed to accept the reformatted RCE object and presents the data in as meaningful a way as possible; this is detailed fully in Section~\ref{sec:EventDisplay}.
%----------------------------------------------------------------------------------------------------------------------------------------------------------------------------
\subsection{Interface with the DAQ Framework}
The 35-ton DAQ, previously discussed in Section~\ref{sec:35tonDAQ}, was based on the \textit{lbne-artdaq} framework illustrated in Figure~\ref{fig:lbne-artdaq}. This system has support for running online monitoring embedded into its design philosophy, with the Aggregator2 process allowed to access data from shared memory as it is managed by Aggregator1. The controlling monitoring module, discussed in Section~\ref{sec:MonitoringFrameworkDesign}, may be configured to run within this second aggregator process and thus receive events in real-time as they pass through the data acquisition system.
The events are passed to the Aggregator2 process by the framework when resources are available; if this is not possible then the event is simply skipped. This behaviour does not affect the processing of the data through Aggregator1 and any events missed by the monitoring will still be saved to disk. The issues arise primarily when the monitoring runs slower than the data taking rate (i.e. when producing monitoring information for an event takes longer than the length of the event itself) and were largely inevitable due to the number of required plots and the computational resources necessary for tasks such as FFTs and event displays. As most monitoring plots, such as TPC noise, require only a few events, they are mostly unaffected; however, there are implications when calculating rates and similar quantities. During normal running, as many as half of the events may be missed by the monitoring, depending on the detail of the plots being produced. Using multiple threads detached from the main processes was considered, particularly when making event displays, as a way to increase the event exposure but, due to the potential computing issues which may arise, it was decided not to implement this for the purposes of a short prototype run.
Each of the DAQ processes runs on a machine on the private DAQ network and is configured as normal within \textit{art}, using \textit{fhicl}. Two nodes on the main FNAL network (lbne-gateway01/02) provide access to these private machines, of which there are 7 (lbnedaq1-7), and contain all scripts necessary to setup, configure and run the DAQ via a command line interface.
%----------------------------------------------------------------------------------------------------------------------------------------------------------------------------
\subsection{Writing the Monitoring Data}\label{sec:WritingMonitoringData}
The data objects are newly created for each subrun and are written out at three points during data taking:
\begin{itemize}
\item an initial write out N seconds after the start of the subrun;
\item at frequent intervals during the subrun, every M seconds;
\item at the end of the subrun.
\end{itemize}
The parameters N and M are configurable and were set to 30 and 500 respectively for normal data taking. The data products are only cleared at the end of a subrun, so any intermediate writing out of data simply refreshes the current plots.
The event displays are computationally expensive to make and so were only created once per subrun during normal running. However, since a subrun was automatically stopped, and a new one started, by the DAQ once the output file had reached 5~GB in size, and (since zero suppression was not utilised at any point during the run) this occurred on average every four minutes, a new event display was made relatively frequently.
All the output data were saved on a shared disk on the gateway DAQ machines for further use. This is discussed in Section~\ref{sec:WebInterface} below.
%----------------------------------------------------------------------------------------------------------------------------------------------------------------------------
%% \subsection{Configuring the Monitoring}\label{sec:MonitoringConfiguration}
%% [Possibly don't need this section...]
%% The system was designed to be flexible and many parameters were available to control the running of the monitoring. These are listed and described below, with default parameter provided in brackets.
%% \begin{itemize}
%% \item TPCModuleLabel (``daq'') -- art module label for TPC data saved in the DAQ;
%% \item PhotonModuleLabel ([ ``sparseSsp'', ``daq'' ]) -- art module label for photon data saved in the DAQ;
%% \item TriggerModuleLabel (``daq'') -- art module label for counter data saved in the DAQ;
%% \item DetailedMonitoring (false) -- fills more, and usually more computationally expensive, data;
%% \item ScopeMonitoring (false) -- support for a different DAQ running mode, `scope mode';
%% \item DataDirPath (``/storage/data/'') -- path at which the data files are saved by the first aggregator;
%% \item MonitorSavePath (``/data2/lbnedaq/monitoring/'') -- location to save DQM data;
%% \item EVDSavePath (``/data2/lbnedaq/eventDisplay/'') -- location to save event displays;
%% \item PedestalFile (``/data2/lbnedaq/pedestal.csv'') -- location of the most recent pedestal file, containing pedestals for all channels. This was used for pedestal subtraction when making event displays;
%% \item ImageType (``.png'') -- the format to save any images;
%% \item MonitoringRefreshRate (500) -- how often to write out the most recent monitoring data plots;
%% \item InitialMonitoringUpdate (30) -- how long after starting a new subrun to initially write out the data;
%% \item EventDisplayRefreshRate (60) -- how often to refresh the event display when not just making one per subrun;
%% \item LessFrequentFillRate (20) -- how often to fill the more computationally expensive plots;
%% \item DriftVelocity (0.9 \#mm/us) -- rough drift electron velocity, used for calculating the x coordinate for the event display;
%% \item CollectionPedestal (550) -- the default pedestal value if the file isn't readable;
%% \item MicroslicePreBuffer (5) -- how many microslices are saved by the RCEs before the one containing the trigger;
%% \item MicrosliceTriggerLength (5) -- the length of the trigger.
%% \end{itemize}
%----------------------------------------------------------------------------------------------------------------------------------------------------------------------------
\section{Data Quality Monitoring}\label{sec:DQM}
The overarching aim of the online monitoring system was to provide the experimental operators with direct feedback on the status of the data taking and the quality of the data. This is vital for many aspects of data taking, for example
\begin{itemize}
\item ensuring all detector components being used in the current run are receiving and processing data;
\item noting the TPC readout has entered the `high noise state' and acting accordingly;
\item checking the trigger rates from the external cosmic muon counters are feasible.
\end{itemize}
The monitoring was divided in a similar way to the DAQ readout, with data from the TPC, photon detectors and external counters processed separately.
%----------------------------------------------------------------------------------------------------------------------------------------------------------------------------
\subsection{TPC Monitoring}\label{sec:TPCMonitoring}
Monitoring of the TPC data involved mainly considering various distributions of the ADC values provided by the front-end boards, separated by channel, board and APA. The mean and RMS of the ADC values for a given channel provide information such as the measured pedestal and the level of noise being read out. The uncorrelated component of the noise can be monitored using the concept of `DNoise'; this considers the difference in ADC value between two neighbouring channels at a given readout time and represents the level of noise which would be impossible to remove using coherent noise filters alone. Unfortunately, for the 35-ton, this uncorrelated component made up most of the noise across all channels (see Figure~\ref{fig:DQMPlot1}). FFTs of the signal waveforms, performed separately for each RCE, were also useful in monitoring bands of noise in frequency space.
Monitoring of various other problems, such as the digitiser stuck code issue, synchronisation concerns resulting in a different number of microslices being saved in corresponding RCE millislices, and the asymmetry of bipolar pulses, was added as these issues became apparent during the commissioning.
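As an illustration, the basic per-channel quantities described above could be computed as follows (a sketch only; the function names are hypothetical and the exact definition of DNoise used online may differ, for example by a normalisation factor):
\begin{verbatim}
// Sketch of the per-channel noise quantities; names are hypothetical.
#include <algorithm>
#include <cmath>
#include <vector>

struct ChannelStats { double mean, rms; };

// Mean and RMS of the ADC values on a single channel.
ChannelStats MeanRMS(const std::vector<double>& adcs) {
  double sum = 0.0, sum2 = 0.0;
  for (double adc : adcs) { sum += adc; sum2 += adc * adc; }
  const double n = static_cast<double>(adcs.size());
  const double mean = sum / n;
  return { mean, std::sqrt(sum2 / n - mean * mean) };
}

// 'DNoise': RMS of the tick-by-tick difference between two neighbouring
// channels, sensitive only to the uncorrelated component of the noise.
double DNoise(const std::vector<double>& chanA, const std::vector<double>& chanB) {
  std::vector<double> diff;
  const size_t nTicks = std::min(chanA.size(), chanB.size());
  for (size_t t = 0; t < nTicks; ++t) diff.push_back(chanA[t] - chanB[t]);
  return MeanRMS(diff).rms;
}
\end{verbatim}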
%----------------------------------------------------------------------------------------------------------------------------------------------------------------------------
\subsection{Photon Detector Monitoring}\label{sec:PhotonMonitoring}
Analogously to the TPC data, monitoring of the photon detectors mainly involved considering various ADC distributions separated by optical channel and by photon detector. The peak height, pedestal and integral of each waveform were also considered as a function of channel to ensure each was operating consistently.
The triggers sent on by the SSPs were also studied; unfortunately, due to the design of the monitoring framework (which is not guaranteed to receive every event), trigger rates were challenging to compute. It was decided to leave them in the monitoring but only consider the relative rates; the monitoring code may be utilised offline, processing closed files on disk, to determine accurate rates by ensuring all events are considered. Along with the trigger rate, the number of triggers, the fraction of events containing a trigger and the number of readout ticks within each trigger were also considered.
During installation, one photon detector was erroneously left unconnected to its SSP and so was unavailable during the run. This was discovered using the online monitoring framework but unfortunately only following the completion of the installation and the sealing of the cryostat.
%----------------------------------------------------------------------------------------------------------------------------------------------------------------------------
\subsection{External Counter Monitoring}\label{sec:CounterMonitoring}
Since monitoring the external counters primarily involves considering trigger rates, a similar issue to the photon detector monitoring was encountered; as with the SSP triggers, only the rates relative to the other counters were considered. For each counter, the hit rate and the average activation time were monitored to ensure counters in similar positions were recording comparable cosmic muon data. The number and type of payloads sent on from the PTB were also detailed so that both the amount of data and its composition could be monitored.
%----------------------------------------------------------------------------------------------------------------------------------------------------------------------------
\subsection{General Monitoring}\label{sec:GeneralMonitoring}
A variety of useful quantities not pertaining to any specific subcomponent were also monitored to ensure smooth data taking. These include the size of output files and the average event size from recent runs, information about which detector subcomponents are taking data and the number of events seen by each, and also synchronisation information between the various detector components.
%----------------------------------------------------------------------------------------------------------------------------------------------------------------------------
\subsection{DQM Plots}\label{sec:DQMPlots}
The DQM component of the online monitoring produced around 60 figures for each subrun, illustrating the data discussed in Sections~\ref{sec:TPCMonitoring} to~\ref{sec:GeneralMonitoring}. A sample subset of these figures is shown in Figure~\ref{fig:DQMPlots}. Figure~\ref{fig:DQMPlot1} shows the TPC noise; the total noise (RMS of the ADC values) is shown in blue and the uncorrelated component of this noise in green (the gaps indicating dead wires). The FFT of a waveform read out by the first RCE (channels~1--128) is shown in Figure~\ref{fig:DQMPlot2}, and a 2D plot showing the ADC values for each channel, hugely useful as it demonstrates both the mean and RMS for all channels together, is depicted in Figure~\ref{fig:DQMPlot3}. Figure~\ref{fig:DQMPlot4} shows the subdetectors which are successfully collecting data and may be used to note that one quarter of the TPC readout, along with three photon detector readouts, was turned off in this subrun.
%; refer to Appendix \# for a reproduction of all figures from a particular run. {\color{red} Lee: I'm not sure this appendix is necessary, but happy to add it if you think it would be good. Beware it would just be $\sim$60 plots!}
\begin{figure}
\centering
\begin{subfigure}[t]{0.48\linewidth}
\centering
\includegraphics[width=0.95\textwidth]{DQM1.png}
\caption{TPC noise.}
\label{fig:DQMPlot1}
\end{subfigure}
\begin{subfigure}[t]{0.48\linewidth}
\centering
\includegraphics[width=0.95\textwidth]{DQM2.png}
\caption{FFT.}
\label{fig:DQMPlot2}
\end{subfigure}
\begin{subfigure}[t]{0.48\linewidth}
\centering
\includegraphics[width=0.95\textwidth]{DQM3.png}
\caption{ADC values as a function of channel.}
\label{fig:DQMPlot3}
\end{subfigure}
\begin{subfigure}[t]{0.48\linewidth}
\centering
\includegraphics[width=0.95\textwidth]{DQM4.png}
\caption{Subdetectors which are successfully collecting data.}
\label{fig:DQMPlot4}
\end{subfigure}
\caption[Selection of figures made by the Data Quality Monitoring framework during 35-ton Phase~II running.]{Selection of figures made by the Data Quality Monitoring framework during 35-ton Phase~II running.}
\label{fig:DQMPlots}
\end{figure}
%----------------------------------------------------------------------------------------------------------------------------------------------------------------------------
\section{Online Event Display}\label{sec:EventDisplay}
One of the highlights of the data taking was being able to watch events refresh on the online event display. In addition to providing an interesting visual display of interactions in the detector, it served as a further tool for data monitoring; high noise states, poor LAr purity and drift field problems were all immediately evident from the display.
%One of the highlights of being in ROC West (Remote Operation Control room at FNAL) during data taking was watching the online event display refresh with updated images representing cosmics passing through the detector. In addition to the interesting visual display of interactions in the detector, it allowed for data to be monitored with ease; high noise states, poor LAr purity and drift field problems were all immediately evident from the display.
Given the structure of the data read out from the detector, finding a comprehensible way to represent events proved challenging. The construction of such a display is the subject of this section.
%----------------------------------------------------------------------------------------------------------------------------------------------------------------------------
\subsection{Selecting the Data}\label{sec:SelectingEVDData}
The raw data formats for the various 35-ton data streams were discussed in detail in Section~\ref{sec:35tonDataFormats}. Each DAQ event comprises a collection of millislices, one for each of the detector subsystems (RCEs, SSPs, PTB), with further structure specific to each system and comprehensively illustrated in Figure~\ref{fig:35tonDataFormat}. An example triggered event in the 35-ton data is demonstrated in Figure~\ref{fig:35tonTriggeredEvent}.
Since the event display runs online, a suitable selection must be applied to ensure the full physics event occurs within the current DAQ event; preceding and subsequent events are inaccessible during running. This is achieved by noting whether or not a trigger occurred (i.e. microslices contain nanoslices), and in which microslice it occurred, when reformatting the RCE data in DataReformatter. For the event display, an event is only useful if the trigger occurred within a certain range (e.g. Microslice~5 to Microslice~10), ensuring all the filled microslices are present within the current millislice. The event display is then filled for a given range of microslices around the trigger to capture all the physics data.
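This selection amounts to a simple check on the position of the trigger within the millislice, sketched below (an illustration only; the function names are hypothetical and the specific microslice range is set by the configuration):
\begin{verbatim}
// Sketch of the online event selection; names are illustrative only.
#include <vector>

// Index of the microslice containing the trigger (i.e. the first microslice
// containing nanoslices), or -1 if no trigger occurred in this millislice.
int TriggerMicroslice(const std::vector<int>& nanoslicesPerMicroslice) {
  for (size_t i = 0; i < nanoslicesPerMicroslice.size(); ++i)
    if (nanoslicesPerMicroslice[i] > 0) return static_cast<int>(i);
  return -1;
}

// Only use the event if the trigger sits far enough from the millislice edges
// that all the pre- and post-trigger microslices are contained within it.
bool UsableForEventDisplay(int trigger, int preBuffer,
                           int triggerLength, int nMicroslices) {
  return trigger >= preBuffer &&
         trigger + triggerLength <= nMicroslices;
}
\end{verbatim}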
%----------------------------------------------------------------------------------------------------------------------------------------------------------------------------
\subsection{Representing the Data}\label{RepresentingEVDData}
The wrapped nature of the induction wires, and the inability to perform disambiguation without full reconstruction, result in only data from the collection plane being useful for an online event display. A second dimension is obtained by viewing the detector from above and using the drift time as a coordinate. This necessitates the two centre APAs being shown together as one combined readout structure, with a global two-dimensional coordinate system established for the entire detector. The wire coordinate is defined simply by counting wires from the collection planes across all APAs and incorporating fake wires between the frames, and the time coordinate may be used to take multiple drift regions into account by assigning all charge deposited in the short drift region to negative ticks.
The event displays are filled with the raw ADC values provided by the FE readout without the use of reconstruction. An approximate pedestal subtraction is possible using the system developed to record the pedestal values. During data taking, the shifter would perform a run at least once per shift to produce a file containing all the calculated pedestals on each channel for subsequent uploading to a database for offline use. By ensuring a copy of the most recent file is always available to the monitoring framework, the pedestals may be corrected for and the charge represented as accurately as possible. To limit noisy channels and to correct for accidental negative charge, the pedestal-subtracted ADC values are only included if within the range $0$--$250$. Finally, given the relatively low signal-to-noise ratio, it was decided that a grey-scale image showed the best resolution for observing tracks in the cryostat.
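The resulting mapping from raw ADC value to displayed intensity is therefore straightforward, as sketched below (the function name, and the mapping of the accepted range onto a unit grey scale, are illustrative assumptions rather than the exact implementation):
\begin{verbatim}
// Sketch of the pedestal subtraction and grey-scale mapping; illustrative only.

// Returns an intensity in [0, 1] for one (wire, tick) cell, or 0 if the
// pedestal-subtracted ADC value falls outside the accepted 0--250 range.
double EVDIntensity(double adc, double pedestal) {
  const double subtracted = adc - pedestal;
  if (subtracted < 0.0 || subtracted > 250.0) return 0.0;  // noise / negative charge
  return subtracted / 250.0;
}
\end{verbatim}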
An example event display is shown in Figure~\ref{fig:EVD}.
\begin{figure}
\centering
\includegraphics[width=14cm]{evd.png}
\caption[Example online event display made by the Online Monitoring framework.]{Example online event display made as part of the online monitoring framework for run 14306 (2nd March, 2016). The view is from the top of the detector looking down; the red lines represent the spaces between the APAs and the blue line the location of the APA frames, separating the long and short drift regions.}
\label{fig:EVD}
\end{figure}
%----------------------------------------------------------------------------------------------------------------------------------------------------------------------------
\section{Monitoring Web Interface}\label{sec:WebInterface}
The output of the monitoring is vital in assuring the experiment continues to take high quality, analysable data. To facilitate this process, a web interface was developed to enable all useful information to be displayed and accessed in a convenient, universal location. This interface, along with the complementary web page, was relatively basic but functional, and performed all that was required for the purposes of a short prototype run. The method of automating the transfer of the monitoring data from where it was saved by the DAQ process to somewhere accessible by the web server is briefly described in Section~\ref{sec:AutomatedDataTransfer} and the web page itself is overviewed in Section~\ref{sec:WebPage}.
%----------------------------------------------------------------------------------------------------------------------------------------------------------------------------
\subsection{Automated Data Transfer}\label{sec:AutomatedDataTransfer}
Ensuring the monitoring output was available in the correct place when needed was the most complicated part of the web interface. This was achieved using a combination of disk mounting and automated scripts, demonstrated in Figure~\ref{fig:WebInterface}.
\begin{figure}
\centering
\includegraphics[width=12cm]{webInterface.eps}
\caption[Schematic showing the interface between the online monitoring system and the web.]{Schematic showing the interface between the online monitoring system and the web. The DAQ machines are shown as rectangles with their disks represented as cylinders. Connections between a node and a disk are shown as straight lines, with dotted lines representing processes running on the machine.}
\label{fig:WebInterface}
\end{figure}
The DAQ aggregator processes run on the lbnedaq6 and lbnedaq7 nodes, requiring any saved output to be placed in a location accessible to these machines. Mounting a disk belonging to a gateway node onto these private machines, and saving the output directly onto this disk, ensured the data were available outside of the private network. The constraints placed on the configuration by the DAQ group, which preferred nothing other than DAQ processes to run on lbne35t-gateway01, required that a second gateway node, lbne35t-gateway02, be utilised. The web transfer framework was completed by mounting the Fermilab web area onto this machine and utilising an automated job to copy the monitoring data from the disk to the relevant part of the web server. The frequency of this job, 30 seconds, defined the maximum latency one could expect between data being written out and images appearing online.
%----------------------------------------------------------------------------------------------------------------------------------------------------------------------------
\subsection{Web Page}\label{sec:WebPage}
The web page was hosted at FNAL and located at lbne-dqm.fnal.gov. When the monitoring framework initiates a write out of all data products, the HTML necessary to correctly display these images is also written and saved as part of the output. This is copied, along with all the images and data files, to the web area as discussed in Section~\ref{sec:AutomatedDataTransfer}. The web page was basic but fulfilled all fundamental requirements for 35-ton monitoring; it had dedicated pages for all the data quality monitoring information and the online event display (the nearline monitoring was also hosted at this website but is not described here). See Figure~\ref{fig:WebPage} for a demonstration of the web page and example navigation.
\begin{figure}
\centering
\includegraphics[width=14cm]{webPage.png}
\caption[Demonstration of the web page developed to display information produced by the online monitoring and event display.]{Demonstration of the web page developed to display information produced by the online monitoring and event display. The pages are written in HTML and allowed prompt and convenient feedback directly from the DAQ to be accessed anywhere, assisting in remote monitoring of the experiment. All previous runs are also kept on the website for reference.}
\label{fig:WebPage}
\end{figure}
%----------------------------------------------------------------------------------------------------------------------------------------------------------------------------
\section{Online Monitoring Summary}
The monitoring, with web support, was imperative for the success of the 35-ton. During the ongoing vertical slice tests in summer 2015, the majority of the setup was already in place and enabled the testing and commissioning of the APAs to be completed significantly faster than it otherwise would have been. During this time, and also during commissioning, the framework was the only way of analysing the data without reading it into LArSoft and writing specific software. Overall, the framework provided essential feedback and contributed positively towards DAQ uptime during the data taking period. It is currently being adapted for future use in DUNE, specifically as part of the ProtoDUNE DAQ for the run in 2018.
| {
"alphanum_fraction": 0.7168280944,
"avg_line_length": 129.4538461538,
"ext": "tex",
"hexsha": "b2b4ce3243cff5e82a8772c64518ccc213840de2",
"lang": "TeX",
"max_forks_count": null,
"max_forks_repo_forks_event_max_datetime": null,
"max_forks_repo_forks_event_min_datetime": null,
"max_forks_repo_head_hexsha": "ebd856669d3ec628c8e4f025ee4196845d9282b7",
"max_forks_repo_licenses": [
"MIT"
],
"max_forks_repo_name": "mike-wallbank/thesis",
"max_forks_repo_path": "OnlineMonitoring/OnlineMonitoring.tex",
"max_issues_count": null,
"max_issues_repo_head_hexsha": "ebd856669d3ec628c8e4f025ee4196845d9282b7",
"max_issues_repo_issues_event_max_datetime": null,
"max_issues_repo_issues_event_min_datetime": null,
"max_issues_repo_licenses": [
"MIT"
],
"max_issues_repo_name": "mike-wallbank/thesis",
"max_issues_repo_path": "OnlineMonitoring/OnlineMonitoring.tex",
"max_line_length": 1256,
"max_stars_count": 1,
"max_stars_repo_head_hexsha": "ebd856669d3ec628c8e4f025ee4196845d9282b7",
"max_stars_repo_licenses": [
"MIT"
],
"max_stars_repo_name": "mike-wallbank/thesis",
"max_stars_repo_path": "OnlineMonitoring/OnlineMonitoring.tex",
"max_stars_repo_stars_event_max_datetime": "2020-09-11T11:07:09.000Z",
"max_stars_repo_stars_event_min_datetime": "2020-09-11T11:07:09.000Z",
"num_tokens": 6642,
"size": 33658
} |
\documentclass[a4paper,twocolumn]{article}
\usepackage{graphicx}
\usepackage{url}
\usepackage[utf8]{inputenc}
%opening
\title{AUTOMATABLE EVALUATION METHOD\\ORIENTED TOWARD BEHAVIOUR BELIEVABILITY\\FOR VIDEO GAMES}
\author{
Fabien Tenc\'e$^{*,**}$ and C\'edric Buche$^*$\\
$^*$ Universit\'e Europ\'eenne de Bretagne - ENIB\\
LISyC - CERV\\
25, rue Claude Chappe\\
29280 Plouzan\'e (France)\\
$^{**}$ Virtualys\\
41 rue Yves Collet\\
29200 Brest (France)\\
\{tence,buche\}@enib.fr
}
\date{}
\begin{document}
\maketitle
\thispagestyle{empty}
\keywords{Artificial intelligence, agent evaluation, believable behaviour, human tracking}
\begin{abstract}
Classic evaluation methods of believable agents are time-consuming because they involve many humans to judge agents. They are well suited to validate work on new believable behaviour models. However, during the implementation, numerous experiments can help to improve agents' believability. We propose a method which aims at assessing how much an agent's behaviour looks like humans' behaviours. By representing behaviours with vectors, we can store data computed for humans and then evaluate as many agents as needed without further need of humans. We present a test experiment which shows that even a simple evaluation following our method can reveal differences between quite believable agents and humans. This method seems promising although, as shown in our experiment, results' analysis can be difficult. % 121 words
\end{abstract}
\section{INTRODUCTION}
\paragraph{}%games->believable agents->what for?->what is it?
The vast majority of video games feature computer controlled virtual characters, also known as agents, to enrich their environment. For games to be able to suspend disbelief \cite{Bates1992}, those agents should be \emph{believable}. However, this term has two distinct meanings: first, the ability to be a good role-player and second, the ability to be indistinguishable from a human player \cite{Livingstone2006}. We aim at agents taking on the role of players, so we will use the latter definition in this paper.
\paragraph{}%why eval?->why is it difficult for believable?->overview of existing methods->problems
When modelling or implementing an agent's behaviour, an important step is evaluation. It can be very useful to improve the final result by tuning the model's parameters or by modifying the model itself. Evaluation is almost mandatory to validate work and should be able to answer: to what extent does the agent meet the objectives? Is it better than what has been done before? However, evaluation of believable agents is rather difficult because believability is based on the observers' feelings. Many studies deal with this problem, such as \cite{MacNamee2004,Livingstone2006,Gorman2006}. However, they still rely on humans to judge agents. As a result, this kind of method cannot be used to evaluate a large number of different behaviours. Moreover, the use of questionnaires for evaluation is criticised \cite{Slater2004}.
\paragraph{}%why new method?->objective of the presented method
Knowing these problems, we propose a method using a different approach: we aim at reducing human intervention as much as possible while still helping researchers assess artificial behaviours' believability. The objective is to have a method which can be automated and thus used in optimisation algorithms or for tests run overnight. For this, we chose to measure artificial behaviours' likeness to human behaviours, a concept close to believability. Of course, this method does not aim at replacing classic ones but rather offers a complementary approach so that the final result looks more believable.
\section{EVALUATION METHOD PROPOSITION}
\subsection{Principle}
\paragraph{}%General Principle
The evaluation's general principle is to compute vectors which are features of humans' and agents' behaviours. We call those vectors ``signatures'' as they should be designed to be representative of a behaviour. By measuring the distance between an agent's signature and humans', it should be possible to tell if its behaviour looks like a human behaviour or not.
\subsection{Protocol}
\paragraph{}%Protocol
Here are the main stages of the protocol to evaluate an agent with respect to humans:
\begin{enumerate}
\item Define behaviour signatures and metrics between them;
\item Monitor humans (the more the better) and compute the signatures;
\item Monitor the agent to be evaluated and compute the signatures in the same conditions as the monitored humans;
\item Compute the distance between each agent's signature and its human equivalent.
\end{enumerate}
\paragraph{}%How to: signature and metric
The first part is very challenging because we need to find signatures having a low variance for humans, while still being able to detect non-believable behaviours. This can be a problem when humans are given great freedom because their behaviour can vary widely. To determine good signatures, it can be necessary to run the protocol several times with a few humans and to modify the signatures and/or metrics so that they have the desired properties.
\paragraph{}%Same conditions
For the second and the third steps, humans and agents must be studied under the same conditions. Since agents do not play against humans, if virtual characters coexist in the same environment we have to make a concession: for step 2, humans play together and for step 3, agents play together. If we introduced agents in step 2, the signatures could be flawed by reflecting interaction between humans and non-believable agents.
\paragraph{}%advantage: step 3 and 4 can be redone without further need of humans -> automatization!
We presented our method as being automatable. Indeed, steps 3 and 4 can be redone without further need of humans. However, automatable does not mean fast, as step 3 can take some time. To determine the experiment's duration, a study should be done to test how much time the signatures need to stabilise.
\subsection{Monitoring agents and humans}
\paragraph{}%Point of view: perception, decision, action loop scheme
As explained in the protocol, we must monitor both humans and agents so that we can compute signatures. To be totally independent of the internal architecture, we chose to take the same point of view as a judge looking over the subject's shoulder. Therefore, in the well-known perception-decision-action loop we only have access to the actions and the perceptions. We assume that the loop can be observed for humans as well, so that we can compare agents' and humans' perceptions and actions in the same manner.
\paragraph{}%Monitor variables and events -> build signatures (few data that describe well a behaviour)
The principle of the monitoring is to keep track of a subset of these perceptions and actions during simulations so as to build the signatures. For example, in a first person shooter, we could track basic actions and perceptions such as whether the actor is jumping and whether it sees an enemy. It would then be possible to build a very simple signature measuring the proportion of jumps when an enemy is visible.
\paragraph{}%signatures : actions and/or perceptions ?
There are three main kinds of signatures: perception-based, action-based and those linking actions to perceptions. Perception-based signatures are not very useful because players judge agents on their actions. Action-based signatures can be useful but may be too simple to explain complex behaviours. Finally, signatures linking actions to perceptions are the most interesting ones because they may find patterns in the decisions. However, there is a difficulty: if a piece of information is present in the monitored perceptions, it is not certain that the agent or the human actually noticed it.
\section{EXPERIMENT}
\paragraph{}%present UT2004 game mechanics / why UT is interesting for researchers
For the experiment, we used the game Unreal Tournament 2004. It is a first person shooter game; in other words, each player or agent controls a unique virtual character and sees through its eyes. The character can, among other things, grab items (weapons, \dots{}), move (walk, jump, \dots{}) and shoot with a weapon. Each character has an amount of hit points, also known as life points: each time an actor is hit by enemy fire, a certain amount of hit points is subtracted from the current value. When the hit points reach zero, the character ``dies'' but can usually reappear at another place in the virtual environment. Although the concept can seem very basic, it can prove challenging for agents to mimic humans.
\paragraph{}%Objectives
This experiment's main objective is to give a concrete example and to show the interest of the method. Therefore, the signatures are simple and would be insufficient for a real evaluation. Moreover, for the sake of simplicity, this sample experiment tries to assess only the believability of Unreal Tournament 2004 agents' movements. Here are the signatures:
\begin{itemize}
\item Velocity change angle: 20-dimension vector, the value of the $i$-th component is the number of times we measured an angle of approximately $i$ between $\overrightarrow{V}_t$ and $\overrightarrow{V}_{t+1}$. Note that the vector has only 20 components, so an angle of 20 corresponds to a whole turn, 10 to a half turn, etc. This vector is then normalised.
\item Velocity relative to the direction: 20-dimension vector, the value of the $i$-th component is the number of times we measured an angle of approximately $i$ between $\overrightarrow{V}_t$ and $\overrightarrow{D}_t$. The same unit as the previous signature is used for angles. This vector is then normalised.
\end{itemize}
where $t$ is the current time step and $t+1$ is the following one, time steps occurring every 125 ms. $\overrightarrow{V}$ is the velocity vector without its $z$ component, $\overrightarrow{D}$ is a vector pointing toward the character's aiming point without its $z$ component (figure \ref{vectors}). The $z$ component was ignored for the signatures because it simplifies the signatures without losing too much information.
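To make the construction of these histograms concrete, the first signature could be computed as sketched below (a sketch only; the helper names are illustrative, and the choice of 20 angular bins covering a full turn follows the description above, not necessarily the exact implementation):
\begin{verbatim}
// Sketch of the "velocity change angle" signature; helper names are illustrative.
#include <algorithm>
#include <cmath>
#include <vector>

struct Vec2 { double x, y; };   // velocity sample with the z component dropped

// Angle between two vectors mapped onto bins 0..19, where 20 bins = one full turn.
int AngleBin(const Vec2& a, const Vec2& b) {
  const double kTwoPi = 6.283185307179586;
  double turns = (std::atan2(b.y, b.x) - std::atan2(a.y, a.x)) / kTwoPi;
  turns -= std::floor(turns);                       // wrap into [0, 1)
  return static_cast<int>(turns * 20.0) % 20;
}

// Normalised 20-component histogram of angles between successive velocities.
std::vector<double> VelocityChangeSignature(const std::vector<Vec2>& v) {
  std::vector<double> sig(20, 0.0);
  for (size_t t = 0; t + 1 < v.size(); ++t) sig[AngleBin(v[t], v[t + 1])] += 1.0;
  const double n = std::max<double>(1.0, static_cast<double>(v.size()) - 1.0);
  for (double& s : sig) s /= n;
  return sig;
}
\end{verbatim}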
\begin{figure}[!ht]
\centering
\includegraphics[width=0.8\linewidth]{pdf/vectors.eps}
\caption{Scheme of a virtual character viewed from top view at two following time steps, $t$ and $t+1$. $\protect\overrightarrow{D}$ is the direction vector, pointing toward the character's aiming point and $\protect\overrightarrow{V}$ is the velocity vector.}
\label{vectors}
\end{figure}
\paragraph{}%protocol
The experiment is composed of two steps:
\begin{enumerate}
\item 6 low/medium level humans played a \emph{deathmatch} game for 20 minutes, during which the signatures were computed. We chose a duration of 20 minutes because signatures tend to stabilise after 15 to 20 minutes. A \emph{deathmatch} follows the basic rules and the objective is to ``kill'' as many other players as possible. Each human plays using a keyboard and a mouse to control the virtual character and a classic desktop computer screen to see the virtual environment.
\item Signatures are computed for 8 agents having 8 different skill levels. Those agents fight against each other in 6-agent matches in the same topography, following the same rules and during the exact same time.
\end{enumerate}
We used tools developed by our team and based on Pogamut 2 \cite{Burkert2007}. They are available at \url{svn://artemis.ms.mff.cuni.cz/pogamut/branches/fabien_tence}. The experiment gave us 2 signatures for each of the 14 subjects, each signature having 20 components. A sample of those results is given in figure \ref{signatures}.
\begin{figure}[!ht]
\centering
\includegraphics[width=0.9\linewidth]{original/signaturesBW2_corrected.ps}
\caption{Sample of the signatures. On the left ``velocity change angle'' signatures and on the right ``velocity relative to the direction'' signatures. The graphs are truncated for the $0^{th}$ component for the sake of readability.}
\label{signatures}
\end{figure}
\section{RESULTS' ANALYSIS}
\paragraph{}%presenting PCA/ what for
In order to visualise the data, we chose to do a Principal Components Analysis (PCA) on the signatures. Subjects are represented using the two principal components in figures \ref{PCAv} and \ref{PCAdv}.
\begin{figure}[!ht]
\includegraphics[width=\linewidth]{pdf/PCAv.ps}
\caption{Subjects represented using the two principal components which explain 90.37\% of the variance for the ``velocity change angle'' signatures.}
\label{PCAv}
\end{figure}
\begin{figure}[!ht]
\includegraphics[width=\linewidth]{pdf/PCAdv.ps}
\caption{Subjects represented using the two principal components which explain 78.18\% of the variance for the ``velocity relative to the direction'' signatures.}
\label{PCAdv}
\end{figure}
\paragraph{}%analysis of the PCA results
The first chart (figure \ref{PCAv}) represents the data very well, with about 90\% of the variance represented. The first PCA axis is negatively correlated with brutal angle changes and the second PCA axis is positively correlated with small angle changes. The second chart (figure \ref{PCAdv}) is less accurate with only 78\% of the variance represented; however, it suffices to extract some interesting features of the data. The first PCA axis is globally negatively correlated with walking backwards and backwards+sideways. The second PCA axis is positively correlated with walking forward+slightly sideways and negatively with forward+sideways.
\paragraph{}
The most important information that both charts reveal is that there is a difference between humans' and agents' behaviours: it is possible to draw a line which separates humans from agents on the two charts. To study the influence of efficiency, agents and humans are ranked depending on their score (number of ``kills''). As humans did not play against agents, these ranks apply only within those two groups: Human1 is more efficient than Human2, which is more efficient than Human3, etc. Agent1 is more efficient than Agent2, etc., but we do not know if Agent1 is more efficient than Human1. It seems that the \emph{skill} parameter which influences the agents' efficiency has a quite artificial effect: agents are ordered along the first principal component on both charts.
\paragraph{}%reason for another analysis tool
Even if those results are promising, PCA suffers from a flaw in our case: in the signatures, a component $i$ is much closer in terms of angle to the $(i+1)$-th and $(i-1)$-th components than to the $(i+2)$-th and $(i-2)$-th, which is not taken into account by the PCA. To calculate the distance between two signatures considering this particularity, we chose to use the Earth Mover's Distance (EMD) \cite{Rubner2000}. Figuratively speaking, the distance between two vectors $V_1$ and $V_2$ is equal to the minimum effort made to carry earth from the ``relief'' $V_1$ to the ``relief'' $V_2$. The interest of this metric is explained in figure \ref{EMD}.
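For two normalised one-dimensional histograms with unit ground distance between adjacent bins, the EMD reduces to the sum of absolute differences of the cumulative distributions, as sketched below (a sketch only; it ignores the circular nature of the angle bins, which a full implementation may want to take into account):
\begin{verbatim}
// EMD between two normalised 1D histograms with unit ground distance between
// adjacent bins; the circular nature of the angle bins is ignored here.
#include <cmath>
#include <vector>

double EMD1D(const std::vector<double>& h1, const std::vector<double>& h2) {
  double carried = 0.0, emd = 0.0;
  for (size_t i = 0; i < h1.size(); ++i) {
    carried += h1[i] - h2[i];   // earth carried across the boundary after bin i
    emd += std::abs(carried);
  }
  return emd;
}
\end{verbatim}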
\begin{figure}[ht]
\begin{minipage}[c]{0.57\linewidth}
\centering
\includegraphics[width=\linewidth]{original/EMD3.eps}
\end{minipage}
\hspace{0.5cm}
\begin{minipage}[c]{0.35\linewidth}
\centering
\hspace{-0.8cm}
\begin{tabular}{|l|c|c|}
\hline
&EMD&$L_2$\\
\hline
$V_1$,$V_2$&3.0&1.15\\
$V_1$,$V_3$&9.6&1.15\\
$V_2$,$V_3$&7.0&1.15\\
\hline
\end{tabular}
\end{minipage}
\caption{Example showing the interest of the EMD compared to Euclidean distance ($L_2$) in our case. The normalised vectors not having any common component, the $L_2$ distance is then equal between each other. However, it is obvious that $V_1$ and $V_2$ are the most similar vectors which is detected by the EMD.}
\label{EMD}
\end{figure}
\begin{figure}[!ht]
\includegraphics[width=\linewidth]{pdf/MDSv.ps}
\caption{Result of a MDS applied to EMD matrix computed on ``velocity change angle'' signatures. This representation represents well the original distances as the stress is a bit less than 0.003.}
\label{MDSv}
\end{figure}
\paragraph{}%presenting MDS / advantages over PCA in this case
With the EMD, we can build a matrix of dissimilarities by computing the distance between each pair of signatures. To visualise these dissimilarities, we used a method called Multidimensional Scaling (MDS) which consists in representing each vector by a point in a plane. The goal of MDS is to approximate a given dissimilarity matrix between vectors by a matrix of Euclidean distances ($L_2$) between the points representing the vectors. The results of the MDS method applied to the dissimilarity matrices computed with EMD on our signatures are given in figures \ref{MDSv} and \ref{MDSdv}. Note that there is no need for a scale because it is the relative distance between points that is important, not the real distance.
\begin{figure}[!ht]
\includegraphics[width=\linewidth]{pdf/MDSdv.ps}
\caption{Result of a MDS applied to EMD matrix computed on ``velocity relative to the direction'' signatures. This representation represents well the original distances as the stress is a bit more than 0.003.}
\label{MDSdv}
\end{figure}
\paragraph{}%analysis of the MDS results
The results are a bit different from what we obtained with the PCA analysis: humans' signatures are much more mixed with the agents' than with the PCA. In figure \ref{MDSdv} we can see that humans and agents have quite similar behaviours although there are still some differences between the two. We can also note that all agents lie on a line in figures \ref{MDSv} and \ref{MDSdv}, which reinforces the idea that the \emph{skill} parameter has a quite linear effect. It seems that this effect is not pertinent in the case of figure \ref{MDSv} because as the humans' efficiency rises they tend to move away from the agents.
\paragraph{}%global analysis
The PCA and the MDS analyses seem to give quite different results. The MDS is based on the EMD, which measures the difference between global signature shapes. From this point of view, agents are quite close to humans so we can think that they might be believable. However, the PCA analysis, being more strict, allows us to clearly distinguish human and artificial behaviours. This difference can come from the fact that humans are limited in controlling their virtual character because they use a keyboard. Evaluated agents do not copy those limitations; as a result, they may have a globally human-like behaviour but they can be recognised if we look closely. Note that our two signatures are designed to study the velocity vector, so all our conclusions apply only to the way agents move. Many more signatures would need to be designed in order to study other aspects of the behaviours.
\section{CONCLUSION}
%summary
\paragraph{}% promising method
The proposed method seems promising as it could help in assessing the believability of a behaviour. Its main advantage is that it can evaluate a large number of agents, allowing finer improvement of the models' parameters. This advantage is due to the principle of signatures, vectors which characterise behaviours' aspects. We found out in our test experiment that even simple signatures can give interesting results.
%prospect
\paragraph{}%cross-evaluation
However, there are still some questions that should be answered: to what extent do humans notice variations in behaviours? And do signatures have the same sensitivity as humans? Studies confronting the current method with classic ones should be done to evaluate signatures. Even if those studies are time-consuming and complex, they will result in useful and reusable signatures.
\paragraph{}% use this method in real behaviour modelling projects
The next step is to use this method in a behaviour modelling project. The goal is to optimise the models according to an evaluation method based on our present work. The final agents will then be evaluated in a classic way, with subjects judging agents in their environment.
\paragraph{}% enhance signatures
We will, however, need to improve the type of signatures for the evaluation to be more precise. The presented signatures are \emph{global signatures}: they are computed at each time step. \emph{Contextual signatures} are computed only when certain perception- and/or action-based conditions are met. They will provide more meaningful information about behaviours. Another new type of signature can be useful, \emph{temporal signatures}, which will measure the time between events. They can be used to measure reaction time, which is an important factor of believability.
%use IA to find discriminative variables
\bibliographystyle{myBibStyle}
\bibliography{library}
\end{document}
| {
"alphanum_fraction": 0.793814433,
"avg_line_length": 118.8670520231,
"ext": "tex",
"hexsha": "5d30693a527a449800b8340afe23354c436109dc",
"lang": "TeX",
"max_forks_count": 3,
"max_forks_repo_forks_event_max_datetime": "2021-04-14T17:51:26.000Z",
"max_forks_repo_forks_event_min_datetime": "2021-03-30T18:18:40.000Z",
"max_forks_repo_head_hexsha": "36a69bb5ee74e1ca362968604b4a554034c5f408",
"max_forks_repo_licenses": [
"MIT"
],
"max_forks_repo_name": "willsower/latex2speech",
"max_forks_repo_path": "Documentation/Listen/gameon.tex",
"max_issues_count": 50,
"max_issues_repo_head_hexsha": "36a69bb5ee74e1ca362968604b4a554034c5f408",
"max_issues_repo_issues_event_max_datetime": "2021-07-14T14:22:45.000Z",
"max_issues_repo_issues_event_min_datetime": "2021-03-15T23:03:43.000Z",
"max_issues_repo_licenses": [
"MIT"
],
"max_issues_repo_name": "willsower/latex2speech",
"max_issues_repo_path": "Documentation/Listen/gameon.tex",
"max_line_length": 890,
"max_stars_count": 3,
"max_stars_repo_head_hexsha": "36a69bb5ee74e1ca362968604b4a554034c5f408",
"max_stars_repo_licenses": [
"MIT"
],
"max_stars_repo_name": "willsower/latex2speech",
"max_stars_repo_path": "Documentation/Listen/gameon.tex",
"max_stars_repo_stars_event_max_datetime": "2021-08-30T20:35:39.000Z",
"max_stars_repo_stars_event_min_datetime": "2021-03-17T22:13:23.000Z",
"num_tokens": 4702,
"size": 20564
} |
\section{Conclusion}
While our Q-Learner did not consistently perform better than the brute-forcing
algorithm on every tested game, there were some clear areas where the learner
beat the brute forcer. When the state space was substantially larger, the
approximating Q-Learner was able to learn faster by using lookaheads and
exploring its environment more than the greedy algorithm could. From testing
we were able to conclude that depth and exploration had significant impacts on the
learning rate of the algorithm. While there was a lack of resources and
optimization in the code, these experiments show great promise in Q-Learning
being able to solve extremely complex problems and warrant further study.
| {
"alphanum_fraction": 0.824684432,
"avg_line_length": 59.4166666667,
"ext": "tex",
"hexsha": "1c95e8c243befcb7fec24e7bbacb2756516e8c70",
"lang": "TeX",
"max_forks_count": null,
"max_forks_repo_forks_event_max_datetime": null,
"max_forks_repo_forks_event_min_datetime": null,
"max_forks_repo_head_hexsha": "ba14c5adfc5cf7ad334ceedf08a7e0285ee25632",
"max_forks_repo_licenses": [
"MIT"
],
"max_forks_repo_name": "stevenwalton/510-Multi-Agent_Systems",
"max_forks_repo_path": "Project/Conclusion.tex",
"max_issues_count": null,
"max_issues_repo_head_hexsha": "ba14c5adfc5cf7ad334ceedf08a7e0285ee25632",
"max_issues_repo_issues_event_max_datetime": null,
"max_issues_repo_issues_event_min_datetime": null,
"max_issues_repo_licenses": [
"MIT"
],
"max_issues_repo_name": "stevenwalton/510-Multi-Agent_Systems",
"max_issues_repo_path": "Project/Conclusion.tex",
"max_line_length": 81,
"max_stars_count": null,
"max_stars_repo_head_hexsha": "ba14c5adfc5cf7ad334ceedf08a7e0285ee25632",
"max_stars_repo_licenses": [
"MIT"
],
"max_stars_repo_name": "stevenwalton/510-Multi-Agent_Systems",
"max_stars_repo_path": "Project/Conclusion.tex",
"max_stars_repo_stars_event_max_datetime": null,
"max_stars_repo_stars_event_min_datetime": null,
"num_tokens": 141,
"size": 713
} |
\documentclass [12pt]{article}
\usepackage {amsmath}
\usepackage {amsthm}
\usepackage {amssymb}
\usepackage {graphicx}
\usepackage {float}
\usepackage {multirow}
\usepackage {xcolor}
\usepackage [ruled,vlined,commentsnumbered,titlenotnumbered]{algorithm2e} \usepackage {array}
\usepackage {booktabs}
\usepackage {url}
\usepackage {parskip}
\usepackage [margin=1in]{geometry}
\usepackage [T1]{fontenc}
\usepackage {cmbright}
\usepackage [many]{tcolorbox}
\usepackage [colorlinks = true,
linkcolor = blue,
urlcolor = blue,
citecolor = blue,
anchorcolor = blue]{hyperref}
\usepackage {enumitem}
\usepackage {xparse}
\usepackage {verbatim}
\usepackage{algpseudocode}
\usepackage{listings}
\usepackage{xcolor}
\lstset { %
language=C++,
backgroundcolor=\color{black!5}, % set backgroundcolor
basicstyle=\footnotesize,% basic font setting
}
\newtheorem{theorem}{Theorem}
\newtheorem{remark}{Remark}
\newtheorem{lemma}[theorem]{Lemma}
\theoremstyle{definition}
\newtheorem{definition}{Definition}[section]
\newtheorem{claim}{Claim}
\DeclareTColorBox {Solution}{}{breakable, title={Solution}} \DeclareTColorBox {Solution*}{}{breakable, title={Solution (provided)}} \DeclareTColorBox {Instruction}{}{boxrule=0pt, boxsep=0pt, left=0.5em, right=0.5em, top=0.5em, bottom=0.5em, arc=0pt, toprule=1pt, bottomrule=1pt} \DeclareDocumentCommand {\Expecting }{+m}{\textbf {[We are expecting:} #1\textbf {]}} \DeclareDocumentCommand {\Points }{m}{\textbf {(#1 pt.)}}
\begin {document}
\vspace {1em}
\begin {Instruction}
Adapted From Virginia Williams' lecture notes.
\end {Instruction}
{\LARGE \textbf {COMP 285 (NC A\&T, Spr `22)}\hfill \textbf {Lecture 27} }
\begin{centering}
\section*{Dynamic Programmig V - 0/1 Knapsack and Maximal Independent Set}
\end{centering}
\section{The Knapsack Problem}
This is a classic problem, defined as the following:
We have $n$ items, each with a value and a positive weight. The $i$-th item has weight $w_i$ and value $v_i$. We have a knapsack that holds a maximum weight of $W$. Which items do we put in our knapsack to maximize the value of the items in our knapsack? For example, let’s say that $W = 10$; that is, the knapsack holds a weight of at most 10. Also suppose that we have four items, with weight and value:
\begin{table}[h!]
\centering
\begin{tabular}{c|c|c}
Item & Weight & Value \\
\hline
A & 6 & 25 \\
B & 3 & 13 \\
C & 4 & 15 \\
D & 2 & 8
\end{tabular}
\end{table}
We will talk about two variations of this problem, one where you have infinite copies of each item (commonly known as Unbounded Knapsack), and one where you have only one of each item (commonly known as 0-1 Knapsack).
What are some useful subproblems? Perhaps it’s having knapsacks of smaller capacities, or maybe it’s having fewer items to choose from. In fact, both of these ideas for subproblems are useful. As we saw last lecture, the first idea is useful for the Unbounded Knapsack problem, and a combination of the two ideas is useful for the 0-1 Knapsack problem.
\subsection{The 0-1 Knapsack Problem}
We consider what happens when we can take at most one of each item. Going back to the initial example, we would pick item $A$ and item $C$, having a total weight of $10$ and a total value of $40$.
The subproblems that we need must keep track of the knapsack size as well as which items are allowed to be used in the knapsack. Because we need to keep track of more information in our state, we add another parameter to the recurrence (and therefore, another dimension to the DP table). Let $K(x, j)$ be the maximum value that we can get with a knapsack of capacity $x$ considering only items at indices from $1, \cdots , j$. Consider the optimal solution for $K(x, j)$. There are two cases:
\begin{enumerate}
\item Item $j$ is used in $K(x, j)$. Then, the remaining items that we choose to put in the knapsack must be the optimum solution for $K(x - w_j , j - 1)$. In this case, $K(x, j) = K(x - w_j , j - 1) + v_j$ .
\item Item $j$ is not used in $K(x, j)$. Then, $K(x, j)$ is the optimum solution for K(x, j - 1). In this case, $K(x, j) = K(x, j - 1)$.
\end{enumerate}
So, our recurrence relation is: $K(x, j) = \max\{K(x -w_j , j -1) +v_j , K(x, j -1)\}$. Now, we’re done: we simply calculate each entry up to $K(W, n)$, which gives us our final answer. Note that this also runs in $O(nW)$ time despite the additional dimension in the DP table. This is because at each entry of the DP table, we do $O(1)$ work.
\begin{algorithm}
\caption{ZeroOneKnapsack(W, n, w, v)}
\label{alg:zerooneknapsack}
\begin{algorithmic}
\For{$j = 1, \cdots, n$}
    \State $K[0, j] \gets 0$
\EndFor
\For{$x = 0, \cdots, W$}
    \State $K[x, 0] \gets 0$
\EndFor
\For{$x = 1, \cdots, W$}
    \For{$j = 1, \cdots, n$}
        \State $K[x, j] \gets K[x, j-1]$
        \If{$w_j \le x$}
            \State $K[x, j] \gets \max\{K[x - w_j, j-1] + v_j,\ K[x, j]\}$
        \EndIf
    \EndFor
\EndFor
\State \Return $K[W, n]$
\end{algorithmic}
\end{algorithm}
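For concreteness, a direct C++ translation of this algorithm is sketched below; on the example table above with $W = 10$ it returns $40$ (items A and C).
\begin{lstlisting}
// 0-1 knapsack in O(nW) time, following the recurrence K(x, j) above.
#include <algorithm>
#include <vector>

int ZeroOneKnapsack(int W, const std::vector<int>& w, const std::vector<int>& v) {
  const int n = static_cast<int>(w.size());
  // K[x][j] = best value with capacity x, considering only items 1..j.
  std::vector<std::vector<int>> K(W + 1, std::vector<int>(n + 1, 0));
  for (int x = 1; x <= W; ++x)
    for (int j = 1; j <= n; ++j) {
      K[x][j] = K[x][j - 1];                 // case: item j not used
      if (w[j - 1] <= x)                     // case: item j used (0-indexed arrays)
        K[x][j] = std::max(K[x][j], K[x - w[j - 1]][j - 1] + v[j - 1]);
    }
  return K[W][n];
}

// Example from the table above: ZeroOneKnapsack(10, {6,3,4,2}, {25,13,15,8}) == 40.
\end{lstlisting}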
\section{The Independent Set Problem}
This problem is as follows:
Say that we have an undirected graph $G = (V, E)$. We call a subset $S \subseteq V$ of vertices ``independent'' if there are no edges between vertices in $S$. Let vertex $i$ have weight $w_i$, and denote by $w(S)$ the sum of weights of vertices in $S$. Given $G$, find an independent set of maximum weight, $\arg\max_{S\subseteq V} w(S)$.
Actually, this problem is NP-hard for a general graph $G$. However, if our graph is a tree, then we can solve this problem in linear time. In the following figure, the maximum weight independent set is highlighted in blue.
\begin{remark}
Dynamic programming is especially useful to keep in mind when you are solving a problem that involves trees. The tree structure often lends itself to dynamic programming solutions.
\end{remark}
\begin{figure}[h!]
\centering
\includegraphics[scale=0.5]{max_is.png}
\end{figure}
As usual, the key question to ask is, ``What should our subproblem(s) be?'' Intuitively, if the problem has to do with trees, then subtrees often play an important role in identifying our subproblems. Let's pick any vertex $r$ and designate it as the root. Denoting the subtree rooted at $u$ as $T_u$, we define $A(u)$ to be the weight of the maximum weight independent set in $T_u$. How can we express $A(u)$ recursively? Letting $S_u$ be the maximum weight independent set of $T_u$, there are two cases:
\begin{enumerate}
\item If $u \notin S_u$, then $A(u) = \sum_{v} A(v )$ for all children $v$ of $u$.
\item If $u \in S_u$, then $A(u) = w_u + \sum_{v} A(v )$ for all grandchildren $v$ of $u$.
\end{enumerate}
To avoid solving the subproblem for trees rooted at grandchildren, we introduce $B(u)$ as the weight of the maximum weight independent set in $T_u \setminus \{u\}$. That is, $B(u) = \sum_{v} A(v )$ for all children $v$ of $u$. Equivalently, we have the following cases:
\begin{enumerate}
\item If $u \notin S_u$, then $A(u) = \sum_{v} A(v )$ for all children $v$ of $u$.
\item If $u \in S_u$, then $A(u) = w_u + \sum_{v} B(v )$ for all children $v$ of $u$.
\end{enumerate}
So, we can calculate the weight of the maximum weight independent set:
$$
A(u) = \max\left\{w(u) + \sum_{v \in Children(u)} B(u), \sum_{v \in Children(u)} A(v) \right\}
$$
To create an algorithm out of this recurrence, we can compute the $A(u)$ and $B(u)$ values in a bottom-up manner (a post-order traversal on the tree), arriving at the answer, $A(r)$. This
takes $O(|V|)$ time.
\begin{algorithm}
\caption{MaxWeightIndependentSet(G)}
\label{alg:MaxWeightIndependentSet}
\begin{algorithmic}
\State \texttt{// G is a tree}
\State $r \gets \text{ArbitraryVertex}(G)$
\State $T \gets \text{RootTreeAt}(r)$
\Procedure{SolveSubtreeAt}{$u$}
    \If{$\text{Children}(u) = \emptyset$}
        \State $A(u) \gets w_u$
        \State $B(u) \gets 0$
    \Else
        \For{$v \in \text{Children}(u)$}
            \State \Call{SolveSubtreeAt}{$v$}
        \EndFor
        \State $A(u) \gets \max\left\{\sum_{v \in \text{Children}(u)} A(v),\ w_u + \sum_{v \in \text{Children}(u)} B(v) \right\}$
        \State $B(u) \gets \sum_{v \in \text{Children}(u)} A(v)$
    \EndIf
\EndProcedure
\State \Call{SolveSubtreeAt}{$r$}
\State \Return $A(r)$
\end{algorithmic}
\end{algorithm}
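The same post-order computation can be written directly as a recursive C++ function; the sketch below returns the weight of the maximum weight independent set in $O(|V|)$ time (the \texttt{Tree} representation is an assumption made for illustration).
\begin{lstlisting}
// Maximum weight independent set on a rooted tree, following the recurrence
// for A(u) and B(u) above.  The Tree representation is illustrative.
#include <algorithm>
#include <utility>
#include <vector>

struct Tree {
  std::vector<double> w;                    // w[u]: weight of vertex u
  std::vector<std::vector<int>> children;   // children[u]: children of u
};

// Returns {A(u), B(u)} for the subtree rooted at u.
std::pair<double, double> Solve(const Tree& t, int u) {
  double withU = t.w[u];     // u is taken: children must be excluded, add B(v)
  double withoutU = 0.0;     // u is not taken: children subtrees are free, add A(v)
  for (int v : t.children[u]) {
    std::pair<double, double> ab = Solve(t, v);
    withU += ab.second;
    withoutU += ab.first;
  }
  return { std::max(withU, withoutU), withoutU };
}

double MaxWeightIndependentSet(const Tree& t, int root) {
  return Solve(t, root).first;
}
\end{lstlisting}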
\end{document}
| {
"alphanum_fraction": 0.6863940698,
"avg_line_length": 40.8,
"ext": "tex",
"hexsha": "7e9bc22570af6e878c5bf4734545ff00133a34ce",
"lang": "TeX",
"max_forks_count": 2,
"max_forks_repo_forks_event_max_datetime": "2022-01-21T03:00:16.000Z",
"max_forks_repo_forks_event_min_datetime": "2022-01-20T21:52:09.000Z",
"max_forks_repo_head_hexsha": "f0893b43aaf3b321eb134c82512bd7b9271fdea6",
"max_forks_repo_licenses": [
"MIT"
],
"max_forks_repo_name": "facebookEIR/algorithms-course",
"max_forks_repo_path": "assets/lectures/lecture27.tex",
"max_issues_count": null,
"max_issues_repo_head_hexsha": "f0893b43aaf3b321eb134c82512bd7b9271fdea6",
"max_issues_repo_issues_event_max_datetime": null,
"max_issues_repo_issues_event_min_datetime": null,
"max_issues_repo_licenses": [
"MIT"
],
"max_issues_repo_name": "facebookEIR/algorithms-course",
"max_issues_repo_path": "assets/lectures/lecture27.tex",
"max_line_length": 505,
"max_stars_count": 1,
"max_stars_repo_head_hexsha": "f0893b43aaf3b321eb134c82512bd7b9271fdea6",
"max_stars_repo_licenses": [
"MIT"
],
"max_stars_repo_name": "facebookEIR/algorithms-course",
"max_stars_repo_path": "assets/lectures/lecture27.tex",
"max_stars_repo_stars_event_max_datetime": "2022-01-16T02:47:46.000Z",
"max_stars_repo_stars_event_min_datetime": "2022-01-16T02:47:46.000Z",
"num_tokens": 2636,
"size": 8364
} |
% File acl2020.tex
%
%% Based on the style files for ACL 2020, which were
%% Based on the style files for ACL 2018, NAACL 2018/19, which were
%% Based on the style files for ACL-2015, with some improvements
% taken from the NAACL-2016 style
%% Based on the style files for ACL-2014, which were, in turn,
%% based on ACL-2013, ACL-2012, ACL-2011, ACL-2010, ACL-IJCNLP-2009,
%% EACL-2009, IJCNLP-2008...
%% Based on the style files for EACL 2006 by
%%[email protected] or [email protected]
%% and that of ACL 08 by Joakim Nivre and Noah Smith
\documentclass[11pt,a4paper]{article}
\usepackage[hyperref]{acl2020}
\usepackage{times}
\usepackage{graphicx}
\usepackage{subfig}
\usepackage{latexsym}
\usepackage{float}
\usepackage{placeins}
\usepackage{booktabs}
\renewcommand{\UrlFont}{\ttfamily\small}
% This is not strictly necessary, and may be commented out,
% but it will improve the layout of the manuscript,
% and will typically save some space.
\usepackage{microtype}
\aclfinalcopy % Uncomment this line for the final submission
%\def\aclpaperid{***} % Enter the acl Paper ID here
%\setlength\titlebox{5cm}
% You can expand the titlebox if you need extra space
% to show all the authors. Please do not make the titlebox
% smaller than 5cm (the original size); we will check this
% in the camera-ready version and ask you to change it back.
\newcommand\BibTeX{B\textsc{ib}\TeX}
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
\title{CS272 Hw3: Part of Speech and Entity Recognition Sequence Tagging
with Viterbi Parsing and Per-label Evaluation}
\author{Sam Showalter \\
University of California, Irvine \ (showalte) \\
Kaggle: Sam Showalter \\
\texttt{[email protected]}}
\date{}
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
\begin{document}
\maketitle
\begin{abstract}
Part of speech tagging (POS) and entity recognition (ER) are essential tasks in natural
language processing. Over time, language models have evolved substantially in their
ability to incorporate and leverage context in accomplishing these objectives. Many
variations of language models for this purpose exist today, and in this study we examine
different configurations on a POS and NER dataset from Twitter. In addition, we develop
a per-label evaluation metric to allow for more granular exploration of our findings. Finally,
we implement Viterbi parsing, which uses dynamic programming to determine the most likely
sequence of tags.
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% Introduction
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
\section{Introduction}
Language models for POS and NER tagging take many forms. Initially, models relied on
window-based context that only considered a set of words directly adjacent to the
token in question. Quickly, researchers determined that this approach would be infeasible for
capturing context over a long sequence, which occurs frequently in most languages. Therefore,
a major revolution came from the first proposal of a recurrent, or auto-regressive network (RNN) that could carry forward its hidden state and allow it to influence the outcomes of future predictions in the sequence. Unfortunately, the most basic formulation of RNNs suffers from vanishing and exploding gradients - as a signal is passed through a sequence and subsequently propogated back during learning, the gradient tends to vanish or explode if the eigenvalues of the weight matrix is not quite close to one \cite{hochreiter1998vanishing}.
To remedy this, many ideas were proposed, including gradient clipping and skip-connections, which allow a signal to bypass parts of the sequence \cite{hochreiter1998vanishing}. Though these ideas did help, the next big breakthrough in sequence tagging and language modeling came with the idea of gating recurrent cells \cite{chung2014empirical}, that is, providing a set of parameters and a structure that allow the model to learn to limit context from the previous cells when it is not useful. One formulation of this, the Gated Recurrent Unit (GRU), became popular for this task. Shortly after, a more complicated but powerful variation of gating and context came from a unit known as the Long Short-Term Memory (LSTM) unit \cite{huang2015bidirectional}. Until recently, LSTM networks and their variations were considered to be state of the art. One of their notable pitfalls is their inability to be highly parallelized due to their sequential nature. This was changed by the notion of self-attention \cite{vaswani2017attention}, which dynamically computes attention and context in a parallelized fashion, leading to the enormously powerful language models known as Transformers that we see today.
Building on these ideas further, the next insight was that knowledge of the surrounding predictions is necessary to ensure that the overall predicted sequence is coherent. Independently predicting labels in a sequence often leads to poor overall coherence for many tasks. Therefore, the notion of Viterbi parsing, which dynamically evaluates the most probable sequence, began to proliferate \cite{ma2016end}. In the following sections, we walk through our custom evaluation metric, our Viterbi parser, and our findings on POS and NER experiments with different configurations.
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% Custom Evaluation Metric
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
\section{Per Label Evaluation}%
\label{sec:per_label_evaluation}
Aggregate metrics often do not capture the granularity in performance needed to evaluate a
model in a real-world setting. Therefore, we created code that stratifies our predictions
based on their true label and computes performance metrics - in this case, accuracy - for every label. To see our code implementation, please refer to \texttt{metric.py}. This implementation passes the metric test.
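For illustration, a minimal sketch of such a per-label accuracy computation is shown below. This is a reconstruction for exposition only: the function name and signature are ours, and the actual contents of \texttt{metric.py} may differ (for instance, it may operate on batched tensors with a mask).
\begin{verbatim}
from collections import defaultdict

def per_label_accuracy(predictions, labels):
    # Stratify token-level predictions by their true label and
    # compute an accuracy score for every label.
    correct, total = defaultdict(int), defaultdict(int)
    for pred, gold in zip(predictions, labels):
        total[gold] += 1
        if pred == gold:
            correct[gold] += 1
    return {label: correct[label] / total[label] for label in total}
\end{verbatim}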
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% Viterbi Parsing
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
\section{Viterbi Parsing}%
\label{sec:viterbi_parsing}
One way to interpret a sequence and the operations conducted on it by a language model is as a graph where the nodes are the tokens and their structural relationships (as defined by the model) are the edges. This is also known as a conditional random field, which we can use to ensure that our final sequence is coherent and incorporates the joint likelihood of the full sequence instead of independent likelihoods per term.
For each token, we receive a transition and emission score. The emission score is the likelihood of the predicted label for that token based on the input and, depending on the model, the hidden state (context) previous to it. Conversely, the transition score is the likelihood of seeing a given emission (prediction), given the emissions previous to it. This term can be thought of as the overall coherence in the labels. With these terms together, we can find the most probable sequence using Viterbi parsing, a form of dynamic programming.
To start, we initialize a matrix of scores to some default value, perhaps negative infinity. This matrix considers all labels (L) across the length of the sequence (N). We are also given an (NxL) matrix of emission scores, with L scores (one per label) for each token in the sequence of length N. Transition scores from every label to every other label, an (LxL) matrix, as well as the vectors of starting and ending transition scores (for starting or ending with a specific label), are also provided. Lastly, we create another (NxL) matrix that dynamically tracks the most likely sequence elements. We call this \texttt{bp}, or backward pointer.
To begin, the first row of the score matrix R (NxL) is seeded with the sum of the starting transition scores and the emission scores for the first token. Then we iterate through the sequence of N terms, building up scores in the following way. Within the sequence loop, we iterate through the labels, and then iterate over the labels a second time, allowing us to consider all (LxL) possible options for the next step in the sequence. We calculate this dynamically by finding the most probable first term, then building on it with R. We proceed in this fashion, updating R and bp during each LxL inner loop with the most probable next term. Once finished, we add the end transition scores.
Finally, we can recover our most probable sequence. First, we find the highest score in the bottom row of R; this represents the end of our most probable sequence. Then, we track backward through bp to recover the labels of this most probable sequence and return them in order. When run, this algorithm passes the Viterbi test with 100\% accuracy.
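To make the procedure concrete, a condensed sketch of this decoding routine is given below. It is a simplified reconstruction of the description above rather than our exact implementation (which operates on the assignment's score matrices); variable names such as \texttt{R} and \texttt{bp} follow the text.
\begin{verbatim}
def viterbi_decode(emissions, transitions, start, end):
    # emissions: N x L scores, transitions: L x L,
    # start/end: length-L start and end transition scores.
    N, L = len(emissions), len(start)
    R = [[float("-inf")] * L for _ in range(N)]  # best score ending in label j
    bp = [[0] * L for _ in range(N)]             # backward pointers

    # Seed the first row with start transitions plus first-token emissions.
    for j in range(L):
        R[0][j] = start[j] + emissions[0][j]

    # Build scores over the sequence, considering all L x L moves.
    for i in range(1, N):
        for j in range(L):            # current label
            for k in range(L):        # previous label
                score = R[i - 1][k] + transitions[k][j] + emissions[i][j]
                if score > R[i][j]:
                    R[i][j] = score
                    bp[i][j] = k

    # Add end transitions, then trace backward from the best final label.
    last = max(range(L), key=lambda j: R[N - 1][j] + end[j])
    path = [last]
    for i in range(N - 1, 0, -1):
        path.append(bp[i][path[-1]])
    return list(reversed(path))
\end{verbatim}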
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% POS Tagging
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
\section{Part-of-Speech Tagging Exploration}%
\label{sec:part_of_speech_tagging_exploration}
In this section, we explore different implementations of POS tagging. We were provided two models, one that made its predictions with Viterbi parsing and Conditional Random Field (CRF) assumptions, and another that naively produced a sequence without this information. We began our exploration with the latter.
First, we incorporated word embeddings to improve the manner in which individual tokens were represented. We made use of GloVe for these embeddings and saw an immediate jump in performance of over 10\%. In isolation, incorporating a more intelligent encoder, a bi-directional LSTM in this case, led to a nearly identical increase in performance for the simple tagger. Interestingly, the same experiment on the neural tagger showed a non-trivial difference in performance between adding only the embedder versus only the encoder. Though the encoder is far more sophisticated than the GloVe embedding system, with far more parameters, the performance boost for POS tagging was more closely tied to the token embeddings themselves than to how context is derived from them (encoding).
As a second round of experimentation, we explored how the dimensionality of the encoder impacts performance. Adding more layers (stacking) tended to lead to worse performance and slower learning. While the slower learning could be due to the increased number of parameters, the worse performance is likely because the model has too much capacity for the task and overfits. Ultimately, a single layer with dimensionality 50 proved most effective for POS tagging, and this is shown for both GRU- and LSTM-based encoders. Validation performance during learning was most stable with GRUs, and while the GRU slightly underperformed the LSTM for the neural POS tagger, repeated experiments with different initializations were more robust for the GRU implementation. Thus, we considered the GRU with GloVe embeddings as our best model and transitioned to per-label performance.
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% POS figures for experimentation and per-label stuff
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
\FloatBarrier
\begin{figure*}[h]
\begin{tabular}{cc}
\subfloat{\includegraphics[width=0.48\linewidth]{imgs/val_simple_pos.png}} &
\subfloat{\includegraphics[width=0.44\linewidth]{imgs/val_neural_pos.png}} \\
\subfloat{\includegraphics[width=0.45\linewidth]{imgs/label_simple_POS.png}} &
\subfloat{\includegraphics[width=0.45\linewidth]{imgs/label_neural_POS.png}}
\end{tabular}
\caption{(Top row) POS tagging validation accuracy during training for a variety of
simple (left) and neural crf (right) architectures. (Bottom row) Per-label improvements
between baseline and best POS architectures with simple (left) and neural crf (right)
prediction.}
\label{fig:val_pos}
\end{figure*}
\FloatBarrier
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
Displayed in the bottom row of Figure \ref{fig:val_pos}, the labels that benefitted the most from the GRU encoder and GloVe embeddings are similar between the neural and simple POS taggers, with \texttt{NUM} and \texttt{NOUN} tending to see the most benefit. What is more surprising is that several of these labels had performance notably worse than the baseline early in training, only to quickly reverse later on. These similarities between the neural and simple taggers continue when we consider the baseline and best models for each. Though the neural tagger baseline is substantially more performant than the simple tagger, both GRU implementations perform nearly identically to each other. It appears that the encoders we are using are particularly effective at carrying context between terms in a sequence, negating the additional performance boost from Viterbi parsing.
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% NER Tagging
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
\section{Named Entity Recognition}%
\label{sec:named_entity_recognition}
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% Best POS
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
\FloatBarrier
\begin{figure}[h]
\centering
\includegraphics[width=0.97\linewidth]{imgs/best_POS.png}
\caption{Validation accuracy for best POS models (simple and neural) as well as their
baselines}%
\label{fig:best_pos}
\vspace{-13pt}
\end{figure}
\FloatBarrier
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
Named entity recognition was a fundamentally different task from POS tagging because of the dataset imbalance. The non-named entities, labeled \texttt{'O'}, comprise a huge portion of the tokens in the dataset. In turn, one problem discovered was that the model would unanimously predict \texttt{'O'},
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% POS figures for experimentation and per-label stuff
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
\FloatBarrier
\begin{figure*}[h!]
\begin{tabular}{cc}
\subfloat{\includegraphics[width=0.48\linewidth]{imgs/val_simple_ner.png}} &
\subfloat{\includegraphics[width=0.44\linewidth]{imgs/val_neural_ner.png}} \\
\subfloat{\includegraphics[width=0.51\linewidth]{imgs/label_simple_NER.png}} &
\subfloat{\includegraphics[width=0.46\linewidth]{imgs/label_neural_NER.png}}
\end{tabular}
\caption{(Top row) NER tagging validation accuracy during training for a variety of
simple (left) and neural crf (right) architectures. (Bottom row) Per-label improvements
between baseline and best NER architectures with simple (left) and neural crf (right)
prediction.}
\label{fig:val_ner}
\end{figure*}
\FloatBarrier
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
leading to a decent overall performance when considered in aggregate but a poor per-label performance. Our hyperparameter tuning for NER tagging began similarly to POS tagging, with the consideration of different encoders and embeddings separately. We also considered the impact of single-direction encoders, and have plotted the results for a single-direction LSTM in Figure \ref{fig:val_ner}. Though performance was similar, one-directional encoders could not meet the performance of their bidirectional counterparts. This model was difficult to tune due to the imbalance between named and non-named entities. However, the bottom row of Figure \ref{fig:val_ner} shows the results of the final tuned models for the simple and neural CRF taggers. Again, the GRU attained the highest performance.
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% Best NER
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
\begin{figure}[h!]
\centering
\includegraphics[width=0.97\linewidth]{imgs/best_NER.png}
\caption{Validation accuracy for best NER models (simple and neural) as well as their
baselines}%
\label{fig:best_ner}
\vspace{-16pt}
\end{figure}
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
Like many of the sophisticated encoders, the GRU did struggle to predict less common labels well. However, it overcame this failure mode faster than other models, leading to a noticeable bump in per-label performance. More interesting, however, is that the baseline NER simple and neural CRF models performed almost identically before tuning. It
\FloatBarrier
\begin{table*}[h]
\begin{tabular}{lrrrrrrrr}
\toprule
{} & gru\_crf & lstm2d\_crf & baseline\_crf & gru\_s & lstm2d\_s & baseline\_s & val\_gru\_crf & val\_gru\_s \\
\midrule
. & 0.96 & 0.96 & 0.94 & 0.96 & 0.96 & 0.94 & 0.95 & 0.96 \\
ADJ & 0.70 & 0.63 & 0.38 & 0.69 & 0.63 & 0.41 & 0.63 & 0.64 \\
ADV & 0.84 & 0.82 & 0.72 & 0.85 & 0.83 & 0.73 & 0.69 & 0.68 \\
NOUN & 0.76 & 0.79 & 0.69 & 0.76 & 0.80 & 0.36 & 0.77 & 0.78 \\
NUM & 0.63 & 0.60 & 0.25 & 0.60 & 0.61 & 0.26 & 0.68 & 0.74 \\
PRON & 0.96 & 0.96 & 0.94 & 0.96 & 0.96 & 0.95 & 0.95 & 0.95 \\
PRT & 0.93 & 0.88 & 0.85 & 0.94 & 0.88 & 0.86 & 0.95 & 0.91 \\
VERB & 0.87 & 0.87 & 0.71 & 0.87 & 0.87 & 0.68 & 0.84 & 0.85 \\
X & 0.82 & 0.84 & 0.53 & 0.82 & 0.84 & 0.25 & 0.80 & 0.81 \\
Accuracy & 0.85 & 0.85 & 0.74 & 0.85 & 0.86 & 0.63 & 0.84 & 0.84 \\
\bottomrule
\end{tabular}
\caption{POS overall test accuracy as well as accuracy on a selection of labels. For comparison, the best CRF and simple validation performance are given. All scores correspond to the test set
unless otherwise specified.}
\label{tab:pos_test}
%\end{table*}
%\FloatBarrier
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%% \FloatBarrier
%\begin{table*}
\vspace{12pt}
\begin{tabular}{lrrrrrrrr}
\toprule
{} & gru\_s & lstm2d\_s & baseline\_s & gru\_crf & lstm2d\_crf & baseline\_crf & val\_gru\_s & val\_gru\_crf \\
\midrule
B-facility & 0.04 & 0.00 & 0.00 & 0.15 & 0.00 & 0.00 & 0.21 & 0.21 \\
B-geo-loc & 0.46 & 0.44 & 0.12 & 0.46 & 0.39 & 0.07 & 0.45 & 0.44 \\
B-movie & 0.00 & 0.00 & 0.00 & 0.00 & 0.00 & 0.00 & 0.00 & 0.00 \\
B-other & 0.00 & 0.00 & 0.03 & 0.02 & 0.03 & 0.02 & 0.06 & 0.06 \\
B-person & 0.52 & 0.49 & 0.08 & 0.55 & 0.53 & 0.08 & 0.73 & 0.73 \\
B-product & 0.09 & 0.00 & 0.01 & 0.09 & 0.01 & 0.01 & 0.26 & 0.26 \\
B-sportsteam & 0.00 & 0.00 & 0.00 & 0.01 & 0.01 & 0.00 & 0.18 & 0.18 \\
I-person & 0.23 & 0.02 & 0.01 & 0.40 & 0.36 & 0.01 & 0.73 & 0.57 \\
accuracy & 0.91 & 0.90 & 0.90 & 0.91 & 0.90 & 0.90 & 0.95 & 0.94 \\
\bottomrule
\end{tabular}
\caption{NER overall test accuracy as well as accuracy on a selection of labels. For comparison, the best CRF and simple validation performance are given. All scores correspond to the test set
unless otherwise specified.}
\label{tab:ner_test}
\end{table*}
appears that the neural CRF approach to prediction is not as useful for named entity recognition as it is for POS tagging. Intuitively, this makes sense. POS tagging is far more context dependent in English, where words can take on many different parts of speech. By contrast, named entities tend to be globally defined and rarely change due to context.
% \FloatBarrier
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% Conclusion
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
\section{Conclusion}%
\label{sec:conclusion}
In this assignment we investigated different models for POS and NER tagging. In particular, we compared models that made predictions on terms in a sequence with and without modeling the information of prior predictions (the neural CRF assumption). We found that the GRU encoder and GloVe embeddings generalized best to our data, and that the neural CRF prediction protocol with Viterbi parsing was most beneficial for POS tagging, which tends to rely more heavily on the context of previous parts of speech. These findings were established on validation data; our test-set results are summarized in Tables \ref{tab:pos_test} and \ref{tab:ner_test}.
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% Statement of Collaboration
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
\section{Statement of Collaboration}
Aside from viewing the lecture material and using CampusWire, I
completed this assignment alone.
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
\bibliography{custom}
\bibliographystyle{acl_natbib}
\appendix
\end{document}% File acl2020.tex
\subsection{Hello World in C++} % (fold)
\label{sub:hello_world_in_c}
When you follow the SplashKit install instructions, select the C++ language and install the clang++ or g++ compiler.
Once you have the compiler installed, you can create your first program: the famous \textbf{Hello World} discussed in \sref{sub:hello world}. The C++ code for this is shown in \lref{lst:hello-world-c-c}. This code tells the computer to `write' the text \emph{Hello World!} to the Terminal, and follow it with a new `line'. Do the following to create this program for yourself; see the notes below for hints:
\begin{enumerate}
\item Open a Terminal
\item Navigate to where you want to save your code, and create a new folder using \texttt{mkdir} and the project name (without spaces).
\item Move into the project folder using the \texttt{cd} command.
\item Use \texttt{skm} to create a new C++ project.
\item Open Visual Studio Code, and open the \textbf{folder} that contains your project code.
\item Type\footnote{Do not just copy and paste it out of the text, type it in yourself as this will help you learn the concepts being covered.} in the text below, making sure you get every character correct.
\item Compile the program using \texttt{skm clang++ program.cpp -o HelloWorld}
\item Run the program using \texttt{./HelloWorld}
\end{enumerate}
Well done, you have now created and run your first C++ program!
\csection
{
\ccode{lst:hello-world-c-c}{Hello World code in C++.}{code/c/program-creation/hello-world.c}
}
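If you want to check what you have typed before compiling, the program should look roughly like the following sketch. Treat the listing above as the authoritative version; this sketch assumes SplashKit's \texttt{write\_line} procedure is available once \texttt{skm} has set up the project.
\begin{verbatim}
#include "splashkit.h"

int main()
{
    // Write the text, followed by a new line, to the Terminal
    write_line("Hello World!");
    return 0;
}
\end{verbatim}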
\mynote{
\begin{itemize}
\item See \sref{subs:install} \nameref{subs:install} for details on installing the tools you need.
\item See \nameref{ssub:bash} in \sref{sub:terminal} for an example of how to use the Terminal.
\item See \sref{sec:using_these_concepts_compiling_a_program} \nameref{sec:using_these_concepts_compiling_a_program} for the overall process and the output you should expect from the program.
\end{itemize}
}
% subsection hello_world_in_c (end)
\documentclass[12pt, a4paper]{article} % book, report, article, letter, slides
% letterpaper/a4paper, 10pt/11pt/12pt, twocolumn/twoside/landscape/draft
%%%%%%%%%%%%%%%% PACKAGES %%%%%%%%%%%%%%%%%%%%%
\usepackage[utf8]{inputenc} % encoding
\usepackage[english]{babel} % use special characters and also translates some elements within the document.
\usepackage{amsmath} % Math
\usepackage{amsthm} % Math, \newtheorem, \proof, etc
\usepackage{amssymb} % Math, extended collection
\usepackage{bm} % $\bm{D + C}$
\newtheorem{theorem}{Theorem}[section] % \begin{theorem}\label{t:label} \end{theorem}<Paste>
\newtheorem{corollary}{Corollary}[theorem]
\newtheorem{lemma}[theorem]{Lemma}
\newenvironment{claim}[1]{\par\noindent\underline{Claim:}\space#1}{}
\newenvironment{claimproof}[1]{\par\noindent\underline{Proof:}\space#1}{\hfill $\blacksquare$}
\usepackage{hyperref} % Hyperlinks \url{url} or \href{url}{name}
\usepackage{parskip} % \par starts on left (not idented)
\usepackage{abstract} % Abstract
\usepackage{graphicx} % Images
\graphicspath{{./images/}}
\usepackage[vlined,ruled]{algorithm2e} % pseudo-code
% \usepackage[document]{ragged2e} % Left-aligned (whole document)
% \begin{...} ... \end{...} flushleft, flushright, center
%%%%%%%%%%%%%%%% CODE %%%%%%%%%%%%%%%%%%%%%
\usepackage{minted} % Code listing
% \mint{html}|<h2>Something <b>here</b></h2>|
% \inputminted{octave}{BitXorMatrix.m}
%\begin{listing}[H]
%\begin{minted}[xleftmargin=20pt,linenos,bgcolor=codegray]{haskell}
%\end{minted}
%\caption{Example of a listing.}
%\label{lst:example} % You can reference it by \ref{lst:example}
%\end{listing}
\newcommand{\code}[1]{\texttt{#1}} % Define \code{foo.hs} environment
\newcommand{\haskell}[1]{\mintinline{haskell}{#1}}
%%%%%%%%%%%%%%%% COLOURS %%%%%%%%%%%%%%%%%%%%%
\usepackage{xcolor} % Colours \definecolor, \color{codegray}
\definecolor{codegray}{rgb}{0.9, 0.9, 0.9}
% \color{codegray} ... ...
% \textcolor{red}{easily}
%%%%%%%%%%%%%%%% CONFIG %%%%%%%%%%%%%%%%%%%%%
\renewcommand{\absnamepos}{flushleft}
\setlength{\absleftindent}{0pt}
\setlength{\absrightindent}{0pt}
%%%%%%%%%%%%%%%% GLOSSARIES %%%%%%%%%%%%%%%%%%%%%
%\usepackage{glossaries}
%\makeglossaries % before entries
%\newglossaryentry{latex}{
%name=latex,
%description={Is a mark up language specially suited
%for scientific documents}
%}
% Referene to a glossary \gls{latex}
% Print glossaries \printglossaries
\usepackage[acronym]{glossaries} %
% \acrshort{name}
% \acrfull{name}
%\newacronym{kcol}{$k$-COL}{$k$-coloring problem}
\usepackage{enumitem}
%%%%%%%%%%%%%%%% HEADER %%%%%%%%%%%%%%%%%%%%%
\usepackage{fancyhdr}
\pagestyle{fancy}
\fancyhf{}
\rhead{Arnau Abella}
\lhead{Advanced Data Structures}
\rfoot{Page \thepage}
%%%%%%%%%%%%%%%% TITLE %%%%%%%%%%%%%%%%%%%%%
\title{%
Purely Functional Data Structures\\
%by Chris Okasaki\\
\large{Advanced Data Structures \\ Final Work}
}
\author{%
Arnau Abella \\
\large{Universitat Polit\`ecnica de Catalunya}
}
\date{\today}
%%%%%%%%%%%%%%%% DOCUMENT %%%%%%%%%%%%%%%%%%%%%
\begin{document}
\maketitle
%%%%%%%%%%%%%%%%%%%%%%%
\section{Summary of the paper}%
\label{sec:Summary of the paper}
The chosen paper, in fact a book, is \mbox{\textit{Purely Functional Data Structures} \cite{oka98}}.
The book has around 300 pages and 11 chapters, and covers a wide variety of content, most of which cannot be presented without the preceding material. For this reason, this work will focus only on chapters 5, 6 and 7, which are, in my opinion, the most relevant introductory chapters. The topics presented in these chapters are the following:
\begin{itemize}
\item Chapter 5. Fundamentals of Amortization
\item Chapter 6. Amortization and Persistence via Lazy Evaluation
\item Chapter 7. Eliminating Amortization
\end{itemize}
I'd love to talk about all the different data structures mentioned in the book, such as Binomial Heaps, Splay Heaps, Pairing Heaps, Bottom-Up Merge with Sharing, Hood-Melville Real-Time Queues, Binary Random-Access Lists, Skew Binomial Heaps, etc., and also about more advanced topics like \textit{lazy rebuilding}, \textit{numerical representations}, \textit{data-structural bootstrapping}, \textit{implicit recursive slowdown}, etc., but, for today, we will work through those three previously mentioned chapters and focus on a very simple and educative data structure, a \textit{Queue}.
\subsection{Fundamentals of Amortization}%
\label{sub:Fundamentals of Amortization}
Given a sequence of operations, we may wish to know the running time of the entire sequence, but not care about the running time of any individual operation. Given a sequence of $n$ operations, we wish to bound the total running time of the sequence by $O(n)$.
To prove an amortized bound, one defines the amortized cost of each operation and then proves that, for any sequence of operations, the total amortized cost of the operations is an upper bound on the total actual cost, i.e.,
\begin{equation}%
\label{amortized cost formula}
\sum_{i=1}^m a_i \geq \sum_{i=1}^m t_i
\end{equation}
where $a_i$ is the amortized cost of operation $i$, $t_i$ is the actual cost of operation $i$, and $m$ is the total number of operations.
%The difference between the accumulated amortized costs and the accumulated actual costs is called the \textit{accumulated savings}.
%Amortization allows for occasional operations to have actual costs that exceed their amortized costs. Such operations are called \textit{expensive}. Operations whose actual costs are less than their amortized costs are called \textit{cheap}.
The key to proving amortized bounds is to show that expensive operations occur only when the accumulated savings are sufficient to cover the remaining cost.
Tarjan \cite{tar85} describes two techniques for analyzing amortized data structures: the \textit{banker's method} and the \textit{physicist's method}.
In the \textit{banker's method}, the accumulated savings are represented as \textit{credits} that are associated with individual locations in the data structure. The amortized cost of any operation is defined to be the actual cost of the operation plus the credits allocated by the operation minus the credits spent by the operation, i.e.
\begin{equation}%
\label{banker's method equation}
a_i = t_i + c_i - \bar{c_i}
\end{equation}
where $c_i$ is the number of credits allocated by the operation $i$ and $\bar{c_i}$ is the number of credits spent by operation $i$. Every credit must be allocated before it is spent, and no credit may be spent more than once. Therefore, $\sum c_i \geq \sum \bar{c_i}$, which in turn guarantees that $\sum a_i \geq \sum t_i$. Proofs using the banker's method typically define a \textit{credit invariant} that regulates the distribution of credits in such a way that, whenever an expensive operation might occur, sufficient credits have been allocated in the right locations to cover its cost.
In the \textit{physicist's method}, one describes a function $\Phi$ that maps each object $d$ to a real number called the \textit{potential} of $d$. The function $\Phi$ is typically chosen so that the potential is initially zero and is always non-negative. Then, the potential represents a lower bound on the accumulated savings.
Let $d_i$ be the output of operation $i$ and the input of operation $i+1$. Then, the amortized cost of operation $i$ is defined to be the actual cost plus the change in potential between $d_{i-1}$ and $d_i$, i.e.,
\begin{equation}%
\label{physicist's method equation}
a_i = t_i + \Phi(d_i) - \Phi(d_{i-1})
\end{equation}
The accumulated actual cost of the sequence of operations are
\begin{align*}%
\label{physicist's cost}
\sum_{i=1}^{j} t_i &= \sum_{i=1}^{j} (a_i + \Phi(d_{i-1}) - \Phi(d_i)) \\
&= \sum_{i=1}^{j} a_i + \sum_{i=1}^{j} (\Phi(d_{i-1}) - \Phi(d_i)) \\
&= \sum_{i=1}^{j} a_i + \Phi(d_0) - \Phi(d_j)
\end{align*}
%We can convert the banker's method to the pysicist's method by ignoring locations and taking the potential to be the total number of credits in the object, as indicated by the credit invariant. Similarly, we can convert the physicist's method to the banker's method by converting the potential to credits, and placing all credits on the root.
\subsubsection{Queues}%
\label{subsub:Queues}
We next illustrate the banker's and physicist's methods by analyzing a simple functional implementation of the FIFO queue abstraction (see Listing \ref{lst:queue}).
Both \code{snoc} and \code{head} run in $O(1)$ worst-case time, but \code{tail} takes $O(n)$ time in the worst-case. However, we can show that \code{snoc} and \code{tail} both take $O(1)$ amortized time using either the banker's method or the physicist's method.
Using the banker's method, we maintain a credit invariant that every element in the rear list is associated with a single credit. Every \code{snoc} into a non-empty queue takes one actual step, and allocates a credit to the new element of the rear list, for an amortized cost of two. Every \code{tail} that does not reverse the rear list takes one actual step and neither allocates nor spends any credits, for an amortized cost of one. Finally, every \code{tail} that does reverse the rear list takes $m + 1$ actual steps, where $m$ is the length of the rear list, and spends the $m$ credits contained by that list, for an amortized cost of $m + 1 - m = 1$.
\begin{listing}[H]
\inputminted[breaklines=true]{haskell}{../../Chapter5/BatchedQueue.hs}
\caption{Functional Queue}
\label{lst:queue}
\end{listing}
Using the physicist's method, we define the potential function $\Phi$ to be the length of the rear list. Then every \code{snoc} into a non-empty queue takes one actual step and increases the potential by one, for an amortized cost of two. Every \code{tail} that does not reverse the rear list takes one actual step and leaves the potential unchanged, for an amortized cost of one. Finally, every \code{tail} that does reverse the rear list takes $m+1$ actual steps and sets the new rear list to \code{[]}, decreasing the potential by $m$, for an amortized cost of $m + 1 - m = 1$.
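For readers without the companion source file, a minimal sketch consistent with the implementation analyzed above is shown below; the naming is ours and the actual \texttt{BatchedQueue.hs} may differ in details.
\begin{minted}[breaklines=true]{haskell}
import Prelude hiding (head, tail)

-- Front list plus rear list; the rear is kept in reverse order.
data Queue a = Queue [a] [a]

empty :: Queue a
empty = Queue [] []

-- Restore the invariant that the front is empty only if the rear is empty.
checkf :: Queue a -> Queue a
checkf (Queue [] r) = Queue (reverse r) []
checkf q            = q

snoc :: a -> Queue a -> Queue a
snoc x (Queue f r) = checkf (Queue f (x : r))

head :: Queue a -> a
head (Queue (x : _) _) = x

tail :: Queue a -> Queue a
tail (Queue (_ : f) r) = checkf (Queue f r)
\end{minted}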
%%%%%%%%%%%% Amortization and Persistence via Lazy Evaluation}
\subsection{Amortization and Persistence via Lazy Evaluation}%
\label{sub:Amortization and Persistence via Lazy Evaluation}
The amortized bounds break in the presence of persistence. In this chapter, we demonstrate how lazy evaluation can mediate the conflict between amortization and persistence, and adapt both the banker's and physicist's methods to account for lazy evaluation. We then illustrate the use of these new methods on the well-known \textit{Queue}.
\subsubsection{Execution Traces and Logical Time}%
Traditional methods of amortization break in the presence of persistence because they assume a unique future, in which the accumulated savings will be spent at most once. However, with persistence, multiple logical futures might all try to spend the same savings.
We model logical time with \textit{execution traces}, which give an abstract view of the history of a computation. An \textit{execution trace} is a directed graph whose nodes represent operations of interest, usually just update operations on the data type in question. An edge from $v$ to $v'$ indicates that operation $v'$ uses some result of operation $v$. The \textit{logical history} of operation $v$, denoted $\hat{v}$, is the set of all operations on which the result of $v$ depends (including $v$ itself). In other words, $\hat{v}$ is the set of all nodes $w$ such that there exists a path from $w$ to $v$. A \textit{logical future} of a node $v$ is any path from $v$ to a terminal node. If there is more than one such path, then the node $v$ has multiple logical futures.
Execution traces generalize the notion of \textit{version graphs} \cite{dsst89}, which are often used to model the histories of persistent data structures.
\subsubsection{Reconciling Amortization and Persistence}%
In this section, we show how the banker's and physicist's methods can be repaired by replacing the notion of accumulated savings with accumulated debt, where debt measure the cost of unevaluated lazy computations.
We must find a way to guarantee that if the first application of $f$ to $x$ is expensive, then subsequent applications of $f$ to $x$ will not be. Without side-effects, this is impossible under \mbox{\textit{call-by-value}} (i.e., strict evaluation) or \mbox{\textit{call-by-name}} (i.e., lazy evaluation without memoization). Therefore, amortization cannot be usefully combined with persistence in languages supporting only these evaluation orders.
But now consider \mbox{\textit{call-by-need}} (i.e., lazy evaluation with memoization). If $x$ contains some suspended component that is needed by $f$, then the first application of $f$ to $x$ forces the (potentially expensive) evaluation of that component and memoizes the result. Subsequent operations may then access the memoized result directly. This is exactly the desired behavior!
\subsubsection{A Framework for Analyzing Lazy Data Structures}%
\label{ssub:A Framework for Analyzing Lazy Data Structures}
Historically, the most common technique for analyzing lazy programs has been to pretend that they are actually strict. We next describe a basic framework to support such analysis. In the remainder of this chapter, we adapt the banker's and physicist's methods to this framework, yielding both the first techniques for analyzing persistent amortized data structures and the first practical techniques for analysing non-trivial lazy programs.
We classify the costs of any given operation into several categories. The \textit{unshared cost} of an operation is the actual time it would take to execute the operation under the assumption that every suspension in the system at the beginning of the operation has already been forced and memoized. The \textit{shared cost} of an operation is the time that it would take to execute every suspension created but not evaluated by the operation. The \textit{complete cost} of an operation is the sum of its shared and unshared costs. Note that the complete cost is what the actual cost of the operation would be if lazy evaluation were replaced with strict evaluation.
\textit{Realized costs} are the shared costs for suspensions that are executed during the overall computation.
\textit{Unrealized costs} are the shared costs for suspensions that are never executed.
The \textit{total actual cost} of a sequence of operations is the sum of the unshared costs and the realized shared costs.
We account for shared costs using the notion of \textit{accumulated debt}. Initially, the debt is zero, but every time a suspension is created, we increase the accumulated debt by the shared cost of the suspension (and any nested suspensions). Each operation then pays off a portion of the accumulated debt. The \textit{amortized cost} of an operation is the unshared cost of the operation plus the amount of accumulated debt paid off by the operation. We are not allowed to force a suspension until the debt associated with it is entirely paid off.
We avoid the problem of reasoning about multiple logical futures by reasoning about each logical future \textit{as if it were the only one}. From the point of view of the operation that creates a suspension, any logical future that forces the suspension must itself pay for the suspension. Using this method, we sometimes pay off a debt more than once, thereby overestimating the total time required for a particular computation, but this does no harm and is a small price to pay for the simplicity of the resulting analyses.
\subsubsection{The Banker's Method}%
\label{ssub:The Banker's Method}
We adapt the banker's method to account for accumulated debt rather than accumulated savings by replacing credits with debits. Each debit represents a constant amount of suspended work. When we initially suspend a given computation, we create a number of debits proportional to its shared cost and associate each debit with a location in the object. If the computation is \textit{monolithic} (i.e., once begun, it runs to completion), then all debits are usually assigned to the root of the result. On the other hand, if the computation is \textit{incremental} (i.e., decomposable into fragments that may be executed independently), then the debits may be distributed among the roots of the partial results.
The amortized cost of an operation is the unshared cost of the operation plus the number of debits discharged by the operation. Note that the number of debits created by an operation is \textit{not} included in its amortized cost. To prove an amortized bound, we must show that, whenever we access a location (possibly triggering the execution of a suspension), all debits associated with that location have already been discharged. Debits leftover at the end of the computation correspond to unrealized shared costs, and are irrelevant to the total actual cost.
\textit{Incremental} functions play an important role in the banker's method because they allow debits to be dispersed to different locations in a data structure. Then, each location can be accessed as soon as its debits are discharged, without waiting for the debits at other locations to be discharged. In practice, this means that the initial partial result of an incremental computation can be paid for very quickly, and that subsequent partial results may be paid for as they are needed. Monolithic functions, on the other hand, are much less flexible.
The proof of this method is omitted for brevity.
\subsubsection{Banker's Queues}%
\label{ssub:Banker's Queues}
We next develop an efficient persistent implementation of queues, and prove that every operation runs in $O(1)$ amortized time using the banker's method.
Now, waiting until the front list becomes empty to reverse the rear list does not leave sufficient time to pay for the reverse. Instead, we periodically \textit{rotate} the queue by moving all the elements of the rear stream to the end of the front stream. When should we rotate the queue? Recall that \code{reverse} is a monolithic function. We must therefore set up the computation far enough in advance to be able to discharge all its debits by the time its result is needed. The \code{reverse} computation takes $|r|$ steps, so we allocate $|r|$ debits to account for its cost. The earliest the reverse suspension could be forced is after $|f|$ applications of \code{tail}, so if we rotate the queue when $|r| \approx |f|$ and discharge one debit per operation, then we will have paid for the reverse by the time it is executed. In fact, we rotate the queue whenever $r$ becomes one longer than $f$, thereby maintaining the invariant that $|f| \geq |r|$. Incidentally, this guarantees that $f$ is empty only if $r$ is also empty.
The complete code for this implementation appears in Listing \ref{lst:banker's queue}.
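The heart of that implementation is the rotation check. A condensed sketch is given below, using ordinary Haskell lists (which are lazy) in place of the explicit streams of the actual \texttt{BankersQueue.hs}; the naming is ours.
\begin{minted}[breaklines=true]{haskell}
import Prelude hiding (tail)

-- Length of front, front stream, length of rear, rear stream.
data Queue a = Queue Int [a] Int [a]

-- Rotate as soon as the rear becomes longer than the front,
-- maintaining the invariant |f| >= |r|.
check :: Queue a -> Queue a
check q@(Queue lenf f lenr r)
  | lenr <= lenf = q
  | otherwise    = Queue (lenf + lenr) (f ++ reverse r) 0 []

snoc :: a -> Queue a -> Queue a
snoc x (Queue lenf f lenr r) = check (Queue lenf f (lenr + 1) (x : r))

tail :: Queue a -> Queue a
tail (Queue lenf (_ : f) lenr r) = check (Queue (lenf - 1) f lenr r)
\end{minted}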
To understand how this implementation deals efficiently with persistence, consider the following scenario. Let $q_0$ be some queue whose front and rear streams are both of length $m$, and let $q_i = tail(q_{i-1})$, for $0 < i \leq m + 1$. The queue is rotated during the first application of \code{tail}, and the reverse suspension created by the rotation is forced during the last application of \code{tail}. This reversal takes $m$ steps, and its cost is amortized over the sequence $q_1 \ldots q_m$.
Now, choose some branch point $k$, and repeat the calculation from $q_k$ to $q_{m+1}$. Do this $d$ times. How often is the reverse executed? It depends on whether the branch point $k$ is before or after the rotation. Suppose $k$ is after the rotation. In fact, suppose $k = m$ so that each of the repeated branches is a single \code{tail}. Each of these branches forces the reverse suspension, but they each force the \textit{same} suspension, so the reverse is executed only once. Memoization is crucial here --- without memoization, the reverse would be re-executed each time, for a total cost of ${m(d+1)}$ steps, with only ${m + 1 + d}$ operations over which to amortize this cost. Memoization gives us an amortized cost of only $O(1)$ per operation.
It is possible to re-execute the reverse, however. Simply take $k = 0$. Then the first \code{tail} of each branch repeats the rotation and creates a new reverse suspension. This new suspension is forced in the last \code{tail} of each branch, executing the reverse. Because these are different suspensions, memoization does not help at all. The total cost of all the reversals is $m \cdot d$, but now we have ${(m + 1)(d + 1)}$ operations over which to amortize this cost, again yielding an amortized cost of $O(1)$ per operation.
\begin{listing}[H]
\inputminted[breaklines=true]{haskell}{../../Chapter6/BankersQueue.hs}
\caption{Banker's Queue}
\label{lst:banker's queue}
\end{listing}
By inspection, the unshared cost of every queue operation is $O(1)$. Therefore, to show that the amortized cost of every queue operation is $O(1)$, we must prove that discharging $O(1)$ debits per operation suffices to pay off every suspension before it is forced. In fact, only \code{snoc} and \code{tail} discharge any debits.
\newpage
Let $d(i)$ be the number of debits on the $i$th node of the front stream and let $D(i) = \sum_{j = 0}^{i} d(j)$ be the cumulative number of debits on all nodes up to and including the $i$th node. We maintain the following \textit{debit invariant}:
\[
D(i) \leq min(2i, |f| - |r|)
\]
The $2i$ term guarantees that all debits on the first node of the front stream have been discharged (since $d(0) = D(0) \leq 2 \cdot 0 = 0$). The $|f| - |r|$ term guarantees that all debits in the entire queue have been discharged whenever the streams are of equal length, which happens just before the next rotation.
\begin{theorem}
\code{snoc} and \code{tail} maintain the debit invariant by discharging one and two debits, respectively.
\end{theorem}
\begin{proof}
\textit{Purely Functional Data Structures, pg.66}.
\end{proof}
\subsubsection{The Physicist's Method}%
Like the banker's method, the physicist's method can also be adapted to work with accumulated debt rather than accumulated savings. In the traditional physicist's method, one describes a potential function $\Phi$ that represents a lower bound on the accumulated savings. To work with debt instead of savings, we replace $\Phi$ with a function $\Psi$ that maps each object to a potential representing an upper bound on the accumulated debt. Roughly speaking, the amortized cost of an operation is then the complete cost of the operation (i.e., the shared and unshared cost) minus the change in potential. Recall that an easy way to calculate the complete cost of an operation is to pretend that all computation is strict.
This method is explored further in the book using as an example \textit{Binomial Heaps}, \textit{Physicist's Queues}, \textit{Bottom-Up Mergesort with Sharing} and \textit{Lazy Pairing Heaps} which is omitted in this document for brevity.
The complete code for \textit{Physicist's Queues} appears in Listing \ref{lst:physicist's queue}.
\begin{listing}[H]
\inputminted[breaklines=true]{haskell}{../../Chapter6/PhysicistQueue.hs}
\caption{Physicist's Queue}
\label{lst:physicist's queue}
\end{listing}
%%%%%%%%%%%% Eliminating Amortization
\subsection{Eliminating Amortization}%
\label{sub:Eliminating Amortization}
In some application areas, such as \textit{real-time systems} or \textit{interactive systems}, it is important to bound the running times of individual operations, rather than the sequences of operations. In these situations, a worst-case data structure will often be preferable to an amortized data structure, even if the amortized data structure is simpler and faster overall.
\subsubsection{Scheduling}%
\label{ssub:Scheduling}
Amortized and worst-case data structures differ mainly in when the computations charged to a given operation occur. In a worst-case data structure, all computations charged to an operation occur during the operation. In an amortized data structure, some computations charged to an operation may actually occur during later operations.
In a lazy amortized data structure, any operation might take longer than the stated bounds. However, this only occurs when the operation forces a suspension that has been paid off, but that takes a long time to execute. To achieve worst-case bounds, we must guarantee that every suspension executes in no more than the allotted time.
Define the \textit{intrinsic cost} of a suspension to be the amount of time it takes to force the suspension under the assumption that all other suspensions on which it depends have already been forced and memoized, and therefore each take only $O(1)$ time to execute.
The first step in converting an amortized data structure to a worst-case data structure is to \textit{reduce the intrinsic cost of every suspension to less than the desired bounds}. Usually, this involves rewriting expensive monolithic functions to make them incremental, either by changing the underlying algorithms slightly or by switching from a representation that supports only monolithic functions, such as suspended lists, to one that supports incremental functions as well, such as streams.
Even if every suspension has a small intrinsic cost, some suspensions might still take longer than the allotted time to execute. This happens when one suspension depends on another suspension, which in turn depends on a third, and so on. If none of the suspensions have been executed previously, then forcing the first suspension results in a cascade of forces.
The second step in converting an amortized data structure to a worst-case data structure is to \textit{avoid cascading forces by arranging that, whenever we force a suspension, any other suspensions on which it depends have already been forced and memoized}. Then, no suspension takes longer than its intrinsic cost to execute. We accomplish this by systematically \textit{scheduling} the execution of each suspension so that each is ready by the time we need it. The trick is to regard paying off debt as a literal activity, and to force each suspension as it is paid for.
We extend every object with an extra component, called the \textit{schedule}, that, at least conceptually, contains a pointer to every unevaluated suspension in the object. Some of the suspensions in the schedule may have already been evaluated in a different logical future, but forcing these suspensions a second time does no harm since it can only make an algorithm run faster than expected, not slower. Every operation, in addition to whatever other manipulations it performs on an object, forces the first few suspensions in the schedule. The exact number of suspensions forced is governed by the amortized analysis; typically, every suspension takes $O(1)$ time to execute, so we force a number of suspensions proportional to the amortized cost of the operation. Depending on the data structure, maintaining the schedule can be non-trivial. For this technique to apply, adding a new suspension to the schedule, or retrieving the next suspension to be forced, cannot require more time than the desired worst-case bounds.
\subsubsection{Real-Time Queue}%
\label{ssub:Real-Time Queue}
As an example of this technique, we convert the amortized banker's queues of section \ref{ssub:Banker's Queues} to worst-case queues. Queues such as these that support all operations in $O(1)$ worst-case time are called \textit{real-time queues} \cite{hm81}.
In the original data structure, queues are rotated using \haskell{++} and \code{reverse}. Since reverse is monolithic, our first task is finding a way to perform rotations incrementally. This can be done by executing one step of the reverse for every step of the \haskell{++}. We define a function \code{rotate} such that
\begin{minted}[escapeinside=||,mathescape=true]{haskell}
rotate xs ys a |$\equiv$| xs ++ reverse ys ++ a
\end{minted}
Then
\begin{minted}[escapeinside=||,mathescape=true]{haskell}
rotate f r [] |$\equiv$| f ++ reverse r
\end{minted}
The extra argument, $a$, is called an \textit{accumulating parameter} and is used to accumulate the partial results of reversing $ys$. It is initially empty.
\newpage
Rotations occur when $|r| = |f| + 1$, so initially $|ys| = |xs| + 1$. This relationship is preserved throughout the rotation, so when $xs$ is empty, $ys$ contains a single element. The base case is therefore
\begin{minted}[escapeinside=||,mathescape=true]{haskell}
rotate [] (y : []) a
|$\equiv$| [] ++ reverse (y : []) ++ a
|$\equiv$| y : a
\end{minted}
In the recursive case,
\begin{minted}[escapeinside=||,mathescape=true]{haskell}
rotate (x:xs) (y:ys) a
|$\equiv$| (x:xs) ++ reverse (y:ys) ++ a
|$\equiv$| x : (xs ++ reverse (y:ys) ++ a)
|$\equiv$| x : (xs ++ reverse ys ++ (y:a))
|$\equiv$| x : (rotate xs ys (y:a))
\end{minted}
Putting these cases together, we get
\begin{minted}[breaklines=true]{haskell}
rotate :: [a] -> List a -> [a] -> [a]
rotate [] (Cons y _) a = y : a
rotate (x:xs) (Cons y ys) a = x : rotate xs ys (y:a)
\end{minted}
Note that the intrinsic cost of every suspension created by rotate is $O(1)$.
Next, we add a schedule to the datatype. The original datatype was
\begin{minted}[breaklines=true]{haskell}
data Queue a = Queue Int [a] Int [a]
\end{minted}
We extend this type with a new field $s$ of type \code{[a]} that represents a \textit{schedule} for forcing the nodes of $f$. Besides adding $s$, we make two further changes to the datatype. First, to emphasize the fact that the nodes of $r$ need not be scheduled, we change $r$ from a stream to a list. Second, we eliminate the length fields. We no longer need the length fields to determine when $r$ becomes longer than $f$ --- instead, we can obtain this information from the schedule. The new datatype is thus
\begin{minted}[breaklines=true]{haskell}
data Queue a = Queue [a] (List a) [a]
\end{minted}
With this representation, the major queue functions are simply
\begin{minted}[breaklines=true]{haskell}
snoc :: a -> Queue a -> Queue a
snoc x (Queue f r s) = let r' = Cons x r in exec f r' s
head :: Queue a -> a
head (Queue (x:_) _ _) = x
tail :: Queue a -> Queue a
tail (Queue (_:f) r s) = exec f r s
\end{minted}
The helper function \code{exec} executes the next suspension in the schedule and maintains the invariant that $|s| = |f| - |r|$ (which incidentally guarantees that $|f| \geq |r|$ since $|s|$ cannot be negative). \code{snoc} increases $|r|$ by one and \code{tail} decreases $|f|$ by one, so when \code{exec} is called, $|s| = |f| - |r| + 1$. If $s$ is non-empty, then we restore the invariant simply by taking the tail of $s$. If $s$ is empty, then $r$ is one longer than $f$, so we rotate the queue. In either case, the very act of pattern matching against $s$ to determine whether or not it is empty forces and memoizes the next suspension in the schedule.
\begin{minted}[breaklines=true]{haskell}
exec :: [a] -> List a -> [a] -> Queue a
exec f r (x:s) = x `seq` Queue f r s
exec f r [] = let f' = rotate f r [] in Queue f' Nil f'
\end{minted}
The complete code for this implementation appears in Listing \ref{lst:Real-Time Queue}
By inspection, every queue operation does only $O(1)$ work outside of forcing suspensions, and no operation forces more than three suspensions. Hence, to show that all queues operations run in $O(1)$ worst-case time, we must prove that no suspension takes more than $O(1)$ time to execute.
Only three forms of suspensions are created by the various queues functions.
\begin{itemize}
\item \code{Nil} is created by \code{empty} and \code{exec}. This suspension is trivial and therefore executes in $O(1)$.
\item \code{y:a} is created in both lines of \code{rotate} and is also trivial.
\item \code{x:rotate xs ys (y:a)} is created in the second line of \code{rotate}. This suspension allocates a \code{Cons}, builds a new suspension, and makes a recursive call to \code{rotate}, which pattern matches against the first node in $xs$ and immediately creates another suspension. Of these actions, only the force inherent in the pattern match has even the possibility of taking more than $O(1)$ time. But note that $xs$ is a suffix of the front stream that existed just before the previous rotation. The treatment of the schedule $s$ guarantees that \textit{every} node in that stream was forced and memoized prior to the rotation, so forcing this node again takes only $O(1)$ time.
\end{itemize}
Since every suspension executes in $O(1)$ time, every queue operation runs in $O(1)$ worst-case time.
\begin{listing}[H]
\inputminted[breaklines=true,fontsize=\footnotesize]{haskell}{../../Chapter7/RealTimeQueue.hs}
\caption{Real-Time Queue}
\label{lst:Real-Time Queue}
\end{listing}
%%%%%%%%%%%% Why the paper is relevant %%%%%%%%%%%
\section{The importance of Okasaki's work}%
\label{sec:importance}
Historically, the most common technique for analyzing lazy programs has been to pretend that they are actually strict.
C. Okasaki yields both the first techniques for analyzing persistent amortized data structures and the first practical techniques for analyzing non-trivial lazy programs.
\textit{Purely Functional Data Structures} also presents new design techniques for functional data structures such as \textit{lazy rebuilding}, a variant of \textit{global rebuilding} \cite{ove83}, \textit{numerical representations} for functional data structures, \textit{data-structural bootstrapping} for functional data structures, and a framework called \textit{implicit recursive slowdown}, a variant of \textit{recursive slowdown}, that is based on lazy binary numbers instead of segmented binary numbers.
%%%%%%%%%%%% Experiment and results
\section{Experiment \& Results}%
\label{sec:experiment}
In order to empirically verify that the implementation of Real-Time Queues from section \ref{ssub:Real-Time Queue} runs in worst-case $O(1)$ time, we implement the following experiment design:
\begin{enumerate}
\item Build a \textit{Real-Time Queue} with a random permutation of operations (to prevent speculation of the performance) of size $n = \{ 1000, 10000, 100000 \}$.
\item For each queue, analyze the performance of a sequence of \code{snoc/tail} operations.
\item Evaluate if all operations are executed in constant time, independently of the size or the order of operations.
\end{enumerate}
Benchmarking a lazy programming language is an arduous task, and it is easy to end up not measuring the right thing. For this reason, the implementation of the experiment uses the following frameworks:
\begin{itemize}
\item \textit{Criterion}: provides both a framework for executing and analysing benchmarks and a set of driver functions that makes it easy to build and run benchmarks, and to analyse their results.
\item \textit{QuickCheck}: is a library for random testing of program properties.
\end{itemize}
The complete code for this experiment appears in Listing \ref{lst:bench-impl}.
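For orientation, a condensed sketch of how such a Criterion benchmark can be set up is shown below; it assumes the \texttt{RealTimeQueue} module from Listing \ref{lst:Real-Time Queue} is in scope (the module name is ours) and elides the QuickCheck-generated random permutation of operations used in the actual experiment.
\begin{minted}[breaklines=true]{haskell}
import Criterion.Main
import Prelude hiding (head, tail)
import RealTimeQueue (Queue, empty, snoc, tail)  -- assumed module name

-- Build a queue of n elements by repeated snoc.
buildQueue :: Int -> Queue Int
buildQueue n = foldl (flip snoc) empty [1 .. n]

main :: IO ()
main = defaultMain
  [ bgroup ("n=" ++ show n)
      [ bench "snoc" $ whnf (snoc 0) queue
      , bench "tail" $ whnf tail queue
      ]
  | n <- [1000, 10000, 100000]
  , let queue = buildQueue n
  ]
\end{minted}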
\begin{listing}[H]
\inputminted[breaklines=true,fontsize=\scriptsize]{haskell}{../../Experiments/RealTimeQueues/Main.hs}
\caption{Benchmark implementation}
\label{lst:bench-impl}
\end{listing}
\begin{figure}[H]
\centering
\includegraphics[scale=0.7]{experiment}
\caption{Benchmarks results}
\label{fig:experiment results}
\end{figure}
The results of the experiment appear in Figure \ref{fig:experiment results}.
The \textit{standard deviation} is low which is a good indicator that the values are near the mean of the set.
The \textit{real} execution time of these operations is inside the \textit{confidence interval}. As the range is narrow, we can assume that the values are significant.
Furthermore, the \textit{coefficient of determination} ($R^2$) is near $1$, which means that the model explains the observed variation well.
\newpage
The results display similar \textit{mean} execution time for all operations independently of the size of the queue and the order of the operations.
Applying a \textit{Student's t-test}, we can determine whether all the operations follow the same execution-time distribution.
From the previous results, we can conclude that the functional \textit{Real-Time Queue} from section \ref{ssub:Real-Time Queue} has an $O(1)$ worst-case running time.
%%%%%%%%%%%% Conclusion
\section{Conclusion}%
\label{sec:conclusion}
Reading \textit{Purely Functional Data Structures} has been very educative. The book is well structured and easy to follow. It has helped me to get a deeper understanding of how to design and implement purely functional data structures. I also learnt \textit{Standard ML} while reading the book, which is interesting from a programming-language design point of view.
Finally, it has helped me to get a better understanding of \textit{lazy evaluation}, which, in conjunction with \textit{equational reasoning} and \textit{denotational semantics}, is still a relatively unexplored set of techniques.
%%%%%%%%%%%%%%%% BIBLIOGRAPHY %%%%%%%%%%%%%%%%%%%%%
\bibliographystyle{alpha}
\bibliography{refs}
\end{document}
\documentclass[a4paper]{article}
%% Language and font encodings
\usepackage[english]{babel}
\usepackage[utf8x]{inputenc}
\usepackage[T1]{fontenc}
%% Sets page size and margins
\usepackage[a4paper,top=3cm,bottom=2cm,left=3cm,right=3cm,marginparwidth=1.75cm]{geometry}
%% Useful packages
\usepackage{amsmath}
\usepackage{amsfonts}
\usepackage{graphicx}
\usepackage[colorinlistoftodos]{todonotes}
\usepackage[colorlinks=true, allcolors=blue]{hyperref}
\title{Judson's Abstract Algebra: Chapter 1}
\date{}
\begin{document}
\maketitle
\section*{1}
Suppose that
\begin{align*}
A &= \{ x : x \in \mathbb{N} \text{ and } x \text{ is even}\}, \\
B &= \{ x : x \in \mathbb{N} \text{ and } x \text{ is prime}\}, \\
C &= \{ x : x \in \mathbb{N} \text{ and } x \text{ is a multiple of 5}\}.
\end{align*}
Then
\begin{align*}
A \cap B &= \{ 2 \} \\
B \cap C &= \{ 5 \} \\
A \cup B &= \{ x : x \in \mathbb{N} \text{ and } x \text{ is even or } x \text{ is prime}\} \\
A \cap (B \cup C) &= \{ 2 \} \cup \{ x : x \in \mathbb{N} \text{ and } x \text{ is a multiple of 10}\}
\end{align*}
\section*{2}
If $A = \{ a, b, c \}, B = \{ 1, 2, 3 \}, C = \{ x \}, D = \emptyset$ then
\begin{align*}
A \times B &= \{ (a,1), (a,2), (a,3), (b,1), (b,2), (b,3), (c,1), (c,2), (c,3)\} \\
B \times C &= \{ (1,x), (2,x), (3,x)\} \\
A \times B \times C &= \{ (a,1,x), (a,2,x), (a,3,x), (b,1,x), (b,2,x), (b,3,x), (c,1,x), (c,2,x), (c,3,x)\} \\
A \times D &= \emptyset.
\end{align*}
\section*{3}
Find an example of two nonempty sets $A$ and $B$ for which $A \times B = B \times A$.
Consider any nonempty set $A = B$.
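For instance, one concrete choice among many is $A = B = \{ 1 \}$, for which
$$A \times B = \{ (1,1) \} = B \times A.$$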
\section*{4}
Prove $A \cup \emptyset = A$.
\vspace{\baselineskip}
By definition $A \cup \emptyset = \{ x : x \in A \lor x \in \emptyset \}$. Note that the second condition is always false and hence
$$A \cup \emptyset = \{ x : x \in A \lor x \in \emptyset \} = \{ x : x \in A \} = A.$$
Prove $A \cap \emptyset = \emptyset$.
\vspace{\baselineskip}
This is very similar to the previous proof. By definition $A \cap \emptyset = \{ x : x \in A \land x \in \emptyset \}$. Note that the second condition is always false and hence
$$A \cap \emptyset = \{ x : x \in A \land x \in \emptyset \} = \{ \} = \emptyset.$$
\section*{5}
Prove $A \cup B = B \cup A$.
\vspace{\baselineskip}
This follows directly from the definition and the commutativity of logical disjunction:
$$A \cup B = \{x : x \in A \lor x \in B\} = \{x : x \in B \lor x \in A\} = B \cup A.$$
Prove $A \cap B = B \cap A$.
\vspace{\baselineskip}
This follows directly from the definition and the commutativity of logical conjunction:
$$A \cap B = \{x : x \in A \land x \in B\} = \{x : x \in B \land x \in A\} = B \cap A.$$
\section*{6}
Prove $A \cup (B \cap C) = (A \cup B) \cap (A \cup C)$.
\vspace{\baselineskip}
Let $x \in A \cup (B \cap C)$; then $x \in A$ or $x \in B \cap C$. If $x \in A$ then $x \in A \cup B$ and $x \in A \cup C$, so $x \in (A \cup B) \cap (A \cup C)$. If $x \in B \cap C$ then $x \in B$ and $x \in C$, so again $x \in A \cup B$ and $x \in A \cup C$, and hence $x \in (A \cup B) \cap (A \cup C)$. Hence $A \cup (B \cap C) \subset (A \cup B) \cap (A \cup C)$.
\vspace{\baselineskip}
Let $x \in (A \cup B) \cap (A \cup C)$. Then $x \in A \cup B$ and $x \in A \cup C$. If $x \in A$, then clearly $x \in A \cup (B \cap C)$. If $x \not\in A$ then $x \in B$ and $x \in C$, hence $x \in B \cap C$ and so $x \in A \cup (B \cap C)$. From these two facts we have $(A \cup B) \cap (A \cup C) \subset A \cup (B \cap C)$.
\section*{7}
Prove $A \cap (B \cup C) = (A \cap B) \cup (A \cap C)$.
\vspace{\baselineskip}
Let $x \in A \cap (B \cup C)$. Then $x\in A$ and $x \in B \cup C$. There are two cases. If $x \in B$ then $x \in A \cap B$ by definition. Similarly, if $x \in C$ then $x \in A \cap C$. Hence, by definition, $x \in (A \cap B) \cup (A \cap C)$. This shows $A \cap (B \cup C) \subset (A \cap B) \cup (A \cap C)$.
\vspace{\baselineskip}
Let $x \in (A \cap B) \cup (A \cap C)$. There are two cases. If $x \in A \cap B$ then clearly $x \in A \cap (B \cup C)$. Similarly, if $x \in A \cap C$ then clearly $x \in A \cap (B \cup C)$. This shows $(A \cap B) \cup (A \cap C) \subset A \cap (B \cup C)$. This proves $A \cap (B \cup C) = (A \cap B) \cup (A \cap C)$.
\section*{8}
Prove $A \subset B$ if and only if $A \cap B = A$.
\vspace{\baselineskip}
Assume $A \cap B = A$. Then for all $a \in A$ it is true that $a \in A \cap B$ and thus $a \in B$. Hence $A \subset B$.
\vspace{\baselineskip}
Assume $A \subset B$. Then for all $a \in A$ it is true that $a \in B$. Since $a \in A$ and $a \in B$ we know $A \subset A \cap B$. Clearly $A \cap B \subset A$, therefore $A \cap B = A$.
\section*{9}
Prove $(A \cap B)' = A' \cup B'$.
\vspace{\baselineskip}
Let $x \in (A \cap B)'$. Then $x \not \in (A \cap B)$. By the definition of intersection either $x \not \in A$ or $x \not \in B$ so $x \in A'$ or $x \in B'$. By definition of union $x \in A' \cup B'$. This shows $(A \cap B)' \subseteq A' \cup B'$.
\vspace{\baselineskip}
Let $x \in A' \cup B'$. Then $x \in A'$ or $x \in B'$ by the definition of union. By the definition of the complement $x \not \in A$ or $x \not \in B$ which implies that $x \not \in A \cap B$. Then, by the definition of the complement, $x \in (A \cap B)'$. This shows $A' \cup B' \subseteq (A \cap B)'$. Hence $(A \cap B)' = A' \cup B'$.
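The two inclusions above can also be summarised as a single chain of equivalences, using only the definitions of complement, intersection and union:
\begin{align*}
x \in (A \cap B)' &\iff x \not\in A \cap B \\
&\iff x \not\in A \text{ or } x \not\in B \\
&\iff x \in A' \text{ or } x \in B' \\
&\iff x \in A' \cup B'.
\end{align*}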
\section*{10}
\section*{11}
\section*{12}
Prove $(A \cap B) \setminus B = \emptyset$.
\vspace{\baselineskip}
Let $ x \in (A \cap B) \setminus B$. Then $x \in A \cap B$ and $x \not\in B$. This is a contradiction since $x \in A \cap B$ implies $x \in B$. Hence, there are no such $x$. This is what we wanted to prove.
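Alternatively, writing the set difference as an intersection with the complement, and using $A \cap \emptyset = \emptyset$ from 4 above, the same conclusion follows as a chain of identities:
$$(A \cap B) \setminus B = (A \cap B) \cap B' = A \cap (B \cap B') = A \cap \emptyset = \emptyset.$$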
\section*{13}
\section*{14}
\section*{15}
\section*{16}
\section*{17}
\section*{18}
Determine which of the following functions are one-to-one and which are onto. If the function is not onto, determine its range.
\vspace{\baselineskip}
$f : \mathbb{R} \rightarrow \mathbb{R}$ defined by $f(x) = e^x$ is one-to-one (note that it is a continuous, strictly increasing function), but not onto. Its range is the positive real numbers.
\vspace{\baselineskip}
$f : \mathbb{Z} \rightarrow \mathbb{Z}$ defined by $f(n) = n^2 + 3$ is not one-to-one or onto. Note that $f(-1) = 4 = f(1)$ and that there is no solution to $f(n) = 1$. The range of $f$ is $\{ n^2 + 3 : n \in \mathbb{Z} \}$.
\vspace{\baselineskip}
$f : \mathbb{R} \rightarrow \mathbb{R}$ defined by $f(x) = \sin(x)$ is not one-to-one or onto. Note that $\sin(0) = \sin(2\pi)$. The range of $f$ is $[-1, 1]$.
\vspace{\baselineskip}
$f : \mathbb{Z} \rightarrow \mathbb{Z}$ defined by $f(x) = x^2$ is not one-to-one or onto. Note that $f(-1) = 1 = f(1)$ and that there is no solution to $f(x) = -1$. The range of $f$ is $\{ x^2 : x \in \mathbb{Z}\}$.
\section*{19}
Let $f: A \rightarrow B$ and $g: B \rightarrow C$ be invertible mappings; that is, mappings such that $f^{-1}$ and $g^{-1}$ exist. Show that $(g \circ f)^{-1} = f^{-1} \circ g^{-1}$.
\vspace{\baselineskip}
Let $c \in C$. Note that there exists $a \in A$ such that
$$(g \circ f)^{-1}(c) = a$$
and
$$(g \circ f)(a) = c.$$
Consider the second equation
\begin{align*}
(g \circ f)(a) &= c \\
g(f(a)) &= c \\
f^{-1}(g^{-1}(g(f(a)))) &= f^{-1}(g^{-1}(c)) \\
a &= f^{-1}(g^{-1}(c)) \\
a &= (f^{-1} \circ g^{-1}) (c).
\end{align*}
Combining this result and the fact that $(g \circ f)^{-1}(c) = a$ yields
$$(g \circ f)^{-1}(c) = a = (f^{-1} \circ g^{-1})(c).$$
This is what we wanted to prove.
\section*{20}
Define a function $f: \mathbb{N} \rightarrow \mathbb{N}$ that is one-to-one but not onto.
\vspace{\baselineskip}
Let $m,n \in \mathbb{N}$. Consider the function $f(x) = 2x$. We know $f$ is one-to-one since
\begin{align*}
f(m) &= f(n) \\
2m &= 2n \\
m &= n
\end{align*}
but notice that $f$ is not onto since there is no solution to $f(x) = 3$ (or any other odd number).
\vspace{\baselineskip}
Define a function $f: \mathbb{N} \rightarrow \mathbb{N}$ that is onto but not one-to-one.
\vspace{\baselineskip}
Consider the function defined by $f(n) = n - 1$ for $n > 1$ and $f(1) = 1$. Notice that $f$ is onto since $f(n+1) = n$ for arbitrary $n \in \mathbb{N}$. A counterexample to $f$ being one-to-one is $f(1) = f(2) = 1$.
\section*{21}
Prove the relation defined on $\mathbb{R}^2$ by $(x_1, y_1) \sim (x_2, y_2)$ if $x_1^2 + y_1^2 = x_2^2 + y_2^2$ is an equivalence relation.
\vspace{\baselineskip}
Let $(x_1, y_1), (x_2, y_2), (x_3, y_3) \in \mathbb{R}^2$. First, the relation is trivially reflexive since equality is reflexive. Also, the relation is trivially symmetric since equality is symmetric. To show the relation is transitive assume $(x_1, y_1) \sim (x_2, y_2)$ and $(x_2, y_2) \sim (x_3, y_3)$ and consider
$$x_1^2 + y_1^2 = x_2^2 + y_2^2 = x_3^2 + y_3^2.$$
This shows the relation is transitive because of the transitivity of equality.
\section*{26}
Define a relation $\sim$ on $\mathbb{R}^2$ by stating that $(a,b) \sim (c,d)$ if and only if $a^2 + b^2 \leq c^2 + d^2$. Show that $\sim$ is reflexive and transitive, but not symmetric.
\vspace{\baselineskip}
Let $(a,b), (c,d), (x,y) \in \mathbb{R}^2$. Note that $\sim$ is trivially reflexive since $a^2 + b^2 \leq a^2 + b^2$. Assume that $(a,b) \sim (c,d)$ and $(c,d) \sim (x,y)$. It follows that
$$a^2 + b^2 \leq c^2 + d^2 \leq x^2 + y^2$$
$$a^2 + b^2 \leq x^2 + y^2.$$
This shows $\sim$ is transitive. The following counterexample shows $\sim$ is not symmetric. Note $(0,0) \sim (1,1)$ since $0^2 + 0^2 = 0 \leq 2 = 1^2 + 1^2$, but $(1,1) \not \sim (0,0)$ since $2 \not \leq 0$.
\end{document} | {
"alphanum_fraction": 0.5761534474,
"avg_line_length": 33.606271777,
"ext": "tex",
"hexsha": "faf3cb3345129dad772619102f9f6b3da3d20fd8",
"lang": "TeX",
"max_forks_count": 2,
"max_forks_repo_forks_event_max_datetime": "2021-11-25T20:25:48.000Z",
"max_forks_repo_forks_event_min_datetime": "2021-11-12T10:11:48.000Z",
"max_forks_repo_head_hexsha": "7e9e9c7126741f31c32bed97a8278b3866afbd63",
"max_forks_repo_licenses": [
"MIT"
],
"max_forks_repo_name": "agdenadel/judson-abstract-algebra-solutions",
"max_forks_repo_path": "judson-solutions/Chapter01.tex",
"max_issues_count": 24,
"max_issues_repo_head_hexsha": "7e9e9c7126741f31c32bed97a8278b3866afbd63",
"max_issues_repo_issues_event_max_datetime": "2017-10-26T03:44:24.000Z",
"max_issues_repo_issues_event_min_datetime": "2017-10-19T17:09:07.000Z",
"max_issues_repo_licenses": [
"MIT"
],
"max_issues_repo_name": "agdenadel/judson-abstract-algebra-solutions",
"max_issues_repo_path": "judson-solutions/Chapter01.tex",
"max_line_length": 339,
"max_stars_count": 4,
"max_stars_repo_head_hexsha": "7e9e9c7126741f31c32bed97a8278b3866afbd63",
"max_stars_repo_licenses": [
"MIT"
],
"max_stars_repo_name": "agdenadel/judson-abstract-algebra-solutions",
"max_stars_repo_path": "judson-solutions/Chapter01.tex",
"max_stars_repo_stars_event_max_datetime": "2021-11-12T10:11:45.000Z",
"max_stars_repo_stars_event_min_datetime": "2017-10-20T22:41:24.000Z",
"num_tokens": 3694,
"size": 9645
} |
%% Package and Class "uiucthesis2018" for use with LaTeX2e.
\documentclass[edeposit,fullpage,11pt]{uiucthesis2018}
\usepackage[acronym,toc]{glossaries}
\makeglossaries
\include{acros}
\usepackage{xspace}
\usepackage{graphics}
\graphicspath{{images/}}
\usepackage{placeins}
\usepackage{booktabs} % nice rules (thick lines) for tables
\usepackage{microtype} % improves typography for PDF
\usepackage[hyphens]{url}
\usepackage[hidelinks]{hyperref}
\usepackage{caption}
\usepackage{subcaption}
\usepackage{hhline}
\usepackage{amsmath}
\allowdisplaybreaks
\usepackage{color}
\usepackage{multirow}
\usepackage{siunitx}
\usepackage{booktabs}
\usepackage{threeparttable, tablefootnote}
\usepackage{environ}
\makeatletter
\usepackage{tabularx}
\usepackage{cleveref}
\usepackage{datatool}
\usepackage[numbers]{natbib}
\usepackage{notoccite}
\usepackage{tikz}
\title{Advancement and Verification of Moltres for Molten Salt Reactor Safety
Analysis}
\author{Sun Myung Park}
\department{Nuclear, Plasma \& Radiological Engineering}
%\schools{}
\msthesis
\advisor{Kathryn D. Huff}
\degreeyear{2020}
\committee{Assistant Professor Kathryn D. Huff, Advisor \\ Associate Professor
Tomasz Kozlowski}
\begin{document}
\maketitle
\frontmatter
%% Create an abstract that can also be used for the ProQuest abstract.
%% Note that ProQuest truncates their abstracts at 350 words.
\begin{abstract}
\input{abstract}
\end{abstract}
\chapter*{Acknowledgments}
\input{acks}
%% The thesis format requires the Table of Contents to come
%% before any other major sections, all of these sections after
%% the Table of Contents must be listed therein (i.e., use \chapter,
%% not \chapter*). Common sections to have between the Table of
%% Contents and the main text are:
%%
%% List of Tables
%% List of Figures
%% List Symbols and/or Abbreviations
%% etc.
\tableofcontents
\listoftables
\listoffigures
%% Create a List of Abbreviations. The left column
%% is 1 inch wide and left-justified
\printglossary[title=List of Abbreviations,type=\acronymtype,nonumberlist,
nogroupskip=true]
%% Create a List of Symbols. The left column
%% is 0.7 inch wide and centered
\pagebreak
\mainmatter
\glsresetall
\chapter{Introduction}
\label{chap:intro}
\input{introduction}
\chapter{Molten Salt Reactors}
\label{chap:msr}
\input{msr}
\chapter{Methodology}
\label{chap:model}
\input{model}
\chapter{Neutronics Results}
\label{chap:nts}
\input{neutronics}
\chapter{Coupled Neutronics/Thermal-Hydraulics Steady-State Results}
\label{chap:ss}
\input{steadystate}
\chapter{Transient Scenarios}
\label{chap:transient}
\input{transient}
\chapter{Conclusion}
\label{chap:conclusion}
\input{conclusion}
%
%\chapter*{Appendix}
%\label{chap:appendix}
%\input{appendix}
\backmatter
\bibliographystyle{ieeetr}
\bibliography{bibliography}
\end{document}
\endinput
%%
| {
"alphanum_fraction": 0.7773079634,
"avg_line_length": 20.5652173913,
"ext": "tex",
"hexsha": "a33171baaa881a5ceb497dbacd81f8b9effc9f02",
"lang": "TeX",
"max_forks_count": 1,
"max_forks_repo_forks_event_max_datetime": "2021-11-09T10:42:56.000Z",
"max_forks_repo_forks_event_min_datetime": "2021-11-09T10:42:56.000Z",
"max_forks_repo_head_hexsha": "a4620ab17144322c928a5aad451d0d959ee6f23d",
"max_forks_repo_licenses": [
"CC-BY-4.0"
],
"max_forks_repo_name": "smpark7/masters-thesis",
"max_forks_repo_path": "main.tex",
"max_issues_count": null,
"max_issues_repo_head_hexsha": "a4620ab17144322c928a5aad451d0d959ee6f23d",
"max_issues_repo_issues_event_max_datetime": null,
"max_issues_repo_issues_event_min_datetime": null,
"max_issues_repo_licenses": [
"CC-BY-4.0"
],
"max_issues_repo_name": "smpark7/masters-thesis",
"max_issues_repo_path": "main.tex",
"max_line_length": 78,
"max_stars_count": null,
"max_stars_repo_head_hexsha": "a4620ab17144322c928a5aad451d0d959ee6f23d",
"max_stars_repo_licenses": [
"CC-BY-4.0"
],
"max_stars_repo_name": "smpark7/masters-thesis",
"max_stars_repo_path": "main.tex",
"max_stars_repo_stars_event_max_datetime": null,
"max_stars_repo_stars_event_min_datetime": null,
"num_tokens": 842,
"size": 2838
} |
\section{parameter names (without types) in function declaration}\label{sec:typeless-params}
\begin{figure}[htb]
\begin{lstlisting}
void printNum(n);
\end{lstlisting}
\warnmsg{parameter names (without types) in function declaration}
\label{ex:typeless-params}
\end{figure}
This warning means that you declared a function that takes parameters but didn't say what types those parameters are supposed to have.
This is similar to the warning in Section \ref{sec:default-param-type}, except that it applies to the function declaration rather than the function implementation.
You can fix this by supplying the missing types for the parameters.
In Example \ref{ex:typeless-params}, the parameter \variable{n} doesn't have a type.
We can fix this by writing \code{int n} instead.
\newpage | {
"alphanum_fraction": 0.7912813738,
"avg_line_length": 44.5294117647,
"ext": "tex",
"hexsha": "1a9fd9f429fe1dc442e529c2d799cfd7bea7ae86",
"lang": "TeX",
"max_forks_count": null,
"max_forks_repo_forks_event_max_datetime": null,
"max_forks_repo_forks_event_min_datetime": null,
"max_forks_repo_head_hexsha": "8775c6fd83b13ad96cc5dfa5e366cd44a627f95d",
"max_forks_repo_licenses": [
"MIT"
],
"max_forks_repo_name": "jdalyuml/compile-error-book",
"max_forks_repo_path": "c-book/compile-errors-cpp/errors/typeless-params.tex",
"max_issues_count": null,
"max_issues_repo_head_hexsha": "8775c6fd83b13ad96cc5dfa5e366cd44a627f95d",
"max_issues_repo_issues_event_max_datetime": null,
"max_issues_repo_issues_event_min_datetime": null,
"max_issues_repo_licenses": [
"MIT"
],
"max_issues_repo_name": "jdalyuml/compile-error-book",
"max_issues_repo_path": "c-book/compile-errors-cpp/errors/typeless-params.tex",
"max_line_length": 135,
"max_stars_count": null,
"max_stars_repo_head_hexsha": "8775c6fd83b13ad96cc5dfa5e366cd44a627f95d",
"max_stars_repo_licenses": [
"MIT"
],
"max_stars_repo_name": "jdalyuml/compile-error-book",
"max_stars_repo_path": "c-book/compile-errors-cpp/errors/typeless-params.tex",
"max_stars_repo_stars_event_max_datetime": null,
"max_stars_repo_stars_event_min_datetime": null,
"num_tokens": 178,
"size": 757
} |
\documentclass[a4paper,12pt]{article} % This defines the style of your paper
\usepackage[top = 2.5cm, bottom = 2.5cm, left = 2.5cm, right = 2.5cm]{geometry}
\usepackage[utf8]{inputenc} %utf8 % lettere accentate da tastiera
\usepackage[english]{babel} % lingua del documento
\usepackage[T1]{fontenc} % codifica dei font
\usepackage{multirow} % Multirow is for tables with multiple rows within one
%cell.
\usepackage{booktabs} % For even nicer tables.
\usepackage{graphicx}
\usepackage{setspace}
\setlength{\parindent}{0in}
\usepackage{float}
\usepackage{fancyhdr}
\usepackage{caption}
\usepackage{amssymb}
\usepackage{amsmath}
\usepackage{mathtools}
\usepackage{color}
\usepackage[hidelinks]{hyperref}
\usepackage{csquotes}
\usepackage{subfigure}
\pagestyle{fancy}
\setlength\parindent{24pt}
\fancyhf{}
\lhead{\footnotesize Deep Learning Lab: Assignment 1}
\rhead{\footnotesize Giorgia Adorni}
\cfoot{\footnotesize \thepage}
\begin{document}
\thispagestyle{empty}
\noindent{
\begin{tabular}{p{15cm}}
{\large \bf Deep Learning Lab} \\
Università della Svizzera Italiana \\ Faculty of Informatics \\ \today \\
\hline
\\
\end{tabular}
\vspace*{0.3cm}
\begin{center}
{\Large \bf Assignment 1: Polynomial Regression}
\vspace{2mm}
{\bf Giorgia Adorni ([email protected])}
\end{center}
}
\vspace{0.4cm}
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
\noindent {Consider the polynomial $p$ given by}
\begin{equation*}
p(x)=x^3+2x^2-4x-8=\sum_{i=1}^4 w_i^*x^{i-1} \mbox{,}
\end{equation*}
where $\textbf{w}^*=[-8,-4,2,1]^T$.
Consider also an iid dataset $\mathcal{D} = \{(x_i, y_i)\}^N_{i=1}$, where
$y_i = p(x_i)+\epsilon_i$, and each $\epsilon_i$ is drawn from a normal
distribution with mean zero and standard deviation $\sigma = \frac{1}{2}$.
If the vector $\textbf{w}^*$ were unknown, linear regression could estimate it given the dataset $\mathcal{D}$. This would require applying a feature map to transform the
original dataset $\mathcal{D}$ into an expanded dataset $\mathcal{D}'= \{(\textbf{x}_i, y_i)\}^N_{i=1}$, where $\textbf{x}_i = [1,x_i,x_i^2,x_i^3]$.
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
\section{Introduction}
The scope of this project is to perform polynomial regression on a dataset $\mathcal{D}'$, in particular to find an estimate of $\textbf{w}^*=[-8,-4,2,1]^T$ under the assumption that this vector is unknown.\\
We assume an interval $[-3, 2]$ for $x_i$, a training sample of size $100$ generated with seed $0$, a validation sample of size $100$ generated with seed $1$, and $\sigma = \frac{1}{2}$.
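Throughout, the fit is assumed to be obtained by minimising the usual mean squared error over the expanded dataset (this objective is stated here only for reference and is an assumption about the setup):
$$L(\textbf{w}) = \frac{1}{N} \sum_{i=1}^{N} \left( \textbf{w}^T \textbf{x}_i - y_i \right)^2, \qquad \textbf{x}_i = [1, x_i, x_i^2, x_i^3]^T.$$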
\section{Tuning the Learning Rate}
The learning rate is a configurable hyper-parameter that represents the
scaling factor of the gradient by which weights are updated during gradient
descent.\\
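In other words, at each iteration the weights are assumed to follow the standard gradient descent update
$$\textbf{w} \leftarrow \textbf{w} - \eta \, \nabla_{\textbf{w}} L(\textbf{w}),$$
where $\eta$ denotes the learning rate.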
I tried to discover a suitable learning rate via trial and error, setting
the initial number of iterations to $2000$, a relatively high value for the
number of parameters involved.
In the first test, I set the learning rate to a traditional default value
of $0.1$. In this case, the value is so high that the algorithm diverges.\\
Hence, I chose to decrease the value to $0.01$, obtaining a validation loss
of $0.22$, which is a good enough result for the purpose of this assignment.
\section{Iterations and Early Stopping}
Having fixed the learning rate, I decided to reduce the number of iterations using
the \textit{early stopping} technique. This rule aborts the
training procedure when the performance on the validation set stops
improving, and it therefore helps to avoid overfitting. \\
In particular, I measured the validation loss after each iteration, keeping
track of the lowest one, and I stopped the training when the validation
loss had not improved on the best value for $10$ consecutive steps.\\
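One way to formalise this stopping rule (with a patience of $10$ iterations) is to stop at the first iteration $t$ for which
$$\min_{t-10 < s \leq t} L_{\mathrm{val}}(s) \;\geq\; \min_{s \leq t-10} L_{\mathrm{val}}(s),$$
where $L_{\mathrm{val}}(s)$ denotes the validation loss after iteration $s$.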
In this case, after $1248$ iterations the model reached the best loss of
$0.2177$.
\section{Loss}
I used {TensorBoard} to display the loss curves as a function of the
gradient descent iterations for both the training and the validation set;
they are shown in Figure \ref{fig:model1-loss}.
\begin{figure}[htb]
\centering
\includegraphics[width=0.65\linewidth]{../src/img/loss/model1-es-loss.jpg}
\captionof{figure}{Training and validation loss}
\label{fig:model1-loss}
\end{figure}
The training and validation curves start at $29.69$ and $29.10$
respectively and go down to $0.24$ and $0.22$ at the $1248\mathrm{th}$
iteration.
The validation loss is therefore lower than the training
loss. {\color{red}The reason why this happens is that the training loss is
measured during each epoch, while the validation loss is computed at the end of
the learning phase of the same epoch.}
\section{Polynomial Regression}
Considered the polynomial defined by $\textbf{w}^*=[-8,-4,2,1]^T$, Figure
\ref{fig:model1-dataset} visualises the training and the validation
datasets generated, while Figure \ref{fig:model1-polynomial}, shows
the polynomial defined by $\textbf{w}^*$ and the polynomial estimated
$\hat{\textbf{w}}$.
\begin{figure}[H]
\begin{minipage}[c]{.49\textwidth}
\centering
\includegraphics[width=1\linewidth]{../src/img/model1-dataset.png}
\caption{Training and validation datasets}
\label{fig:model1-dataset}
\end{minipage}
~
\begin{minipage}[c]{.49\textwidth}
\centering
\includegraphics[width=1\linewidth]{../src/img/model1-polynomial.png}
\caption{True and estimated polynomials}
\label{fig:model1-polynomial}
\end{minipage}
\end{figure}
The true polynomial defined by the coefficients
$\textbf{w}^*=[-8,-4,2,1]^T$ and the one that has been estimated
$\hat{\textbf{w}}=[-7.86,-4.08,1.98,1.01]^T$ are very close.
From the plot in Figure \ref{fig:model1-polynomial+dataset} it is clear
that the estimated polynomial curve is fitting well enough the data.
\begin{figure}[H]
\centering
\includegraphics[width=0.8\linewidth]{../src/img/model1-polynomial+dataset.png}
\caption{Polynomial curves and datasets}
\label{fig:model1-polynomial+dataset}
\end{figure}
\section{Training Set Reduction}
It is a fact that the behaviour of the model depends on the number of
samples in the training set.
From the pictures below we can observe that, as the training dataset is reduced
from $100$ to $50$, $10$, and finally $5$ observations, the model gradually loses
the ability to capture the pattern in the data. \\
With $50$ observations, the loss curve shows that the model still
performs well. \\
By reducing the number of samples down to $10$, the validation loss starts
fluctuating, but with a tendency to decrease.\\
Finally, reducing the number of observations to $5$ makes the model overfit.
\begin{figure}[H]
\begin{minipage}[c]{.49\textwidth}
\centering
\includegraphics[width=0.85\linewidth]{../src/img/loss/model2-loss.jpg}
%\caption*{model2-loss}
%\label{fig:model2-loss}
\end{minipage}
~
\begin{minipage}[c]{.49\textwidth}
\centering
\includegraphics[width=1\linewidth]{../src/img/model2-polynomial+dataset.png}
%\caption*{model2-polynomial+dataset}
%\label{fig:model2-polynomial+dataset}
\end{minipage}
\caption{Loss and polynomial curve for training dataset with $50$
observations}
\end{figure}
\begin{figure}[H]
\begin{minipage}[c]{.49\textwidth}
\centering
\includegraphics[width=0.85\linewidth]{../src/img/loss/model3-loss.jpg}
%\caption*{model3-loss}
%\label{fig:model3-loss}
\end{minipage}
~
\begin{minipage}[c]{.49\textwidth}
\centering
\includegraphics[width=1\linewidth]{../src/img/model3-polynomial+dataset.png}
%\caption*{model3-polynomial+dataset}
%\label{fig:model3-polynomial+dataset}
\end{minipage}
\caption{Loss and polynomial curve for training dataset with $10$
observations}
\end{figure}
\begin{figure}[H]
\begin{minipage}[c]{.49\textwidth}
\centering
\includegraphics[width=0.85\linewidth]{../src/img/loss/model4-loss.jpg}
%\caption*{model4-loss}
%\label{fig:model4-loss}
\end{minipage}
~
\begin{minipage}[c]{.49\textwidth}
\centering
\includegraphics[width=1\linewidth]{../src/img/model4-polynomial+dataset.png}
%\caption*{model4-polynomial+dataset}
%\label{fig:model4-polynomial+dataset}
\end{minipage}
\caption{Loss and polynomial curve for training dataset with $5$
observations}
\end{figure}
\section{Sigma Increase}
From the pictures below we can observe that if we further increase the
standard deviation used during the creation of the dataset from $0.5$ to $2$,
$4$, and finally $8$, the model performance worsens considerably. The losses
increase respectively to $3.48$, $13.93$ and $55.74$. Moreover, the
gap between the training and validation losses grows.
One solution to improve the performance could be to increase the
observations in the training set.
\begin{figure}[H]
\begin{minipage}[c]{.49\textwidth}
\centering
\includegraphics[width=0.85\linewidth]{../src/img/loss/model5-loss.jpg}
%\caption*{model5-loss}
%\label{fig:model5-loss}
\end{minipage}
~
\begin{minipage}[c]{.49\textwidth}
\centering
\includegraphics[width=1\linewidth]{../src/img/model5-polynomial+dataset.png}
%\caption*{model5-polynomial+dataset}
%\label{fig:model5-polynomial+dataset}
\end{minipage}
\caption{Loss and polynomial curves for training dataset with sigma $2$}
\end{figure}
\begin{figure}[H]
\begin{minipage}[c]{.49\textwidth}
\centering
\includegraphics[width=0.85\linewidth]{../src/img/loss/model6-loss.jpg}
%\caption*{model6-loss}
%\label{fig:model6-loss}
\end{minipage}
~
\begin{minipage}[c]{.49\textwidth}
\centering
\includegraphics[width=1\linewidth]{../src/img/model6-polynomial+dataset.png}
%\caption*{model6-polynomial+dataset}
%\label{fig:model6-polynomial+dataset}
\end{minipage}
\caption{Loss and polynomial curves for training dataset with sigma $4$}
\end{figure}
\begin{figure}[H]
\begin{minipage}[c]{.49\textwidth}
\centering
\includegraphics[width=0.85\linewidth]{../src/img/loss/model7-loss.jpg}
%\caption*{model7-loss}
%\label{fig:model7-loss}
\end{minipage}
~
\begin{minipage}[c]{.49\textwidth}
\centering
\includegraphics[width=1\linewidth]{../src/img/model7-polynomial+dataset.png}
%\caption*{model7-polynomial+dataset}
%\label{fig:model7-polynomial+dataset}
\end{minipage}
\caption{Loss and polynomial curves for training dataset with sigma $8$}
\end{figure}
\section{Higher-degree Polynomial}
The last experiment consists in reducing the training dataset to $10$
observations and comparing a fitted polynomial of degree three with
one of degree four.
The results are visualised in Figure \ref*{fig:model8}.
\begin{figure}[H]
\begin{minipage}[c]{.49\textwidth}
\centering
\includegraphics[width=0.85\linewidth]{../src/img/loss/model8-loss.jpg}
%\caption{Loss of a polynomial of $4\mathrm{th}$ degree}
%\label{fig:model8-loss}
\end{minipage}
~
\begin{minipage}[c]{.49\textwidth}
\centering
\includegraphics[width=1\linewidth]{../src/img/model8-polynomial+dataset.png}
%\caption{Polynomial curves of $4\mathrm{th}$ degree}
%\label{fig:model8-polynomial+dataset}
\end{minipage}
\caption{Polynomial of 4th degree}
\label{fig:model8}
\end{figure}
As far as the loss is concerned, the validation loss grows during the
first $400$ iterations and then starts decreasing, while the
training loss decreases steadily.
In Figure \ref*{fig:model8-test} it is clear that the estimated
polynomial of $4\mathrm{th}$ degree is able to fit the data well in
the interval $[-1,2]$, while the polynomial of $3\mathrm{rd}$ degree does so
in the interval $[-1.5,1]$.\bigskip
By increasing the number of observations in the training set up to $8000$, it
is possible to get a good understanding of the validation loss behaviour
over time.
\begin{figure}[H]
\begin{minipage}[c]{.49\textwidth}
\centering
\includegraphics[width=0.85\linewidth]{../src/img/loss/model8-loss-test.jpg}
%\caption*{model8-loss-test}
%\label{fig:model8-loss-test}
\end{minipage}
~
\begin{minipage}[c]{.49\textwidth}
\centering
\includegraphics[width=1\linewidth]{../src/img/model8-test-polynomial+dataset.png}
%\caption*{model8-test-polynomial+dataset}
%\label{fig:model8-test-polynomial+dataset}
\end{minipage}
\caption{Polynomial of 4th degree over $8000$ iterations}
\label{fig:model8-test}
\end{figure}
\end{document}
| {
"alphanum_fraction": 0.7132772843,
"avg_line_length": 35.8181818182,
"ext": "tex",
"hexsha": "e3b20abda17ff806b84c9e5ac68d5a39189d5987",
"lang": "TeX",
"max_forks_count": null,
"max_forks_repo_forks_event_max_datetime": null,
"max_forks_repo_forks_event_min_datetime": null,
"max_forks_repo_head_hexsha": "4d0f6c8575c52e88ef21fdc2d7d3906438f8cb7d",
"max_forks_repo_licenses": [
"Apache-2.0"
],
"max_forks_repo_name": "GiorgiaAuroraAdorni/polynomial-regression",
"max_forks_repo_path": "report/scifile.tex",
"max_issues_count": null,
"max_issues_repo_head_hexsha": "4d0f6c8575c52e88ef21fdc2d7d3906438f8cb7d",
"max_issues_repo_issues_event_max_datetime": null,
"max_issues_repo_issues_event_min_datetime": null,
"max_issues_repo_licenses": [
"Apache-2.0"
],
"max_issues_repo_name": "GiorgiaAuroraAdorni/polynomial-regression",
"max_issues_repo_path": "report/scifile.tex",
"max_line_length": 208,
"max_stars_count": null,
"max_stars_repo_head_hexsha": "4d0f6c8575c52e88ef21fdc2d7d3906438f8cb7d",
"max_stars_repo_licenses": [
"Apache-2.0"
],
"max_stars_repo_name": "GiorgiaAuroraAdorni/polynomial-regression",
"max_stars_repo_path": "report/scifile.tex",
"max_stars_repo_stars_event_max_datetime": null,
"max_stars_repo_stars_event_min_datetime": null,
"num_tokens": 3913,
"size": 12608
} |
% JuliaCon proceedings template
\documentclass{juliacon}
\setcounter{page}{1}
\begin{document}
\input{header}
\maketitle
\begin{abstract}
AbstractLogic.jl provides an intuitive and effective non-deterministic tool for
analytical reasoning programming. It provides tools for mapping a potential
solution space, constraining solutions based on input constraints, checking the
validity of assertions, and searching for potential solutions.
AbstractLogic.jl has its own language, which attempts to mimic intuitive logical
expressions wherever possible.
\end{abstract}
\section{Introduction}
AbstractLogic.jl provides an intuitive and effective tool for analytical
reasoning programming. Abstract logic problems appear prominently in the
academic environment on the Law School Entrance Exam, and they also appear more
generally in a variety of contexts in which assessors wish to understand the
cognitive limits of human test solvers. More generally, abstract logic problems
are often faced by businesses and individuals having to find solutions that
satisfy a variety of stakeholders' requirements.\vskip 6pt
AbstractLogic.jl is not the first abstract logic programming language, nor the
most extensible, nor the most efficient. What it aims to be is the most
intuitive and easily accessible. This is accomplished by drawing as much as
possible on notation developed for the communication of abstract ideas by
logicians and mathematicians over the centuries.\vskip 6pt
It provides tools for mapping a potential solution space,
constraining solutions based on input constraints, checking the validity of
assertions, and searching for potential solutions. AbstractLogic.jl has its
own inline language which attempts to mimic intuitive logical expressions
wherever possible.
Moreover, \LaTeX{} provides a consistent and comprehensive document
preparation interface. There are simple-to-use commands for
generating a table of contents, lists of figures and/or tables, and indexes.
\LaTeX{} can automatically number list entries, equations, figures,
tables, and footnotes, as well as articles, sections, and subsections.
Using this numbering system, bibliographic citations, page
references, and cross references to any other numbered entity (e.g.
article, section, equation, figure, list entry, etc.) become quite simple
and straightforward. The use of \LaTeX{} document classes allows
a simple change of class to transform the appearance of your document.\vskip 6pt
\LaTeX{} is a powerful tool for managing long and complex documents.
In particular, partial processing enables long documents to
be produced article by article without losing sequential information.
The use of document classes allows a simple change of style
(or style option) to transform the appearance of your document.
\section{The JuliaCon Article Class}
\label{sec:documentclass}
%
The juliacon class file preserves the standard LATEX{} interface such
that any document that can be produced using the standard LATEX{}
article class can also be produced with the class file.\vskip 6pt
It is likely that the make up will change after file submission. For
this reason, we ask you to ignore details such as slightly long lines,
page stretching, or figures falling out of synchronization, as these
details can be dealt with at a later stage.\vskip 6pt
Use should be made of symbolic references (\verb \ref ) in order to
protect against late changes of order, etc.
\section{USING THE JuliaCon Article CLASS FILE}
If the file \verb juliacon.cls is not already in the appropriate system directory
for \LaTeX{} files, either arrange for it to be put there or copy
it to your working directory. The \verb juliacon document class is implemented
as a complete class, not a document style option. In order to
use the \verb juliacon document class, replace \verb article by \verb juliacon in the
\verb \documentclass command at the beginning of your document:
\vskip 6pt
\begin{centering}
\verb \documentclass{article} \end{centering}
\vskip 6pt
replace by
\vskip 6pt
\verb \documentclass{juliacon} \vskip 6pt
In general, the following standard document \verb style options should
{ \itshape not} be used with the {\footnotesize \itshape article} class file:
\begin{enumerate}
\item[(1)] \verb 10pt, \verb 11pt, \verb 12pt -- unavailable;
\item[(2)] \verb twoside (no associated style file) -- \verb twoside is the default;
\item[(3)] \verb fleqn, \verb leqno, \verb titlepage -- should not be used;
\end{enumerate}
\section{Additional Document Style Options}
\label{sec:additional_doc}
%
The following additional style option is available with the \verb juliacon class file:
\vskip 6pt
Please place any additional command definitions at the very start of
the \LaTeX{} file, before the \verb \begin{document} . For example, user-defined
\verb \def and \verb \newcommand commands that define macros for
technical expressions should be placed here. Other author-defined
macros should be kept to a minimum.
\vskip 6pt
Commands that differ from the standard \LaTeX{} interface, or that
are provided in addition to the standard interface, are explained in
this guide. This guide is not a substitute for the \LaTeX{} manual itself.
Authors planning to submit their papers in \LaTeX{} are advised to use
\verb \juliacon.cls as early as possible in the creation of their files.
%
%
%
%
\begin{table*}[t]
\tabcolsep22pt
\tbl{If necessary, the tables can be extended to both columns.}{
\begin{tabular}{|l|l|c|c|}\hline
Label & \multicolumn{1}{c|}{Description}
& Number of Users &
Number of Queries\\\hline
Test 1 & Training Data &
\smash{\raise-7pt\hbox{70}} & 104\\
\cline{1-2}\cline{4-4}
Test 2 & Testing Data I & & 105\\\hline
Test 3 & Testing Data II & 30 & 119\\\hline
& Total & 100 & 328\\\hline
\end{tabular}}
\label{tab:symbols}
\begin{tabnote}
This is an example of table footnote.
\end{tabnote}
\end{table*}
% \begin{figure*}[t]
% \centerline{\includegraphics[width=11cm]{juliagraphs.png}}
% \caption{If necessary, the images can be extended both columns.}
% \label{fig:sample_image}
% \end{figure*}
\section{Additional features}
\label{sec:additional_faci}
In addition to all the standard \LaTeX{} design elements, the \verb juliacon class file includes the following features:
In general, once you have used the additional \verb juliacon.cls facilities
in your document, do not process it with a standard \LaTeX{} class
file.
\subsection{Titles, Author's Name, and Affiliation}
\label{subsub:title_auth}
The title of the article, author's name, and affiliation are used at the
beginning of the article (for the main title). These can be produced
using the following code:
\begin{verbatim}
\title{ This is an example of article title} }
\author{
\large 1st Author \\[-3pt]
\normalsize 1st author's affiliation \\[-3pt]
\normalsize 1st line of address \\[-3pt]
\normalsize 2nd line of address \\[-3pt]
\normalsize 1st author's email address \\[-3pt]
\and
\large 2nd Author \\[-3pt]
\normalsize 2nd author's affiliation \\[-3pt]
\normalsize 1st line of address \\[-3pt]
\normalsize 2nd line of address \\[-3pt]
\normalsize 2nd author's email address \\[-3pt]
\and
\large 3rd Author \\[-3pt]
\normalsize 3rd author's affiliation \\[-3pt]
\normalsize 1st line of address \\[-3pt]
\normalsize 2nd line of address \\[-3pt]
\normalsize 3rd author's email address \\[-3pt]
}
\maketitle
\end{verbatim}
\subsection{Writing Julia code}
A special environment is already defined for Julia code,
built on top of \textit{listings} and \textit{jlcode}.
\begin{verbatim}
\begin{lstlisting}[language = Julia]
using Plots
x = -3.0:0.01:3.0
y = rand(length(x))
plot(x, y)
\end{lstlisting}
\end{verbatim}
\begin{lstlisting}[language = Julia]
using Plots
x = -3.0:0.01:3.0
y = rand(length(x))
plot(x, y)
\end{lstlisting}
\subsection{Abstracts, Key words, term etc...}
\label{subsub:abs_key_etc}
At the beginning of your article, the title should be generated
in the usual way using the \verb \maketitle command. For general terms and keywords use the
\verb \terms ,
\verb \keywords commands respectively. The abstract should be enclosed
within an abstract environment, All these environment
can be produced using the following code:
\begin{verbatim}
\terms{Experimentation, Human Factors}
\keywords{Face animation, image-based modelling...}
\begin{abstract}
In this paper, we propose a new method for the
systematic determination of the model's base of
time varying delay system. This method based on
the construction of the classification data related
to the considered system. The number, the orders,
the time delay and the parameters of the local
models are generated automatically without any
knowledge about the full operating range of the
process. The parametric identification of the local
models is realized by a new recursive algorithm for
on line identification of systems with unknown time
delay. The proposed algorithm allows simultaneous
estimation of time delay and parameters of
discrete-time systems. The effectiveness of
the new method has been illustrated through
simulation.
\end{abstract}
\end{verbatim}
\section{Some guidelines}
\label{sec:some_guide}
The following notes may help you achieve the best effects with the
\verb juliacon class file.
\subsection{Sections}
\label{subsub:sections}
\LaTeXe{} provides four levels of section headings and they are all
defined in the \verb juliacon class file:
\begin{itemize}
\item \verb \section \item \verb \subsection \item \verb \subsubsection \item \verb \paragraph \end{itemize}
Section headings are automatically converted to allcaps style.
\subsection{Lists}
\label{sec:lists}
%
The \verb juliacon class file provides unnumbered lists using the
\verb unnumlist environment for example,
\begin{unnumlist}
\item First unnumbered item which has no label and is indented from the
left margin.
\item Second unnumbered item.
\item Third unnumbered item.
\end{unnumlist}
The unnumbered list which has no label and is indented from the
left margin. was produced by:
\begin{verbatim}
\begin{unnumlist}
\item First unnumbered item...
\item Second unnumbered item...
\item Third unnumbered item...
\end{unnumlist}
\end{verbatim}
The \verb juliacon class file also provides hyphen list using the
\verb itemize environment for example,
\begin{itemize}
\item First unnumbered bulleted item which has no label and is indented
from the left margin.
\item Second unnumbered bulleted item.
\item Third unnumbered bulleted item which has no label and is indented
from the left margin.
\end{itemize}
was produced by:
\begin{verbatim}
\begin{itemize}
\item First item...
\item Second item...
\item Third item...
\end{itemize}
\end{verbatim}
Numbered list is also provided in acmtog class file using the
enumerate environment for example,
\begin{enumerate}
\item The attenuated and diluted stellar radiation.
\item Scattered radiation, and
\item Reradiation from other grains.
\end{enumerate}
was produced by:
\begin{verbatim}
\begin{enumerate}
\item The attenuated...
\item Scattered radiation, and...
\item Reradiation from other grains...
\end{enumerate}
\end{verbatim}
\subsection{Illustrations (or figures)}
\label{subsub:sec_Illus}
The \verb juliacon class file will cope with most of the positioning of
your illustrations and you should not normally use the optional positional
qualifiers on the \verb figure environment that would override
these decisions.
\vskip 6pt
%
\begin{figure}[t]
\centerline{\includegraphics[width=4cm]{juliagraphs.png}}
\caption{This is example of the image in a column.}
\label{fig:sample_figure}
\end{figure}
The figure \ref{fig:sample_figure} is taken from the JuliaGraphs
organization \footnote{https://github.com/JuliaGraphs}.
Figure captions should be \emph{below} the figure itself, therefore the
\verb \caption command should appear after the figure or space left for
an illustration. For example, Figure 1 is produced using the following
commands:
\begin{verbatim}
\begin{figure}
\centerline{\includegraphics[width=20pc]{Graphics.eps}}
\caption{An example of the testing process for a
binary tree. The globa null hypothesis is tested
first at level $\alpha$ (a), and the level of
individual variables is reached last (d). Note
that individual hypotheses can be tested at
level $\alpha/4$ and not $\alpha/8$ as one might
expect at first.}
\label{sample-figure_2}
\end{figure}
\end{verbatim}
Figures can be resized using first and second argument of
\verb \includegraphics command. First argument is used for modifying
figure height and the second argument is used for modifying
figure width respectively.
\vskip 6pt
Cross-referencing of figures, tables, and numbered, displayed
equations using the \verb \label and \verb \ref commands is encouraged.
For example, in referencing Figure 1 above, we used
\verb Figure~\ref{sample-figure}
\subsection{Tables}
\label{subsub:sec_Tab}
The \verb juliacon class file will cope with most of the positioning of
your tables and you should not normally use the optional positional qualifiers on the table environment which would override these
decisions. Table captions should be at the top.
\begin{verbatim}
\begin{table}
\tbl{Tuning Set and Testing Set}{
\begin{tabular}{|l|l|c|c|}\hline
Label & \multicolumn{1}{c|}{Description}
& Number of Users &
Number of Queries\\\hline
Train70 & Training Data &
\smash{\raise-7pt\hbox{70}} & 104\\
\cline{1-2}\cline{4-4}
Test70 & Testing Data I & & 105\\\hline
Test30 & Testing Data II & 30 & 119\\\hline
& Total & 100 & 328\\\hline
\end{tabular}}
\end{table}\end{verbatim}
\begin{table}
\tbl{Tuning Set and Testing Set}{
\begin{tabular}{|l|l|c|c|}\hline
Label & \multicolumn{1}{c|}{Description}
& Number of Users &
Number of Queries\\\hline
Test 1 & Training Data &
\smash{\raise-7pt\hbox{70}} & 104\\
\cline{1-2}\cline{4-4}
Test 2 & Testing Data I & & 105\\\hline
Test 3 & Testing Data II & 30 & 119\\\hline
& Total & 100 & 328\\\hline
\end{tabular}}
\end{table}
\subsection{Landscaping Pages}
\label{subsub:landscaping_pages}
If a table is too wide to fit the standard measure, it may be turned,
with its caption, to 90 degrees. Landscape tables cannot be produced
directly using the \verb juliacon class file because \TeX{} itself cannot
turn the page, and not all device drivers provide such a facility.
The following procedure can be used to produce such pages.
\vskip 6pt
Use the package \verb rotating in your document and change the coding
from
\begin{verbatim}
\begin{table}...\end{table}
to
\begin{sidewaystable}...\end{sidewaystable}
and for figures
\begin{figure}...\end{figure}
to
\begin{sidewaysfigure}...\end{sidewaysfigure}
\end{verbatim}
environments in your document to turn your table on the appropriate
page of your document. For instance, the following code prints
a page with the running head, a message half way down and the
table number towards the bottom.
\begin{verbatim}
\begin{sidewaystable}
\tbl{Landscape table caption to go here.}{...}
\label{landtab}
\end{sidewaystable}
\end{verbatim}
\subsection{Double Column Figure and Tables}
\label{subsub:double_fig_tab}
For generating the output of figures and tables in double column
we can use the following coding:
\begin{enumerate}
\item For Figures:
\begin{verbatim}
\begin{figure*}...\end{figure*}
\end{verbatim}
\item For landscape figures:
\begin{verbatim}
\begin{sidewaysfigure*}...\end{sidewaysfigure*}
\end{verbatim}
\item For Tables:
\begin{verbatim}
\begin{table*}...\end{table*}
\end{verbatim}
\item For landscape tables:
\begin{verbatim}
\begin{sidewaystable*}...\end{sidewaystable*}
\end{verbatim}
\end{enumerate}
\subsection{Typesetting Mathematics}
\label{subsub:type_math}
The \verb juliacon class file will set displayed mathematics with center to
the column width, provided that you use the \LaTeXe{} standard of
open and closed square brackets as delimiters.
The equation
\[
\sum_{i=1}^p \lambda_i = (S)
\]
was typeset using the acmtog class file with the commands
\begin{verbatim}
\[
\sum_{i=1}^p \lambda_i = (S)
\]
\end{verbatim}
For display equations, cross-referencing is encouraged. For example,
\begin{verbatim}
\begin{equation}
(n-1)^{-1} \sum^n_{i=1} (X_i - \overline{X})^2.
\label{eq:samplevar}
\end{equation}
Equation~(\ref{eq:samplevar}) gives the formula for
sample variance.
\end{verbatim}
The following output is generated with the above coding:
\begin{equation}
(n-1)^{-1} \sum^n_{i=1} (X_i - \overline{X})^2.
\label{eq:samplevar}
\end{equation}
Equation~(\ref{eq:samplevar}) gives the formula for
sample variance.
\subsection{Enunciations}
\label{subsub:enunciation}
The \verb juliacon class file generates the enunciations with the help of
the following commands:
\begin{verbatim}
\begin{theorem}...\end{theorem}
\begin{strategy}...\end{strategy}
\begin{property}...\end{property}
\begin{proposition}...\end{proposition}
\begin{lemma}...\end{lemma}
\begin{example}...\end{example}
\begin{proof}...\end{proof}
\begin{definition}...\end{definition}
\begin{algorithm}...\end{algorithm}
\begin{remark}...\end{remark}
\end{verbatim}
The above-mentioned coding can also include optional arguments
such as
\begin{verbatim}
\begin{theorem}[...]. Example for theorem:
\begin{theorem}[Generalized Poincare Conjecture]
Four score and seven ... created equal.
\end{theorem}
\end{verbatim}
\begin{theorem}[Generalized Poincare Conjecture]
Four score and seven years ago our fathers brought forth,
upon this continent, a new nation, conceived in Liberty,
and dedicated to the proposition that all men are
created equal.
\end{theorem}
\subsection{Extract}
\label{subsub:extract}
Extract environment should be coded within
\begin{verbatim}
\begin{extract}..\end{extract}
\end{verbatim}
\subsection{Balancing column at last page}
\label{subsub:Balance}
For balancing the both column length at last page use :
\begin{verbatim}
\vadjust{\vfill\pagebreak}
\end{verbatim}
%\vadjust{\vfill\pagebreak}
at appropriate place in your \TeX{} file or in bibliography file.
\section{Handling references}
\label{subsub:references}
References are most easily (and correctly) generated using the
BIBTEX, which is easily invoked via
\begin{verbatim}
\bibliographystyle{juliacon}
\bibliography{ref}
\end{verbatim}
When submitting the document source (.tex) file to external
parties, the ref.bib file should be sent with it.
\cite{bezanson2017julia}
\input{bib.tex}
\end{document}
% Inspired by the International Journal of Computer Applications template
| {
"alphanum_fraction": 0.7697663123,
"avg_line_length": 33.9358974359,
"ext": "tex",
"hexsha": "a2976548dae80757b2219c07348b340d1fc3864a",
"lang": "TeX",
"max_forks_count": null,
"max_forks_repo_forks_event_max_datetime": null,
"max_forks_repo_forks_event_min_datetime": null,
"max_forks_repo_head_hexsha": "56fe6ac33269af76b56de41b4dd14856364ad073",
"max_forks_repo_licenses": [
"MIT"
],
"max_forks_repo_name": "EconometricsBySimulation/AbstractLogic.jl-JuliaConSubmission",
"max_forks_repo_path": "paper/paper.tex",
"max_issues_count": null,
"max_issues_repo_head_hexsha": "56fe6ac33269af76b56de41b4dd14856364ad073",
"max_issues_repo_issues_event_max_datetime": null,
"max_issues_repo_issues_event_min_datetime": null,
"max_issues_repo_licenses": [
"MIT"
],
"max_issues_repo_name": "EconometricsBySimulation/AbstractLogic.jl-JuliaConSubmission",
"max_issues_repo_path": "paper/paper.tex",
"max_line_length": 130,
"max_stars_count": null,
"max_stars_repo_head_hexsha": "56fe6ac33269af76b56de41b4dd14856364ad073",
"max_stars_repo_licenses": [
"MIT"
],
"max_stars_repo_name": "EconometricsBySimulation/AbstractLogic.jl-JuliaConSubmission",
"max_stars_repo_path": "paper/paper.tex",
"max_stars_repo_stars_event_max_datetime": null,
"max_stars_repo_stars_event_min_datetime": null,
"num_tokens": 4908,
"size": 18529
} |